##// END OF EJS Templates
pycompat: make fewer assumptions about sys.executable...
Rodrigo Damazio Bovendorp -
r42723:49998d5b default
parent child Browse files
Show More
@@ -1,639 +1,641 b''
1 1 # chgserver.py - command server extension for cHg
2 2 #
3 3 # Copyright 2011 Yuya Nishihara <yuya@tcha.org>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 """command server extension for cHg
9 9
10 10 'S' channel (read/write)
11 11 propagate ui.system() request to client
12 12
13 13 'attachio' command
14 14 attach client's stdio passed by sendmsg()
15 15
16 16 'chdir' command
17 17 change current directory
18 18
19 19 'setenv' command
20 20 replace os.environ completely
21 21
22 22 'setumask' command (DEPRECATED)
23 23 'setumask2' command
24 24 set umask
25 25
26 26 'validate' command
27 27 reload the config and check if the server is up to date
28 28
29 29 Config
30 30 ------
31 31
32 32 ::
33 33
34 34 [chgserver]
35 35 # how long (in seconds) should an idle chg server exit
36 36 idletimeout = 3600
37 37
38 38 # whether to skip config or env change checks
39 39 skiphash = False
40 40 """
41 41
42 42 from __future__ import absolute_import
43 43
44 44 import hashlib
45 45 import inspect
46 46 import os
47 47 import re
48 48 import socket
49 49 import stat
50 50 import struct
51 51 import time
52 52
53 53 from .i18n import _
54 54
55 55 from . import (
56 56 commandserver,
57 57 encoding,
58 58 error,
59 59 extensions,
60 60 node,
61 61 pycompat,
62 62 util,
63 63 )
64 64
65 65 from .utils import (
66 66 procutil,
67 67 stringutil,
68 68 )
69 69
def _hashlist(items):
    """Return the hex sha1 digest of *items*' pretty-printed form."""
    digest = hashlib.sha1(stringutil.pprint(items)).digest()
    return node.hex(digest)
73 73
# sensitive config sections affecting confighash
_configsections = [
    'alias', # affects global state commands.table
    'eol', # uses setconfig('eol', ...)
    'extdiff', # uisetup will register new commands
    'extensions',
]

# (section, item) pairs whose individual values affect confighash even
# though the whole section is not sensitive
_configsectionitems = [
    ('commands', 'show.aliasprefix'), # show.py reads it in extsetup
]

# sensitive environment variables affecting confighash; re.X (verbose)
# mode lets the alternatives be listed one per line
_envre = re.compile(br'''\A(?:
                    CHGHG
                    |HG(?:DEMANDIMPORT|EMITWARNINGS|MODULEPOLICY|PROF|RCPATH)?
                    |HG(?:ENCODING|PLAIN).*
                    |LANG(?:UAGE)?
                    |LC_.*
                    |LD_.*
                    |PATH
                    |PYTHON.*
                    |TERM(?:INFO)?
                    |TZ
                    )\Z''', re.X)
99 99
def _confighash(ui):
    """Return a quick hash for detecting config/env changes.

    The hash covers the sensitive config items and environment variables
    listed above.  Once confighash changes, the server is no longer
    qualified to serve its client and should redirect the client to a new
    server; unlike mtimehash, a confighash change does not mark the server
    outdated, since the user can run with different configs concurrently.
    """
    sectionitems = [ui.configitems(section) for section in _configsections]
    for section, item in _configsectionitems:
        sectionitems.append(ui.config(section, item))
    sectionhash = _hashlist(sectionitems)
    # If $CHGHG is set, the change to $HG should not trigger a new chg server
    ignored = {'HG'} if 'CHGHG' in encoding.environ else set()
    envitems = []
    for k, v in encoding.environ.iteritems():
        if _envre.match(k) and k not in ignored:
            envitems.append((k, v))
    envhash = _hashlist(sorted(envitems))
    return sectionhash[:6] + envhash[:6]
126 126
def _getmtimepaths(ui):
    """Get a sorted list of paths that should be checked to detect change.

    The list will include:
    - extensions (will not cover all files for complex extensions)
    - mercurial/__version__.py
    - the python binary, when its path is known
    """
    modules = [m for n, m in extensions.extensions(ui)]
    try:
        from . import __version__
        modules.append(__version__)
    except ImportError:
        pass
    paths = set()
    # sys.executable may be empty/None (e.g. embedded interpreters)
    if pycompat.sysexecutable:
        paths.add(pycompat.sysexecutable)
    for mod in modules:
        try:
            paths.add(pycompat.fsencode(inspect.getabsfile(mod)))
        except TypeError:
            # built-in modules have no source file
            pass
    return sorted(paths)
148 150
def _mtimehash(paths):
    """Return a quick hash for detecting file changes.

    The hash is computed from the size and mtime of each path; file
    contents are never read because reading is expensive.  It is therefore
    not 100% reliable: the same content can yield different hashes, and
    carefully crafted different contents can collide.

    For chgserver, once mtimehash changes the server is considered
    outdated immediately and should no longer provide service.

    mtimehash is not part of confighash because extension paths are only
    known after importing them (imp.find_module has race conditions), and
    confighash must be computable without importing.
    """
    def statinfo(path):
        try:
            st = os.stat(path)
        except OSError:
            # could be ENOENT, EPERM etc. not fatal in any case
            return None
        return (st[stat.ST_MTIME], st.st_size)
    return _hashlist([statinfo(p) for p in paths])[:12]
174 176
class hashstate(object):
    """a structure storing confighash, mtimehash, paths used for mtimehash"""
    def __init__(self, confighash, mtimehash, mtimepaths):
        self.confighash = confighash
        self.mtimehash = mtimehash
        self.mtimepaths = mtimepaths

    @staticmethod
    def fromui(ui, mtimepaths=None):
        """Build a hashstate from *ui*, computing mtimepaths when absent."""
        mtimepaths = _getmtimepaths(ui) if mtimepaths is None else mtimepaths
        state = hashstate(_confighash(ui), _mtimehash(mtimepaths), mtimepaths)
        ui.log('cmdserver', 'confighash = %s mtimehash = %s\n',
               state.confighash, state.mtimehash)
        return state
191 193
def _newchgui(srcui, csystem, attachio):
    """Return a copy of *srcui* whose system()/pager requests are proxied
    to the chg client through *csystem* (the 'S' channel handler).

    *attachio* is exposed to the pager cmdtable so the client can
    re-attach its stdio when a pager starts.
    """
    class chgui(srcui.__class__):
        def __init__(self, src=None):
            super(chgui, self).__init__(src)
            if src:
                # inherit the channel from the copied ui when present
                self._csystem = getattr(src, '_csystem', csystem)
            else:
                self._csystem = csystem

        def _runsystem(self, cmd, environ, cwd, out):
            # fallback to the original system method if
            #  a. the output stream is not stdout (e.g. stderr, cStringIO),
            #  b. or stdout is redirected by protectfinout(),
            # because the chg client is not aware of these situations and
            # will behave differently (i.e. write to stdout).
            if (out is not self.fout
                or not util.safehasattr(self.fout, 'fileno')
                or self.fout.fileno() != procutil.stdout.fileno()
                or self._finoutredirected):
                return procutil.system(cmd, environ=environ, cwd=cwd, out=out)
            # flush buffered output before the client runs the command
            self.flush()
            return self._csystem(cmd, procutil.shellenviron(environ), cwd)

        def _runpager(self, cmd, env=None):
            self._csystem(cmd, procutil.shellenviron(env), type='pager',
                          cmdtable={'attachio': attachio})
            return True

    return chgui(srcui)
221 223
def _loadnewui(srcui, args, cdebug):
    """Create a fresh (ui, local ui) pair for one client request.

    The new ui inherits *srcui*'s stdio streams, environ and chg system
    channel, then reparses config so per-request --config/--cwd/-R options
    take effect.  Returns (newui, newlui).
    """
    from . import dispatch # avoid cycle

    newui = srcui.__class__.load()
    # carry over the client-attached streams and environment
    for a in ['fin', 'fout', 'ferr', 'environ']:
        setattr(newui, a, getattr(srcui, a))
    if util.safehasattr(srcui, '_csystem'):
        newui._csystem = srcui._csystem

    # command line args
    options = dispatch._earlyparseopts(newui, args)
    dispatch._parseconfig(newui, options['config'])

    # stolen from tortoisehg.util.copydynamicconfig()
    for section, name, value in srcui.walkconfig():
        source = srcui.configsource(section, name)
        if ':' in source or source == '--config' or source.startswith('$'):
            # path:line or command line, or environ
            continue
        newui.setconfig(section, name, value, source)

    # load wd and repo config, copied from dispatch.py
    cwd = options['cwd']
    cwd = cwd and os.path.realpath(cwd) or None
    rpath = options['repository']
    path, newlui = dispatch._getlocal(newui, rpath, wd=cwd)

    extensions.populateui(newui)
    commandserver.setuplogging(newui, fp=cdebug)
    if newui is not newlui:
        # repo-local ui is a distinct object; set it up as well
        extensions.populateui(newlui)
        commandserver.setuplogging(newlui, fp=cdebug)

    return (newui, newlui)
256 258
class channeledsystem(object):
    """Propagate ui.system() request in the following format:

    payload length (unsigned int),
    type, '\0',
    cmd, '\0',
    cwd, '\0',
    envkey, '=', val, '\0',
    ...
    envkey, '=', val

    if type == 'system', waits for:

    exitcode length (unsigned int),
    exitcode (int)

    if type == 'pager', repetitively waits for a command name ending with '\n'
    and executes it defined by cmdtable, or exits the loop if the command name
    is empty.
    """
    def __init__(self, in_, out, channel):
        self.in_ = in_          # stream carrying the client's replies
        self.out = out          # stream the framed request is written to
        self.channel = channel  # single-byte channel identifier (e.g. 'S')

    def __call__(self, cmd, environ, cwd=None, type='system', cmdtable=None):
        args = [type, procutil.quotecommand(cmd), os.path.abspath(cwd or '.')]
        args.extend('%s=%s' % (k, v) for k, v in environ.iteritems())
        data = '\0'.join(args)
        # frame: channel byte + big-endian payload length, then the payload
        self.out.write(struct.pack('>cI', self.channel, len(data)))
        self.out.write(data)
        self.out.flush()

        if type == 'system':
            length = self.in_.read(4)
            length, = struct.unpack('>I', length)
            # the reply payload must be exactly a 4-byte exit code
            if length != 4:
                raise error.Abort(_('invalid response'))
            rc, = struct.unpack('>i', self.in_.read(4))
            return rc
        elif type == 'pager':
            while True:
                # commands are newline-terminated; empty name ends the loop
                cmd = self.in_.readline()[:-1]
                if not cmd:
                    break
                if cmdtable and cmd in cmdtable:
                    cmdtable[cmd]()
                else:
                    raise error.Abort(_('unexpected command: %s') % cmd)
        else:
            raise error.ProgrammingError('invalid S channel type: %s' % type)
308 310
# (server channel attr, ui file attr, open mode) triples describing the
# stdio streams that are swapped with the client's during "attachio"
_iochannels = [
    # server.ch, ui.fp, mode
    ('cin', 'fin', r'rb'),
    ('cout', 'fout', r'wb'),
    ('cerr', 'ferr', r'wb'),
]
315 317
class chgcmdserver(commandserver.server):
    """Command server speaking the chg protocol.

    On top of the base command server it can attach to the client's stdio
    (via fd passing over the unix socket), mutate its environment, umask
    and cwd, and validate its own freshness against config/mtime hashes.
    """
    def __init__(self, ui, repo, fin, fout, sock, prereposetups,
                 hashstate, baseaddress):
        super(chgcmdserver, self).__init__(
            _newchgui(ui, channeledsystem(fin, fout, 'S'), self.attachio),
            repo, fin, fout, prereposetups)
        self.clientsock = sock
        self._ioattached = False
        self._oldios = []  # original (self.ch, ui.fp, fd) before "attachio"
        self.hashstate = hashstate
        self.baseaddress = baseaddress
        if hashstate is not None:
            # only advertise 'validate' when hash tracking is enabled
            self.capabilities = self.capabilities.copy()
            self.capabilities['validate'] = chgcmdserver.validate

    def cleanup(self):
        super(chgcmdserver, self).cleanup()
        # dispatch._runcatch() does not flush outputs if exception is not
        # handled by dispatch._dispatch()
        self.ui.flush()
        self._restoreio()
        self._ioattached = False

    def attachio(self):
        """Attach to client's stdio passed via unix domain socket; all
        channels except cresult will no longer be used
        """
        # tell client to sendmsg() with 1-byte payload, which makes it
        # distinctive from "attachio\n" command consumed by client.read()
        self.clientsock.sendall(struct.pack('>cI', 'I', 1))
        clientfds = util.recvfds(self.clientsock.fileno())
        self.ui.log('chgserver', 'received fds: %r\n', clientfds)

        ui = self.ui
        ui.flush()
        # remember the original fds so runcommand/cleanup can restore them
        self._saveio()
        for fd, (cn, fn, mode) in zip(clientfds, _iochannels):
            assert fd > 0
            fp = getattr(ui, fn)
            # point the server's stream at the client's fd, then drop our copy
            os.dup2(fd, fp.fileno())
            os.close(fd)
            if self._ioattached:
                continue
            # reset buffering mode when client is first attached. as we want
            # to see output immediately on pager, the mode stays unchanged
            # when client re-attached. ferr is unchanged because it should
            # be unbuffered no matter if it is a tty or not.
            if fn == 'ferr':
                newfp = fp
            else:
                # make it line buffered explicitly because the default is
                # decided on first write(), where fout could be a pager.
                if fp.isatty():
                    bufsize = 1 # line buffered
                else:
                    bufsize = -1 # system default
                newfp = os.fdopen(fp.fileno(), mode, bufsize)
            setattr(ui, fn, newfp)
            setattr(self, cn, newfp)

        self._ioattached = True
        # report how many fds we consumed back on the result channel
        self.cresult.write(struct.pack('>i', len(clientfds)))

    def _saveio(self):
        # no-op when already saved; only the first snapshot is kept
        if self._oldios:
            return
        ui = self.ui
        for cn, fn, _mode in _iochannels:
            ch = getattr(self, cn)
            fp = getattr(ui, fn)
            # duplicate the fd so the original stream survives dup2()
            fd = os.dup(fp.fileno())
            self._oldios.append((ch, fp, fd))

    def _restoreio(self):
        ui = self.ui
        for (ch, fp, fd), (cn, fn, _mode) in zip(self._oldios, _iochannels):
            newfp = getattr(ui, fn)
            # close newfp while it's associated with client; otherwise it
            # would be closed when newfp is deleted
            if newfp is not fp:
                newfp.close()
            # restore original fd: fp is open again
            os.dup2(fd, fp.fileno())
            os.close(fd)
            setattr(self, cn, ch)
            setattr(ui, fn, fp)
        del self._oldios[:]

    def validate(self):
        """Reload the config and check if the server is up to date

        Read a list of '\0' separated arguments.
        Write a non-empty list of '\0' separated instruction strings or '\0'
        if the list is empty.
        An instruction string could be either:
            - "unlink $path", the client should unlink the path to stop the
              outdated server.
            - "redirect $path", the client should attempt to connect to $path
              first. If it does not work, start a new server. It implies
              "reconnect".
            - "exit $n", the client should exit directly with code n.
              This may happen if we cannot parse the config.
            - "reconnect", the client should close the connection and
              reconnect.
        If neither "reconnect" nor "redirect" is included in the instruction
        list, the client can continue with this server after completing all
        the instructions.
        """
        from . import dispatch # avoid cycle

        args = self._readlist()
        try:
            self.ui, lui = _loadnewui(self.ui, args, self.cdebug)
        except error.ParseError as inst:
            dispatch._formatparse(self.ui.warn, inst)
            self.ui.flush()
            self.cresult.write('exit 255')
            return
        except error.Abort as inst:
            self.ui.error(_("abort: %s\n") % inst)
            if inst.hint:
                self.ui.error(_("(%s)\n") % inst.hint)
            self.ui.flush()
            self.cresult.write('exit 255')
            return
        newhash = hashstate.fromui(lui, self.hashstate.mtimepaths)
        insts = []
        if newhash.mtimehash != self.hashstate.mtimehash:
            addr = _hashaddress(self.baseaddress, self.hashstate.confighash)
            insts.append('unlink %s' % addr)
            # mtimehash is empty if one or more extensions fail to load.
            # to be compatible with hg, still serve the client this time.
            if self.hashstate.mtimehash:
                insts.append('reconnect')
        if newhash.confighash != self.hashstate.confighash:
            addr = _hashaddress(self.baseaddress, newhash.confighash)
            insts.append('redirect %s' % addr)
        self.ui.log('chgserver', 'validate: %s\n', stringutil.pprint(insts))
        self.cresult.write('\0'.join(insts) or '\0')

    def chdir(self):
        """Change current directory

        Note that the behavior of --cwd option is bit different from this.
        It does not affect --config parameter.
        """
        path = self._readstr()
        if not path:
            return
        self.ui.log('chgserver', 'chdir to %r\n', path)
        os.chdir(path)

    def setumask(self):
        """Change umask (DEPRECATED)"""
        # BUG: this does not follow the message frame structure, but kept for
        # backward compatibility with old chg clients for some time
        self._setumask(self._read(4))

    def setumask2(self):
        """Change umask"""
        data = self._readstr()
        if len(data) != 4:
            raise ValueError('invalid mask length in setumask2 request')
        self._setumask(data)

    def _setumask(self, data):
        # data is a 4-byte big-endian unsigned mask value
        mask = struct.unpack('>I', data)[0]
        self.ui.log('chgserver', 'setumask %r\n', mask)
        os.umask(mask)

    def runcommand(self):
        # pager may be attached within the runcommand session, which should
        # be detached at the end of the session. otherwise the pager wouldn't
        # receive EOF.
        globaloldios = self._oldios
        self._oldios = []
        try:
            return super(chgcmdserver, self).runcommand()
        finally:
            self._restoreio()
            self._oldios = globaloldios

    def setenv(self):
        """Clear and update os.environ

        Note that not all variables can make an effect on the running process.
        """
        l = self._readlist()
        try:
            newenv = dict(s.split('=', 1) for s in l)
        except ValueError:
            raise ValueError('unexpected value in setenv request')
        self.ui.log('chgserver', 'setenv: %r\n', sorted(newenv.keys()))
        encoding.environ.clear()
        encoding.environ.update(newenv)

    # chg-specific commands, in addition to the base server's capabilities
    capabilities = commandserver.server.capabilities.copy()
    capabilities.update({'attachio': attachio,
                         'chdir': chdir,
                         'runcommand': runcommand,
                         'setenv': setenv,
                         'setumask': setumask,
                         'setumask2': setumask2})

    # setprocname is optional: only offered when the platform supports it
    if util.safehasattr(procutil, 'setprocname'):
        def setprocname(self):
            """Change process title"""
            name = self._readstr()
            self.ui.log('chgserver', 'setprocname: %r\n', name)
            procutil.setprocname(name)
        capabilities['setprocname'] = setprocname
527 529
528 530 def _tempaddress(address):
529 531 return '%s.%d.tmp' % (address, os.getpid())
530 532
531 533 def _hashaddress(address, hashstr):
532 534 # if the basename of address contains '.', use only the left part. this
533 535 # makes it possible for the client to pass 'server.tmp$PID' and follow by
534 536 # an atomic rename to avoid locking when spawning new servers.
535 537 dirname, basename = os.path.split(address)
536 538 basename = basename.split('.', 1)[0]
537 539 return '%s-%s' % (os.path.join(dirname, basename), hashstr)
538 540
class chgunixservicehandler(object):
    """Set of operations for chg services"""

    pollinterval = 1 # [sec]

    def __init__(self, ui):
        self.ui = ui
        # idle servers exit after this many seconds (chgserver.idletimeout)
        self._idletimeout = ui.configint('chgserver', 'idletimeout')
        self._lastactive = time.time()

    def bindsocket(self, sock, address):
        self._inithashstate(address)
        self._checkextensions()
        self._bind(sock)
        self._createsymlink()
        # no "listening at" message should be printed to simulate hg behavior

    def _inithashstate(self, address):
        # baseaddress is what the client asked for; realaddress embeds the
        # confighash unless chgserver.skiphash disables hash tracking
        self._baseaddress = address
        if self.ui.configbool('chgserver', 'skiphash'):
            self._hashstate = None
            self._realaddress = address
            return
        self._hashstate = hashstate.fromui(self.ui)
        self._realaddress = _hashaddress(address, self._hashstate.confighash)

    def _checkextensions(self):
        if not self._hashstate:
            return
        if extensions.notloaded():
            # one or more extensions failed to load. mtimehash becomes
            # meaningless because we do not know the paths of those extensions.
            # set mtimehash to an illegal hash value to invalidate the server.
            self._hashstate.mtimehash = ''

    def _bind(self, sock):
        # use a unique temp address so we can stat the file and do ownership
        # check later
        tempaddress = _tempaddress(self._realaddress)
        util.bindunixsocket(sock, tempaddress)
        # remember inode/mtime for the later ownership check
        self._socketstat = os.stat(tempaddress)
        sock.listen(socket.SOMAXCONN)
        # rename will replace the old socket file if exists atomically. the
        # old server will detect ownership change and exit.
        util.rename(tempaddress, self._realaddress)

    def _createsymlink(self):
        if self._baseaddress == self._realaddress:
            return
        # atomically point the base (hash-less) address at the real socket
        tempaddress = _tempaddress(self._baseaddress)
        os.symlink(os.path.basename(self._realaddress), tempaddress)
        util.rename(tempaddress, self._baseaddress)

    def _issocketowner(self):
        # compare inode and mtime with the stat taken at bind time; a
        # mismatch means another server has replaced our socket file
        try:
            st = os.stat(self._realaddress)
            return (st.st_ino == self._socketstat.st_ino and
                    st[stat.ST_MTIME] == self._socketstat[stat.ST_MTIME])
        except OSError:
            return False

    def unlinksocket(self, address):
        if not self._issocketowner():
            return
        # it is possible to have a race condition here that we may
        # remove another server's socket file. but that's okay
        # since that server will detect and exit automatically and
        # the client will start a new server on demand.
        util.tryunlink(self._realaddress)

    def shouldexit(self):
        if not self._issocketowner():
            self.ui.log(b'chgserver', b'%s is not owned, exiting.\n',
                        self._realaddress)
            return True
        if time.time() - self._lastactive > self._idletimeout:
            self.ui.log(b'chgserver', b'being idle too long. exiting.\n')
            return True
        return False

    def newconnection(self):
        # any client connection resets the idle timer
        self._lastactive = time.time()

    def createcmdserver(self, repo, conn, fin, fout, prereposetups):
        return chgcmdserver(self.ui, repo, fin, fout, conn, prereposetups,
                            self._hashstate, self._baseaddress)
625 627
def chgunixservice(ui, repo, opts):
    """Start a chg unix-socket forking service for *ui* with *opts*."""
    # CHGINTERNALMARK is set by chg client. It is an indication of things are
    # started by chg so other code can do things accordingly, like disabling
    # demandimport or detecting chg client started by chg client. When executed
    # here, CHGINTERNALMARK is no longer useful and hence dropped to make
    # environ cleaner.
    encoding.environ.pop('CHGINTERNALMARK', None)

    if repo:
        # one chgserver can serve multiple repos. drop repo information
        ui.setconfig('bundle', 'mainreporoot', '', 'repo')
    handler = chgunixservicehandler(ui)
    return commandserver.unixforkingservice(ui, repo=None, opts=opts,
                                            handler=handler)
@@ -1,3481 +1,3481 b''
1 1 # debugcommands.py - command processing for debug* commands
2 2 #
3 3 # Copyright 2005-2016 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 from __future__ import absolute_import
9 9
10 10 import codecs
11 11 import collections
12 12 import difflib
13 13 import errno
14 14 import operator
15 15 import os
16 16 import random
17 17 import re
18 18 import socket
19 19 import ssl
20 20 import stat
21 21 import string
22 22 import subprocess
23 23 import sys
24 24 import time
25 25
26 26 from .i18n import _
27 27 from .node import (
28 28 bin,
29 29 hex,
30 30 nullhex,
31 31 nullid,
32 32 nullrev,
33 33 short,
34 34 )
35 35 from . import (
36 36 bundle2,
37 37 changegroup,
38 38 cmdutil,
39 39 color,
40 40 context,
41 41 copies,
42 42 dagparser,
43 43 encoding,
44 44 error,
45 45 exchange,
46 46 extensions,
47 47 filemerge,
48 48 filesetlang,
49 49 formatter,
50 50 hg,
51 51 httppeer,
52 52 localrepo,
53 53 lock as lockmod,
54 54 logcmdutil,
55 55 merge as mergemod,
56 56 obsolete,
57 57 obsutil,
58 58 phases,
59 59 policy,
60 60 pvec,
61 61 pycompat,
62 62 registrar,
63 63 repair,
64 64 revlog,
65 65 revset,
66 66 revsetlang,
67 67 scmutil,
68 68 setdiscovery,
69 69 simplemerge,
70 70 sshpeer,
71 71 sslutil,
72 72 streamclone,
73 73 templater,
74 74 treediscovery,
75 75 upgrade,
76 76 url as urlmod,
77 77 util,
78 78 vfs as vfsmod,
79 79 wireprotoframing,
80 80 wireprotoserver,
81 81 wireprotov2peer,
82 82 )
83 83 from .utils import (
84 84 cborutil,
85 85 compression,
86 86 dateutil,
87 87 procutil,
88 88 stringutil,
89 89 )
90 90
91 91 from .revlogutils import (
92 92 deltas as deltautil
93 93 )
94 94
# re-exported for convenience; callers use debugcommands.release
release = lockmod.release

# decorator used to register every debug* command defined below
command = registrar.command()
98 98
@command('debugancestor', [], _('[INDEX] REV1 REV2'), optionalrepo=True)
def debugancestor(ui, repo, *args):
    """find the ancestor revision of two revisions in a given index"""
    if len(args) == 3:
        # an explicit index file was given; open it directly
        index, rev1, rev2 = args
        rlog = revlog.revlog(vfsmod.vfs(encoding.getcwd(), audit=False), index)
        lookup = rlog.lookup
    elif len(args) == 2:
        # no index given: fall back to the current repo's changelog
        if not repo:
            raise error.Abort(_('there is no Mercurial repository here '
                                '(.hg not found)'))
        rev1, rev2 = args
        rlog = repo.changelog
        lookup = repo.lookup
    else:
        raise error.Abort(_('either two or three arguments required'))
    anc = rlog.ancestor(lookup(rev1), lookup(rev2))
    ui.write('%d:%s\n' % (rlog.rev(anc), hex(anc)))
117 117
@command('debugapplystreamclonebundle', [], 'FILE')
def debugapplystreamclonebundle(ui, repo, fname):
    """apply a stream clone bundle file"""
    f = hg.openpath(ui, fname)
    gen = exchange.readbundle(ui, f, fname)
    # NOTE(review): f is not explicitly closed here; presumably gen.apply
    # consumes the stream — confirm before changing
    gen.apply(repo)
124 124
@command('debugbuilddag',
    [('m', 'mergeable-file', None, _('add single file mergeable changes')),
    ('o', 'overwritten-file', None, _('add single file all revs overwrite')),
    ('n', 'new-file', None, _('add new file at each rev'))],
    _('[OPTION]... [TEXT]'))
def debugbuilddag(ui, repo, text=None,
                  mergeable_file=False,
                  overwritten_file=False,
                  new_file=False):
    """builds a repo with a given DAG from scratch in the current empty repo

    The description of the DAG is read from stdin if not given on the
    command line.

    Elements:

     - "+n" is a linear run of n nodes based on the current default parent
     - "." is a single node based on the current default parent
     - "$" resets the default parent to null (implied at the start);
           otherwise the default parent is always the last node created
     - "<p" sets the default parent to the backref p
     - "*p" is a fork at parent p, which is a backref
     - "*p1/p2" is a merge of parents p1 and p2, which are backrefs
     - "/p2" is a merge of the preceding node and p2
     - ":tag" defines a local tag for the preceding node
     - "@branch" sets the named branch for subsequent nodes
     - "#...\\n" is a comment up to the end of the line

    Whitespace between the above elements is ignored.

    A backref is either

     - a number n, which references the node curr-n, where curr is the current
       node, or
     - the name of a local tag you placed earlier using ":tag", or
     - empty to denote the default parent.

    All string valued-elements are either strictly alphanumeric, or must
    be enclosed in double quotes ("..."), with "\\" as escape character.
    """

    if text is None:
        ui.status(_("reading DAG from stdin\n"))
        text = ui.fin.read()

    cl = repo.changelog
    if len(cl) > 0:
        raise error.Abort(_('repository is not empty'))

    # determine number of revs in DAG (first pass, so the progress bar
    # below has a total)
    total = 0
    for type, data in dagparser.parsedag(text):
        if type == 'n':
            total += 1

    if mergeable_file:
        linesperrev = 2
        # make a file with k lines per rev
        initialmergedlines = ['%d' % i
                              for i in pycompat.xrange(0, total * linesperrev)]
        initialmergedlines.append("")

    tags = []
    progress = ui.makeprogress(_('building'), unit=_('revisions'),
                               total=total)
    with progress, repo.wlock(), repo.lock(), repo.transaction("builddag"):
        at = -1
        atbranch = 'default'
        nodeids = []
        # NOTE: 'type' and 'id' shadow builtins; kept as-is for compatibility
        id = 0
        progress.update(id)
        for type, data in dagparser.parsedag(text):
            if type == 'n':
                ui.note(('node %s\n' % pycompat.bytestr(data)))
                id, ps = data

                files = []
                filecontent = {}

                p2 = None
                if mergeable_file:
                    fn = "mf"
                    p1 = repo[ps[0]]
                    if len(ps) > 1:
                        # merge: three-way merge the shared file's lines
                        p2 = repo[ps[1]]
                        pa = p1.ancestor(p2)
                        base, local, other = [x[fn].data() for x in (pa, p1,
                                                                     p2)]
                        m3 = simplemerge.Merge3Text(base, local, other)
                        ml = [l.strip() for l in m3.merge_lines()]
                        ml.append("")
                    elif at > 0:
                        ml = p1[fn].data().split("\n")
                    else:
                        # first node: start from the pre-built line list
                        ml = initialmergedlines
                    ml[id * linesperrev] += " r%i" % id
                    mergedtext = "\n".join(ml)
                    files.append(fn)
                    filecontent[fn] = mergedtext

                if overwritten_file:
                    fn = "of"
                    files.append(fn)
                    filecontent[fn] = "r%i\n" % id

                if new_file:
                    fn = "nf%i" % id
                    files.append(fn)
                    filecontent[fn] = "r%i\n" % id
                    if len(ps) > 1:
                        if not p2:
                            p2 = repo[ps[1]]
                        # carry over the second parent's nf* files so the
                        # merge commit records them
                        for fn in p2:
                            if fn.startswith("nf"):
                                files.append(fn)
                                filecontent[fn] = p2[fn].data()

                def fctxfn(repo, cx, path):
                    if path in filecontent:
                        return context.memfilectx(repo, cx, path,
                                                  filecontent[path])
                    return None

                if len(ps) == 0 or ps[0] < 0:
                    pars = [None, None]
                elif len(ps) == 1:
                    pars = [nodeids[ps[0]], None]
                else:
                    pars = [nodeids[p] for p in ps]
                cx = context.memctx(repo, pars, "r%i" % id, files, fctxfn,
                                    date=(id, 0),
                                    user="debugbuilddag",
                                    extra={'branch': atbranch})
                nodeid = repo.commitctx(cx)
                nodeids.append(nodeid)
                at = id
            elif type == 'l':
                # local tag for the preceding node
                id, name = data
                ui.note(('tag %s\n' % name))
                tags.append("%s %s\n" % (hex(repo.changelog.node(id)), name))
            elif type == 'a':
                ui.note(('branch %s\n' % data))
                atbranch = data
            progress.update(id)

        if tags:
            repo.vfs.write("localtags", "".join(tags))
272 272
def _debugchangegroup(ui, gen, all=None, indent=0, **opts):
    """Dump the contents of changegroup *gen* to the ui.

    With *all*, every delta chunk (changelog, manifest, then each filelog)
    is printed with its parents and delta base; otherwise only changelog
    node hashes are listed.  *indent* prefixes output lines with that many
    spaces.
    """
    indent_string = ' ' * indent
    if all:
        ui.write(("%sformat: id, p1, p2, cset, delta base, len(delta)\n")
                 % indent_string)

        def showchunks(named):
            ui.write("\n%s%s\n" % (indent_string, named))
            for deltadata in gen.deltaiter():
                node, p1, p2, cs, deltabase, delta, flags = deltadata
                ui.write("%s%s %s %s %s %s %d\n" %
                         (indent_string, hex(node), hex(p1), hex(p2),
                          hex(cs), hex(deltabase), len(delta)))

        chunkdata = gen.changelogheader()
        showchunks("changelog")
        chunkdata = gen.manifestheader()
        showchunks("manifest")
        # filelog headers are read until the empty-dict sentinel
        for chunkdata in iter(gen.filelogheader, {}):
            fname = chunkdata['filename']
            showchunks(fname)
    else:
        if isinstance(gen, bundle2.unbundle20):
            raise error.Abort(_('use debugbundle2 for this file'))
        # consume the changelog header before iterating deltas; the returned
        # value itself is unused here
        chunkdata = gen.changelogheader()
        for deltadata in gen.deltaiter():
            node, p1, p2, cs, deltabase, delta, flags = deltadata
            ui.write("%s%s\n" % (indent_string, hex(node)))
301 301
def _debugobsmarkers(ui, part, indent=0, **opts):
    """display version and markers contained in 'data'

    ``part`` is a bundle2 part whose payload is a binary obsmarker stream;
    ``indent`` prefixes each output line with that many spaces.
    """
    opts = pycompat.byteskwargs(opts)
    data = part.read()
    indent_string = ' ' * indent
    try:
        version, markers = obsolete._readmarkers(data)
    except error.UnknownVersion as exc:
        # still report the version byte and size so the bundle can be
        # inspected even when this client cannot decode the markers
        msg = "%sunsupported version: %s (%d bytes)\n"
        msg %= indent_string, exc.version, len(data)
        ui.write(msg)
    else:
        msg = "%sversion: %d (%d bytes)\n"
        msg %= indent_string, version, len(data)
        ui.write(msg)
        fm = ui.formatter('debugobsolete', opts)
        for rawmarker in sorted(markers):
            m = obsutil.marker(None, rawmarker)
            fm.startitem()
            fm.plain(indent_string)
            cmdutil.showmarker(fm, m)
        fm.end()
324 324
def _debugphaseheads(ui, data, indent=0):
    """print the phase heads encoded in 'data', one ``hex phasename`` line
    per head, indented by ``indent`` spaces"""
    prefix = ' ' * indent
    decoded = phases.binarydecode(data)
    for phase in phases.allphases:
        phasename = phases.phasenames[phase]
        for head in decoded[phase]:
            ui.write(prefix)
            ui.write('%s %s\n' % (hex(head), phasename))
333 333
def _quasirepr(thing):
    """return a stable, repr-like rendering of ``thing``

    Mapping types are rendered with sorted keys so the output is
    deterministic across runs and Python versions.
    """
    mappingtypes = (dict, util.sortdict, collections.OrderedDict)
    if not isinstance(thing, mappingtypes):
        return pycompat.bytestr(repr(thing))
    entries = (b'%s: %s' % (key, thing[key]) for key in sorted(thing))
    return '{%s}' % b', '.join(entries)
339 339
def _debugbundle2(ui, gen, all=None, **opts):
    """lists the contents of a bundle2"""
    if not isinstance(gen, bundle2.unbundle20):
        raise error.Abort(_('not a bundle2 file'))
    ui.write(('Stream params: %s\n' % _quasirepr(gen.params)))
    # --part-type restricts which parts are displayed
    parttypes = opts.get(r'part_type', [])
    for part in gen.iterparts():
        if parttypes and part.type not in parttypes:
            continue
        msg = '%s -- %s (mandatory: %r)\n'
        ui.write((msg % (part.type, _quasirepr(part.params), part.mandatory)))
        # known part payloads additionally get a decoded, indented dump
        if part.type == 'changegroup':
            version = part.params.get('version', '01')
            cg = changegroup.getunbundler(version, part, 'UN')
            if not ui.quiet:
                _debugchangegroup(ui, cg, all=all, indent=4, **opts)
        if part.type == 'obsmarkers':
            if not ui.quiet:
                _debugobsmarkers(ui, part, indent=4, **opts)
        if part.type == 'phase-heads':
            if not ui.quiet:
                _debugphaseheads(ui, part, indent=4)
362 362
@command('debugbundle',
        [('a', 'all', None, _('show all details')),
         ('', 'part-type', [], _('show only the named part type')),
         ('', 'spec', None, _('print the bundlespec of the bundle'))],
        _('FILE'),
        norepo=True)
def debugbundle(ui, bundlepath, all=None, spec=None, **opts):
    """lists the contents of a bundle"""
    with hg.openpath(ui, bundlepath) as f:
        if spec:
            # --spec only prints the bundle specification, nothing else
            spec = exchange.getbundlespec(ui, f)
            ui.write('%s\n' % spec)
            return

        # dispatch on the detected bundle format (bundle2 vs changegroup v1)
        gen = exchange.readbundle(ui, f, bundlepath)
        if isinstance(gen, bundle2.unbundle20):
            return _debugbundle2(ui, gen, all=all, **opts)
        _debugchangegroup(ui, gen, all=all, **opts)
381 381
@command('debugcapabilities',
        [], _('PATH'),
        norepo=True)
def debugcapabilities(ui, path, **opts):
    """lists the capabilities of a remote peer"""
    opts = pycompat.byteskwargs(opts)
    peer = hg.peer(ui, opts, path)
    caps = peer.capabilities()
    ui.write(('Main capabilities:\n'))
    for c in sorted(caps):
        ui.write((' %s\n') % c)
    # bundle2 capabilities are nested: one key, several values each
    b2caps = bundle2.bundle2caps(peer)
    if b2caps:
        ui.write(('Bundle2 capabilities:\n'))
        for key, values in sorted(b2caps.iteritems()):
            ui.write((' %s\n') % key)
            for v in values:
                ui.write((' %s\n') % v)
400 400
@command('debugcheckstate', [], '')
def debugcheckstate(ui, repo):
    """validate the correctness of the current dirstate

    Cross-checks every dirstate entry against the manifests of the two
    dirstate parents and warns about each inconsistency found. Aborts at
    the end if any inconsistency was reported.
    """
    parent1, parent2 = repo.dirstate.parents()
    m1 = repo[parent1].manifest()
    m2 = repo[parent2].manifest()
    errors = 0
    for f in repo.dirstate:
        state = repo.dirstate[f]
        # 'n'ormal / 'r'emoved entries must exist in the first parent
        if state in "nr" and f not in m1:
            ui.warn(_("%s in state %s, but not in manifest1\n") % (f, state))
            errors += 1
        # 'a'dded entries must NOT already exist in the first parent
        if state in "a" and f in m1:
            ui.warn(_("%s in state %s, but also in manifest1\n") % (f, state))
            errors += 1
        # 'm'erged entries must come from at least one parent
        if state in "m" and f not in m1 and f not in m2:
            ui.warn(_("%s in state %s, but not in either manifest\n") %
                    (f, state))
            errors += 1
    for f in m1:
        state = repo.dirstate[f]
        if state not in "nrm":
            ui.warn(_("%s in manifest1, but listed as state %s") % (f, state))
            errors += 1
    if errors:
        # use a name that does not shadow the 'error' module: the previous
        # local named 'error' made the error.Abort lookup below fail with
        # an AttributeError instead of raising the intended Abort
        errstr = _(".hg/dirstate inconsistent with current parent's manifest")
        raise error.Abort(errstr)
428 428
@command('debugcolor',
        [('', 'style', None, _('show all configured styles'))],
        'hg debugcolor')
def debugcolor(ui, repo, **opts):
    """show available color, effects or style"""
    # always report the active color mode first
    ui.write(('color mode: %s\n') % stringutil.pprint(ui._colormode))
    # --style lists configured style labels; the default lists raw colors
    if opts.get(r'style'):
        return _debugdisplaystyle(ui)
    return _debugdisplaycolor(ui)
439 439
def _debugdisplaycolor(ui):
    """print every available color/effect name, each rendered in itself"""
    # work on a copy so the caller's ui styles are not clobbered
    ui = ui.copy()
    ui._styles.clear()
    for effect in color._activeeffects(ui).keys():
        ui._styles[effect] = effect
    if ui._terminfoparams:
        # in terminfo mode, user-defined color/terminfo entries are also
        # selectable; register them under their bare names
        for k, v in ui.configitems('color'):
            if k.startswith('color.'):
                ui._styles[k] = k[6:]
            elif k.startswith('terminfo.'):
                ui._styles[k] = k[9:]
    ui.write(_('available colors:\n'))
    # sort label with a '_' after the other to group '_background' entry.
    items = sorted(ui._styles.items(),
                   key=lambda i: ('_' in i[0], i[0], i[1]))
    for colorname, label in items:
        ui.write(('%s\n') % colorname, label=label)
457 457
def _debugdisplaystyle(ui):
    """list each configured style label together with its effects"""
    ui.write(_('available style:\n'))
    if not ui._styles:
        return
    # pad the label column so the effect lists line up
    width = max(len(name) for name in ui._styles)
    for label, effects in sorted(ui._styles.items()):
        ui.write('%s' % label, label=label)
        if effects:
            ui.write(': ')
            padding = max(0, width - len(label))
            ui.write(' ' * padding)
            rendered = [ui.label(e, e) for e in effects.split()]
            ui.write(', '.join(rendered))
        ui.write('\n')
471 471
@command('debugcreatestreamclonebundle', [], 'FILE')
def debugcreatestreamclonebundle(ui, repo, fname):
    """create a stream clone bundle file

    Stream bundles are special bundles that are essentially archives of
    revlog files. They are commonly used for cloning very quickly.
    """
    # TODO we may want to turn this into an abort when this functionality
    # is moved into `hg bundle`.
    if phases.hassecret(repo):
        ui.warn(_('(warning: stream clone bundle will contain secret '
                  'revisions)\n'))

    # v1 stream bundles ship raw revlog data plus the repo requirements
    # a reader needs in order to interpret it
    requirements, gen = streamclone.generatebundlev1(repo)
    changegroup.writechunks(ui, gen, fname)

    ui.write(_('bundle requirements: %s\n') % ', '.join(sorted(requirements)))
489 489
@command('debugdag',
    [('t', 'tags', None, _('use tags as labels')),
     ('b', 'branches', None, _('annotate with branch names')),
     ('', 'dots', None, _('use dots for runs')),
     ('s', 'spaces', None, _('separate elements by spaces'))],
    _('[OPTION]... [FILE [REV]...]'),
    optionalrepo=True)
def debugdag(ui, repo, file_=None, *revs, **opts):
    """format the changelog or an index DAG as a concise textual description

    If you pass a revlog index, the revlog's DAG is emitted. If you list
    revision numbers, they get labeled in the output as rN.

    Otherwise, the changelog DAG of the current repo is emitted.
    """
    spaces = opts.get(r'spaces')
    dots = opts.get(r'dots')
    # events() yields ('n', ...) node, ('l', ...) label and ('a', ...)
    # annotation tuples, the event stream consumed by dagparser below
    if file_:
        # dump the DAG of an arbitrary revlog index file
        rlog = revlog.revlog(vfsmod.vfs(encoding.getcwd(), audit=False),
                             file_)
        revs = set((int(r) for r in revs))
        def events():
            for r in rlog:
                yield 'n', (r, list(p for p in rlog.parentrevs(r)
                                    if p != -1))
                if r in revs:
                    yield 'l', (r, "r%i" % r)
    elif repo:
        # dump the changelog DAG, optionally labeled with tags/branches
        cl = repo.changelog
        tags = opts.get(r'tags')
        branches = opts.get(r'branches')
        if tags:
            # map revision number -> list of tag names
            labels = {}
            for l, n in repo.tags().items():
                labels.setdefault(cl.rev(n), []).append(l)
        def events():
            b = "default"
            for r in cl:
                if branches:
                    # emit an annotation each time the branch changes
                    newb = cl.read(cl.node(r))[5]['branch']
                    if newb != b:
                        yield 'a', newb
                        b = newb
                yield 'n', (r, list(p for p in cl.parentrevs(r)
                                    if p != -1))
                if tags:
                    ls = labels.get(r)
                    if ls:
                        for l in ls:
                            yield 'l', (r, l)
    else:
        raise error.Abort(_('need repo for changelog dag'))

    for line in dagparser.dagtextlines(events(),
                                       addspaces=spaces,
                                       wraplabels=True,
                                       wrapannotations=True,
                                       wrapnonlinear=dots,
                                       usedots=dots,
                                       maxlinewidth=70):
        ui.write(line)
    ui.write("\n")
552 552
@command('debugdata', cmdutil.debugrevlogopts, _('-c|-m|FILE REV'))
def debugdata(ui, repo, file_, rev=None, **opts):
    """dump the contents of a data file revision"""
    opts = pycompat.byteskwargs(opts)
    if opts.get('changelog') or opts.get('manifest') or opts.get('dir'):
        # with -c/-m/--dir the single positional argument is the revision,
        # not a file path
        if rev is not None:
            raise error.CommandError('debugdata', _('invalid arguments'))
        file_, rev = None, file_
    elif rev is None:
        raise error.CommandError('debugdata', _('invalid arguments'))
    r = cmdutil.openstorage(repo, 'debugdata', file_, opts)
    try:
        # raw=True: emit the stored bytes without revlog flag processing
        ui.write(r.revision(r.lookup(rev), raw=True))
    except KeyError:
        raise error.Abort(_('invalid revision identifier %s') % rev)
568 568
@command('debugdate',
    [('e', 'extended', None, _('try extended date formats'))],
    _('[-e] DATE [RANGE]'),
    norepo=True, optionalrepo=True)
def debugdate(ui, date, range=None, **opts):
    """parse and display a date"""
    # --extended widens the set of accepted input date formats
    if opts[r"extended"]:
        parsed = dateutil.parsedate(date, util.extendeddateformats)
    else:
        parsed = dateutil.parsedate(date)
    ui.write(("internal: %d %d\n") % parsed)
    ui.write(("standard: %s\n") % dateutil.datestr(parsed))
    if not range:
        return
    matcher = dateutil.matchdate(range)
    ui.write(("match: %s\n") % matcher(parsed[0]))
584 584
@command('debugdeltachain',
    cmdutil.debugrevlogopts + cmdutil.formatteropts,
    _('-c|-m|FILE'),
    optionalrepo=True)
def debugdeltachain(ui, repo, file_=None, **opts):
    """dump information about delta chains in a revlog

    Output can be templatized. Available template keywords are:

    :``rev``: revision number
    :``chainid``: delta chain identifier (numbered by unique base)
    :``chainlen``: delta chain length to this revision
    :``prevrev``: previous revision in delta chain
    :``deltatype``: role of delta / how it was computed
    :``compsize``: compressed size of revision
    :``uncompsize``: uncompressed size of revision
    :``chainsize``: total size of compressed revisions in chain
    :``chainratio``: total chain size divided by uncompressed revision size
      (new delta chains typically start at ratio 2.00)
    :``lindist``: linear distance from base revision in delta chain to end
      of this revision
    :``extradist``: total size of revisions not part of this delta chain from
      base of delta chain to end of this revision; a measurement
      of how much extra data we need to read/seek across to read
      the delta chain for this revision
    :``extraratio``: extradist divided by chainsize; another representation of
      how much unrelated data is needed to load this delta chain

    If the repository is configured to use the sparse read, additional keywords
    are available:

    :``readsize``: total size of data read from the disk for a revision
      (sum of the sizes of all the blocks)
    :``largestblock``: size of the largest block of data read from the disk
    :``readdensity``: density of useful bytes in the data read from the disk
    :``srchunks``: in how many data hunks the whole revision would be read

    The sparse read can be enabled with experimental.sparse-read = True
    """
    opts = pycompat.byteskwargs(opts)
    r = cmdutil.openrevlog(repo, 'debugdeltachain', file_, opts)
    index = r.index
    start = r.start
    length = r.length
    generaldelta = r.version & revlog.FLAG_GENERALDELTA
    withsparseread = getattr(r, '_withsparseread', False)

    def revinfo(rev):
        # classify the delta of 'rev' and measure its whole chain
        e = index[rev]
        compsize = e[1]
        uncompsize = e[2]
        chainsize = 0

        if generaldelta:
            # with generaldelta the delta base (e[3]) may be any revision;
            # compare it against the parents (e[5], e[6]) to name its role
            if e[3] == e[5]:
                deltatype = 'p1'
            elif e[3] == e[6]:
                deltatype = 'p2'
            elif e[3] == rev - 1:
                deltatype = 'prev'
            elif e[3] == rev:
                deltatype = 'base'
            else:
                deltatype = 'other'
        else:
            # without generaldelta, deltas are against the previous revision
            # (or the revision is a full snapshot, its own base)
            if e[3] == rev:
                deltatype = 'base'
            else:
                deltatype = 'prev'

        chain = r._deltachain(rev)[0]
        for iterrev in chain:
            e = index[iterrev]
            chainsize += e[1]

        return compsize, uncompsize, deltatype, chain, chainsize

    fm = ui.formatter('debugdeltachain', opts)

    fm.plain(' rev chain# chainlen prev delta '
             'size rawsize chainsize ratio lindist extradist '
             'extraratio')
    if withsparseread:
        fm.plain(' readsize largestblk rddensity srchunks')
    fm.plain('\n')

    # chains are numbered by order of first appearance of their base
    chainbases = {}
    for rev in r:
        comp, uncomp, deltatype, chain, chainsize = revinfo(rev)
        chainbase = chain[0]
        chainid = chainbases.setdefault(chainbase, len(chainbases) + 1)
        basestart = start(chainbase)
        revstart = start(rev)
        # distance in the file from the chain base to the end of this rev
        lineardist = revstart + comp - basestart
        extradist = lineardist - chainsize
        try:
            prevrev = chain[-2]
        except IndexError:
            # chain of length 1: this revision is its own base
            prevrev = -1

        if uncomp != 0:
            chainratio = float(chainsize) / float(uncomp)
        else:
            chainratio = chainsize

        if chainsize != 0:
            extraratio = float(extradist) / float(chainsize)
        else:
            extraratio = extradist

        fm.startitem()
        fm.write('rev chainid chainlen prevrev deltatype compsize '
                 'uncompsize chainsize chainratio lindist extradist '
                 'extraratio',
                 '%7d %7d %8d %8d %7s %10d %10d %10d %9.5f %9d %9d %10.5f',
                 rev, chainid, len(chain), prevrev, deltatype, comp,
                 uncomp, chainsize, chainratio, lineardist, extradist,
                 extraratio,
                 rev=rev, chainid=chainid, chainlen=len(chain),
                 prevrev=prevrev, deltatype=deltatype, compsize=comp,
                 uncompsize=uncomp, chainsize=chainsize,
                 chainratio=chainratio, lindist=lineardist,
                 extradist=extradist, extraratio=extraratio)
        if withsparseread:
            readsize = 0
            largestblock = 0
            srchunks = 0

            # simulate the sparse read: account for every on-disk block the
            # slicing algorithm would touch to rebuild this chain
            for revschunk in deltautil.slicechunk(r, chain):
                srchunks += 1
                blkend = start(revschunk[-1]) + length(revschunk[-1])
                blksize = blkend - start(revschunk[0])

                readsize += blksize
                if largestblock < blksize:
                    largestblock = blksize

            if readsize:
                readdensity = float(chainsize) / float(readsize)
            else:
                readdensity = 1

            fm.write('readsize largestblock readdensity srchunks',
                     ' %10d %10d %9.5f %8d',
                     readsize, largestblock, readdensity, srchunks,
                     readsize=readsize, largestblock=largestblock,
                     readdensity=readdensity, srchunks=srchunks)

        fm.plain('\n')

    fm.end()
736 736
@command('debugdirstate|debugstate',
    [('', 'nodates', None, _('do not display the saved mtime (DEPRECATED)')),
     ('', 'dates', True, _('display the saved mtime')),
     ('', 'datesort', None, _('sort by saved mtime'))],
    _('[OPTION]...'))
def debugstate(ui, repo, **opts):
    """show the contents of the current dirstate"""

    nodates = not opts[r'dates']
    # the deprecated --nodates flag still wins when explicitly given
    if opts.get(r'nodates') is not None:
        nodates = True
    datesort = opts.get(r'datesort')

    if datesort:
        keyfunc = lambda x: (x[1][3], x[0]) # sort by mtime, then by filename
    else:
        keyfunc = None # sort by filename
    for file_, ent in sorted(repo.dirstate._map.iteritems(), key=keyfunc):
        # ent is (state, mode, size, mtime); mtime == -1 means "unset"
        if ent[3] == -1:
            timestr = 'unset '
        elif nodates:
            timestr = 'set '
        else:
            timestr = time.strftime(r"%Y-%m-%d %H:%M:%S ",
                                    time.localtime(ent[3]))
            timestr = encoding.strtolocal(timestr)
        if ent[1] & 0o20000:
            # S_IFLNK bit recorded in the mode: entry is a symlink
            mode = 'lnk'
        else:
            mode = '%3o' % (ent[1] & 0o777 & ~util.umask)
        ui.write("%c %s %10d %s%s\n" % (ent[0], mode, ent[2], timestr, file_))
    for f in repo.dirstate.copies():
        ui.write(_("copy: %s -> %s\n") % (repo.dirstate.copied(f), f))
770 770
@command('debugdiscovery',
    [('', 'old', None, _('use old-style discovery')),
     ('', 'nonheads', None,
      _('use old-style discovery with non-heads included')),
     ('', 'rev', [], 'restrict discovery to this set of revs'),
     ('', 'seed', '12323', 'specify the random seed use for discovery'),
     ] + cmdutil.remoteopts,
    _('[--rev REV] [OTHER]'))
def debugdiscovery(ui, repo, remoteurl="default", **opts):
    """runs the changeset discovery protocol in isolation"""
    opts = pycompat.byteskwargs(opts)
    remoteurl, branches = hg.parseurl(ui.expandpath(remoteurl))
    remote = hg.peer(repo, opts, remoteurl)
    ui.status(_('comparing with %s\n') % util.hidepassword(remoteurl))

    # make sure tests are repeatable
    random.seed(int(opts['seed']))



    # doit() runs one discovery round and returns (common nodes, remote heads)
    if opts.get('old'):
        # legacy tree-walking discovery protocol
        def doit(pushedrevs, remoteheads, remote=remote):
            if not util.safehasattr(remote, 'branches'):
                # enable in-client legacy support
                remote = localrepo.locallegacypeer(remote.local())
            common, _in, hds = treediscovery.findcommonincoming(repo, remote,
                                                                force=True)
            common = set(common)
            if not opts.get('nonheads'):
                ui.write(("unpruned common: %s\n") %
                         " ".join(sorted(short(n) for n in common)))

            clnode = repo.changelog.node
            common = repo.revs('heads(::%ln)', common)
            common = {clnode(r) for r in common}
            return common, hds
    else:
        # modern sampling-based set discovery
        def doit(pushedrevs, remoteheads, remote=remote):
            nodes = None
            if pushedrevs:
                revs = scmutil.revrange(repo, pushedrevs)
                nodes = [repo[r].node() for r in revs]
            common, any, hds = setdiscovery.findcommonheads(ui, repo, remote,
                                                            ancestorsof=nodes)
            return common, hds

    remoterevs, _checkout = hg.addbranchrevs(repo, remote, branches, revs=None)
    localrevs = opts['rev']
    with util.timedcm('debug-discovery') as t:
        common, hds = doit(localrevs, remoterevs)

    # compute all statistics
    common = set(common)
    rheads = set(hds)
    lheads = set(repo.heads())

    data = {}
    data['elapsed'] = t.elapsed
    data['nb-common'] = len(common)
    data['nb-common-local'] = len(common & lheads)
    data['nb-common-remote'] = len(common & rheads)
    data['nb-common-both'] = len(common & rheads & lheads)
    data['nb-local'] = len(lheads)
    data['nb-local-missing'] = data['nb-local'] - data['nb-common-local']
    data['nb-remote'] = len(rheads)
    data['nb-remote-unknown'] = data['nb-remote'] - data['nb-common-remote']
    data['nb-revs'] = len(repo.revs('all()'))
    data['nb-revs-common'] = len(repo.revs('::%ln', common))
    data['nb-revs-missing'] = data['nb-revs'] - data['nb-revs-common']

    # display discovery summary
    ui.write(("elapsed time: %(elapsed)f seconds\n") % data)
    ui.write(("heads summary:\n"))
    ui.write((" total common heads: %(nb-common)9d\n") % data)
    ui.write((" also local heads: %(nb-common-local)9d\n") % data)
    ui.write((" also remote heads: %(nb-common-remote)9d\n") % data)
    ui.write((" both: %(nb-common-both)9d\n") % data)
    ui.write((" local heads: %(nb-local)9d\n") % data)
    ui.write((" common: %(nb-common-local)9d\n") % data)
    ui.write((" missing: %(nb-local-missing)9d\n") % data)
    ui.write((" remote heads: %(nb-remote)9d\n") % data)
    ui.write((" common: %(nb-common-remote)9d\n") % data)
    ui.write((" unknown: %(nb-remote-unknown)9d\n") % data)
    ui.write(("local changesets: %(nb-revs)9d\n") % data)
    ui.write((" common: %(nb-revs-common)9d\n") % data)
    ui.write((" missing: %(nb-revs-missing)9d\n") % data)

    if ui.verbose:
        ui.write(("common heads: %s\n") %
                 " ".join(sorted(short(n) for n in common)))
861 861
# read/copy buffer size (4 KiB) used by debugdownload below
_chunksize = 4 << 10
863 863
@command('debugdownload',
    [
        ('o', 'output', '', _('path')),
    ],
    optionalrepo=True)
def debugdownload(ui, repo, url, output=None, **opts):
    """download a resource using Mercurial logic and config

    The body is copied in _chunksize pieces either to the ``--output``
    file or, when no output path is given, to the ui.
    """
    fh = urlmod.open(ui, url, output)

    dest = ui
    if output:
        dest = open(output, "wb", _chunksize)
    try:
        data = fh.read(_chunksize)
        while data:
            dest.write(data)
            data = fh.read(_chunksize)
    finally:
        # close the response handle too (it was previously leaked),
        # not only the destination file
        fh.close()
        if output:
            dest.close()
885 885
@command('debugextensions', cmdutil.formatteropts, [], optionalrepo=True)
def debugextensions(ui, repo, **opts):
    '''show information about active extensions'''
    opts = pycompat.byteskwargs(opts)
    exts = extensions.extensions(ui)
    hgver = util.version()
    fm = ui.formatter('debugextensions', opts)
    for extname, extmod in sorted(exts, key=operator.itemgetter(0)):
        isinternal = extensions.ismoduleinternal(extmod)
        extsource = pycompat.fsencode(extmod.__file__)
        if isinternal:
            exttestedwith = [] # never expose magic string to users
        else:
            exttestedwith = getattr(extmod, 'testedwith', '').split()
        extbuglink = getattr(extmod, 'buglink', None)

        fm.startitem()

        if ui.quiet or ui.verbose:
            fm.write('name', '%s\n', extname)
        else:
            # default output: name plus a compatibility note on one line
            fm.write('name', '%s', extname)
            if isinternal or hgver in exttestedwith:
                fm.plain('\n')
            elif not exttestedwith:
                fm.plain(_(' (untested!)\n'))
            else:
                lasttestedversion = exttestedwith[-1]
                fm.plain(' (%s!)\n' % lasttestedversion)

        fm.condwrite(ui.verbose and extsource, 'source',
                     _(' location: %s\n'), extsource or "")

        if ui.verbose:
            fm.plain(_(' bundled: %s\n') % ['no', 'yes'][isinternal])
        # always record the flag for machine-readable formatters
        fm.data(bundled=isinternal)

        fm.condwrite(ui.verbose and exttestedwith, 'testedwith',
                     _(' tested with: %s\n'),
                     fm.formatlist(exttestedwith, name='ver'))

        fm.condwrite(ui.verbose and extbuglink, 'buglink',
                     _(' bug reporting: %s\n'), extbuglink or "")

    fm.end()
931 931
@command('debugfileset',
    [('r', 'rev', '', _('apply the filespec on this revision'), _('REV')),
     ('', 'all-files', False,
      _('test files from all revisions and working directory')),
     ('s', 'show-matcher', None,
      _('print internal representation of matcher')),
     ('p', 'show-stage', [],
      _('print parsed tree at the given stage'), _('NAME'))],
    _('[-r REV] [--all-files] [OPTION]... FILESPEC'))
def debugfileset(ui, repo, expr, **opts):
    '''parse and apply a fileset specification'''
    from . import fileset
    fileset.symbols # force import of fileset so we have predicates to optimize
    opts = pycompat.byteskwargs(opts)
    ctx = scmutil.revsingle(repo, opts.get('rev'), None)

    # the pipeline of tree transformations; --show-stage names refer to these
    stages = [
        ('parsed', pycompat.identity),
        ('analyzed', filesetlang.analyze),
        ('optimized', filesetlang.optimize),
    ]
    stagenames = set(n for n, f in stages)

    showalways = set()
    if ui.verbose and not opts['show_stage']:
        # show parsed tree by --verbose (deprecated)
        showalways.add('parsed')
    if opts['show_stage'] == ['all']:
        showalways.update(stagenames)
    else:
        for n in opts['show_stage']:
            if n not in stagenames:
                raise error.Abort(_('invalid stage name: %s') % n)
        showalways.update(opts['show_stage'])

    tree = filesetlang.parse(expr)
    for n, f in stages:
        tree = f(tree)
        if n in showalways:
            if opts['show_stage'] or n != 'parsed':
                ui.write(("* %s:\n") % n)
            ui.write(filesetlang.prettyformat(tree), "\n")

    # collect the candidate file names the matcher will be applied to
    files = set()
    if opts['all_files']:
        for r in repo:
            c = repo[r]
            files.update(c.files())
            files.update(c.substate)
    if opts['all_files'] or ctx.rev() is None:
        wctx = repo[None]
        files.update(repo.dirstate.walk(scmutil.matchall(repo),
                                        subrepos=list(wctx.substate),
                                        unknown=True, ignored=True))
        files.update(wctx.substate)
    else:
        files.update(ctx.files())
        files.update(ctx.substate)

    m = ctx.matchfileset(expr)
    if opts['show_matcher'] or (opts['show_matcher'] is None and ui.verbose):
        ui.write(('* matcher:\n'), stringutil.prettyrepr(m), '\n')
    for f in sorted(files):
        if not m(f):
            continue
        ui.write("%s\n" % f)
998 998
@command('debugformat',
    [] + cmdutil.formatteropts)
def debugformat(ui, repo, **opts):
    """display format information about the current repository

    Use --verbose to get extra information about current config value and
    Mercurial default."""
    opts = pycompat.byteskwargs(opts)
    maxvariantlength = max(len(fv.name) for fv in upgrade.allformatvariant)
    maxvariantlength = max(len('format-variant'), maxvariantlength)

    def makeformatname(name):
        # '%s:' padded so the value columns line up across all variants
        return '%s:' + (' ' * (maxvariantlength - len(name)))

    fm = ui.formatter('debugformat', opts)
    if fm.isplain():
        def formatvalue(value):
            # plain output renders booleans as yes/no; strings pass through
            if util.safehasattr(value, 'startswith'):
                return value
            if value:
                return 'yes'
            else:
                return 'no'
    else:
        formatvalue = pycompat.identity

    fm.plain('format-variant')
    fm.plain(' ' * (maxvariantlength - len('format-variant')))
    fm.plain(' repo')
    if ui.verbose:
        fm.plain(' config default')
    fm.plain('\n')
    for fv in upgrade.allformatvariant:
        fm.startitem()
        repovalue = fv.fromrepo(repo)
        configvalue = fv.fromconfig(repo)

        # color labels highlight mismatches between repo, config and default
        if repovalue != configvalue:
            namelabel = 'formatvariant.name.mismatchconfig'
            repolabel = 'formatvariant.repo.mismatchconfig'
        elif repovalue != fv.default:
            namelabel = 'formatvariant.name.mismatchdefault'
            repolabel = 'formatvariant.repo.mismatchdefault'
        else:
            namelabel = 'formatvariant.name.uptodate'
            repolabel = 'formatvariant.repo.uptodate'

        fm.write('name', makeformatname(fv.name), fv.name,
                 label=namelabel)
        fm.write('repo', ' %3s', formatvalue(repovalue),
                 label=repolabel)
        if fv.default != configvalue:
            configlabel = 'formatvariant.config.special'
        else:
            configlabel = 'formatvariant.config.default'
        fm.condwrite(ui.verbose, 'config', ' %6s', formatvalue(configvalue),
                     label=configlabel)
        fm.condwrite(ui.verbose, 'default', ' %7s', formatvalue(fv.default),
                     label='formatvariant.default')
        fm.plain('\n')
    fm.end()
1060 1060
@command('debugfsinfo', [], _('[PATH]'), norepo=True)
def debugfsinfo(ui, path="."):
    """show information detected about current filesystem"""
    def yesno(value):
        # same rendering as the historical 'x and "yes" or "no"' idiom
        return 'yes' if value else 'no'

    ui.write(('path: %s\n') % path)
    ui.write(('mounted on: %s\n') % (util.getfsmountpoint(path) or '(unknown)'))
    ui.write(('exec: %s\n') % yesno(util.checkexec(path)))
    ui.write(('fstype: %s\n') % (util.getfstype(path) or '(unknown)'))
    ui.write(('symlink: %s\n') % yesno(util.checklink(path)))
    ui.write(('hardlink: %s\n') % yesno(util.checknlink(path)))
    casesensitive = '(unknown)'
    try:
        # probing case sensitivity needs a real file in the target directory
        with pycompat.namedtempfile(prefix='.debugfsinfo', dir=path) as f:
            casesensitive = yesno(util.fscasesensitive(f.name))
    except OSError:
        pass
    ui.write(('case-sensitive: %s\n') % casesensitive)
1077 1077
@command('debuggetbundle',
    [('H', 'head', [], _('id of head node'), _('ID')),
     ('C', 'common', [], _('id of common node'), _('ID')),
     ('t', 'type', 'bzip2', _('bundle compression type to use'), _('TYPE'))],
    _('REPO FILE [-H|-C ID]...'),
    norepo=True)
def debuggetbundle(ui, repopath, bundlepath, head=None, common=None, **opts):
    """retrieves a bundle from a repo

    Every ID must be a full-length hex node id string. Saves the bundle to the
    given file.
    """
    opts = pycompat.byteskwargs(opts)
    repo = hg.peer(ui, opts, repopath)
    if not repo.capable('getbundle'):
        raise error.Abort("getbundle() not supported by target repository")
    # keys use r'' so they stay native strings for ** expansion on py3
    args = {}
    if common:
        args[r'common'] = [bin(s) for s in common]
    if head:
        args[r'heads'] = [bin(s) for s in head]
    # TODO: get desired bundlecaps from command line.
    args[r'bundlecaps'] = None
    bundle = repo.getbundle('debug', **args)

    # map the user-facing compression name to the on-disk bundle type
    bundletype = opts.get('type', 'bzip2').lower()
    btypes = {'none': 'HG10UN',
              'bzip2': 'HG10BZ',
              'gzip': 'HG10GZ',
              'bundle2': 'HG20'}
    bundletype = btypes.get(bundletype)
    if bundletype not in bundle2.bundletypes:
        raise error.Abort(_('unknown bundle type specified with --type'))
    bundle2.writebundle(ui, bundle, bundlepath, bundletype)
1112 1112
@command('debugignore', [], '[FILE]')
def debugignore(ui, repo, *files, **opts):
    """display the combined ignore pattern and information about ignored files

    With no argument display the combined ignore pattern.

    Given space separated file names, shows if the given file is ignored and
    if so, show the ignore rule (file and line number) that matched it.
    """
    ignore = repo.dirstate._ignore
    if not files:
        # Show all the patterns
        ui.write("%s\n" % pycompat.byterepr(ignore))
        return

    m = scmutil.match(repo[None], pats=files)
    uipathfn = scmutil.getuipathfn(repo, legacyrelativevalue=True)
    for f in m.files():
        nf = util.normpath(f)
        matched = None
        matchinfo = None
        if nf != '.':
            if ignore(nf):
                matched = nf
                matchinfo = repo.dirstate._ignorefileandline(nf)
            else:
                # the file itself did not match; one of its parent
                # directories may be ignored instead
                for parent in util.finddirs(nf):
                    if ignore(parent):
                        matched = parent
                        matchinfo = repo.dirstate._ignorefileandline(parent)
                        break
        if not matched:
            ui.write(_("%s is not ignored\n") % uipathfn(f))
            continue
        if matched == nf:
            ui.write(_("%s is ignored\n") % uipathfn(f))
        else:
            ui.write(_("%s is ignored because of "
                       "containing directory %s\n")
                     % (uipathfn(f), matched))
        ignorefile, lineno, line = matchinfo
        ui.write(_("(ignore rule in %s, line %d: '%s')\n")
                 % (ignorefile, lineno, line))
1155 1155
@command('debugindex', cmdutil.debugrevlogopts + cmdutil.formatteropts,
    _('-c|-m|FILE'))
def debugindex(ui, repo, file_=None, **opts):
    """dump index data for a storage primitive"""
    opts = pycompat.byteskwargs(opts)
    store = cmdutil.openstorage(repo, 'debugindex', file_, opts)

    # Full-length node hashes under --debug, abbreviated ones otherwise.
    fmtnode = hex if ui.debugflag else short

    # Column width for node ids: measure the first entry; 12 is the
    # fallback for an empty store.
    idlen = 12
    for rev in store:
        idlen = len(fmtnode(store.node(rev)))
        break

    fm = ui.formatter('debugindex', opts)
    fm.plain(b' rev linkrev %s %s p2\n' % (
        b'nodeid'.ljust(idlen),
        b'p1'.ljust(idlen)))

    for rev in store:
        node = store.node(rev)
        p1node, p2node = store.parents(node)

        fm.startitem()
        fm.write(b'rev', b'%6d ', rev)
        fm.write(b'linkrev', '%7d ', store.linkrev(rev))
        fm.write(b'node', '%s ', fmtnode(node))
        fm.write(b'p1', '%s ', fmtnode(p1node))
        fm.write(b'p2', '%s', fmtnode(p2node))
        fm.plain(b'\n')

    fm.end()
1191 1191
@command('debugindexdot', cmdutil.debugrevlogopts,
    _('-c|-m|FILE'), optionalrepo=True)
def debugindexdot(ui, repo, file_=None, **opts):
    """dump an index DAG as a graphviz dot file"""
    opts = pycompat.byteskwargs(opts)
    store = cmdutil.openstorage(repo, 'debugindexdot', file_, opts)
    # Emit one "parent -> child" edge per parent; the null parent is
    # omitted so merge and non-merge revisions render naturally.
    ui.write(("digraph G {\n"))
    for rev in store:
        node = store.node(rev)
        p1, p2 = store.parents(node)
        ui.write("\t%d -> %d\n" % (store.rev(p1), rev))
        if p2 != nullid:
            ui.write("\t%d -> %d\n" % (store.rev(p2), rev))
    ui.write("}\n")
1206 1206
@command('debugindexstats', [])
def debugindexstats(ui, repo):
    """show stats related to the changelog index"""
    # NOTE(review): presumably this call exercises the native index so its
    # counters are populated before we read them — confirm against revlog.
    repo.changelog.shortest(nullid, 1)
    index = repo.changelog.index
    if not util.safehasattr(index, 'stats'):
        raise error.Abort(_('debugindexstats only works with native code'))
    for key, value in sorted(index.stats().items()):
        ui.write('%s: %d\n' % (key, value))
1216 1216
@command('debuginstall', [] + cmdutil.formatteropts, '', norepo=True)
def debuginstall(ui, **opts):
    '''test Mercurial installation

    Returns 0 on success.
    '''
    opts = pycompat.byteskwargs(opts)

    problems = 0

    fm = ui.formatter('debuginstall', opts)
    fm.startitem()

    # encoding
    fm.write('encoding', _("checking encoding (%s)...\n"), encoding.encoding)
    err = None
    try:
        codecs.lookup(pycompat.sysstr(encoding.encoding))
    except LookupError as inst:
        err = stringutil.forcebytestr(inst)
        problems += 1
    fm.condwrite(err, 'encodingerror', _(" %s\n"
                 " (check that your locale is properly set)\n"), err)

    # Python
    # sys.executable can be an empty string or None (e.g. embedded
    # interpreters), so fall back to "unknown" instead of printing an
    # empty value. (This block previously carried both the pre- and
    # post-change version of this line; only the fallback form is kept.)
    fm.write('pythonexe', _("checking Python executable (%s)\n"),
             pycompat.sysexecutable or _("unknown"))
    fm.write('pythonver', _("checking Python version (%s)\n"),
             ("%d.%d.%d" % sys.version_info[:3]))
    fm.write('pythonlib', _("checking Python lib (%s)...\n"),
             os.path.dirname(pycompat.fsencode(os.__file__)))

    security = set(sslutil.supportedprotocols)
    if sslutil.hassni:
        security.add('sni')

    fm.write('pythonsecurity', _("checking Python security support (%s)\n"),
             fm.formatlist(sorted(security), name='protocol',
                           fmt='%s', sep=','))

    # These are warnings, not errors. So don't increment problem count. This
    # may change in the future.
    if 'tls1.2' not in security:
        fm.plain(_(' TLS 1.2 not supported by Python install; '
                   'network connections lack modern security\n'))
    if 'sni' not in security:
        fm.plain(_(' SNI not supported by Python install; may have '
                   'connectivity issues with some servers\n'))

    # TODO print CA cert info

    # hg version
    hgver = util.version()
    fm.write('hgver', _("checking Mercurial version (%s)\n"),
             hgver.split('+')[0])
    fm.write('hgverextra', _("checking Mercurial custom build (%s)\n"),
             '+'.join(hgver.split('+')[1:]))

    # compiled modules
    fm.write('hgmodulepolicy', _("checking module policy (%s)\n"),
             policy.policy)
    fm.write('hgmodules', _("checking installed modules (%s)...\n"),
             os.path.dirname(pycompat.fsencode(__file__)))

    rustandc = policy.policy in ('rust+c', 'rust+c-allow')
    rustext = rustandc # for now, that's the only case
    cext = policy.policy in ('c', 'allow') or rustandc
    nopure = cext or rustext
    if nopure:
        # Importing the accelerated extension modules is the actual check;
        # any failure is reported but counted only once.
        err = None
        try:
            if cext:
                from .cext import (
                    base85,
                    bdiff,
                    mpatch,
                    osutil,
                )
                # quiet pyflakes
                dir(bdiff), dir(mpatch), dir(base85), dir(osutil)
            if rustext:
                from .rustext import (
                    ancestor,
                    dirstate,
                )
                dir(ancestor), dir(dirstate) # quiet pyflakes
        except Exception as inst:
            err = stringutil.forcebytestr(inst)
            problems += 1
        fm.condwrite(err, 'extensionserror', " %s\n", err)

    compengines = util.compengines._engines.values()
    fm.write('compengines', _('checking registered compression engines (%s)\n'),
             fm.formatlist(sorted(e.name() for e in compengines),
                           name='compengine', fmt='%s', sep=', '))
    fm.write('compenginesavail', _('checking available compression engines '
                                   '(%s)\n'),
             fm.formatlist(sorted(e.name() for e in compengines
                                  if e.available()),
                           name='compengine', fmt='%s', sep=', '))
    wirecompengines = compression.compengines.supportedwireengines(
        compression.SERVERROLE)
    fm.write('compenginesserver', _('checking available compression engines '
                                    'for wire protocol (%s)\n'),
             fm.formatlist([e.name() for e in wirecompengines
                            if e.wireprotosupport()],
                           name='compengine', fmt='%s', sep=', '))
    re2 = 'missing'
    if util._re2:
        re2 = 'available'
    fm.plain(_('checking "re2" regexp engine (%s)\n') % re2)
    fm.data(re2=bool(util._re2))

    # templates
    p = templater.templatepaths()
    fm.write('templatedirs', 'checking templates (%s)...\n', ' '.join(p))
    fm.condwrite(not p, '', _(" no template directories found\n"))
    if p:
        m = templater.templatepath("map-cmdline.default")
        if m:
            # template found, check if it is working
            err = None
            try:
                templater.templater.frommapfile(m)
            except Exception as inst:
                err = stringutil.forcebytestr(inst)
                p = None
            fm.condwrite(err, 'defaulttemplateerror', " %s\n", err)
        else:
            p = None
        fm.condwrite(p, 'defaulttemplate',
                     _("checking default template (%s)\n"), m)
        fm.condwrite(not m, 'defaulttemplatenotfound',
                     _(" template '%s' not found\n"), "default")
    if not p:
        problems += 1
    fm.condwrite(not p, '',
                 _(" (templates seem to have been installed incorrectly)\n"))

    # editor
    editor = ui.geteditor()
    editor = util.expandpath(editor)
    editorbin = procutil.shellsplit(editor)[0]
    fm.write('editor', _("checking commit editor... (%s)\n"), editorbin)
    cmdpath = procutil.findexe(editorbin)
    # 'vi' is only the implicit default, so its absence gets a softer
    # message and does not count as a problem.
    fm.condwrite(not cmdpath and editor == 'vi', 'vinotfound',
                 _(" No commit editor set and can't find %s in PATH\n"
                   " (specify a commit editor in your configuration"
                   " file)\n"), not cmdpath and editor == 'vi' and editorbin)
    fm.condwrite(not cmdpath and editor != 'vi', 'editornotfound',
                 _(" Can't find editor '%s' in PATH\n"
                   " (specify a commit editor in your configuration"
                   " file)\n"), not cmdpath and editorbin)
    if not cmdpath and editor != 'vi':
        problems += 1

    # check username
    username = None
    err = None
    try:
        username = ui.username()
    except error.Abort as e:
        err = stringutil.forcebytestr(e)
        problems += 1

    fm.condwrite(username, 'username', _("checking username (%s)\n"), username)
    fm.condwrite(err, 'usernameerror', _("checking username...\n %s\n"
        " (specify a username in your configuration file)\n"), err)

    fm.condwrite(not problems, '',
                 _("no problems detected\n"))
    if not problems:
        fm.data(problems=problems)
    fm.condwrite(problems, 'problems',
                 _("%d problems detected,"
                   " please check your install!\n"), problems)
    fm.end()

    return problems
1396 1396
@command('debugknown', [], _('REPO ID...'), norepo=True)
def debugknown(ui, repopath, *ids, **opts):
    """test whether node ids are known to a repo

    Every ID must be a full-length hex node id string. Returns a list of 0s
    and 1s indicating unknown/known.
    """
    opts = pycompat.byteskwargs(opts)
    peer = hg.peer(ui, opts, repopath)
    if not peer.capable('known'):
        raise error.Abort("known() not supported by target repository")
    # One known() round trip; render the booleans as a compact 0/1 string.
    flags = peer.known([bin(s) for s in ids])
    bits = ["1" if known else "0" for known in flags]
    ui.write("%s\n" % ("".join(bits)))
1410 1410
@command('debuglabelcomplete', [], _('LABEL...'))
def debuglabelcomplete(ui, repo, *args):
    '''backwards compatibility with old bash completion scripts (DEPRECATED)'''
    # Deprecated alias: forward everything to the modern implementation.
    return debugnamecomplete(ui, repo, *args)
1415 1415
@command('debuglocks',
         [('L', 'force-lock', None, _('free the store lock (DANGEROUS)')),
          ('W', 'force-wlock', None,
           _('free the working state lock (DANGEROUS)')),
          ('s', 'set-lock', None, _('set the store lock until stopped')),
          ('S', 'set-wlock', None,
           _('set the working state lock until stopped'))],
         _('[OPTION]...'))
def debuglocks(ui, repo, **opts):
    """show or modify state of locks

    By default, this command will show which locks are held. This
    includes the user and process holding the lock, the amount of time
    the lock has been held, and the machine name where the process is
    running if it's not local.

    Locks protect the integrity of Mercurial's data, so should be
    treated with care. System crashes or other interruptions may cause
    locks to not be properly released, though Mercurial will usually
    detect and remove such stale locks automatically.

    However, detecting stale locks may not always be possible (for
    instance, on a shared filesystem). Removing locks may also be
    blocked by filesystem permissions.

    Setting a lock will prevent other commands from changing the data.
    The command will wait until an interruption (SIGINT, SIGTERM, ...) occurs.
    The set locks are removed when the command exits.

    Returns 0 if no locks are held.

    """

    # Forced removal: unconditionally delete the lock file(s) without
    # checking whether they are actually held (hence "DANGEROUS").
    if opts.get(r'force_lock'):
        repo.svfs.unlink('lock')
    if opts.get(r'force_wlock'):
        repo.vfs.unlink('wlock')
    if opts.get(r'force_lock') or opts.get(r'force_wlock'):
        return 0

    # Lock-setting mode: acquire the requested lock(s) non-blockingly and
    # hold them until the user answers the prompt (or the process is
    # interrupted); the finally clause guarantees release either way.
    locks = []
    try:
        if opts.get(r'set_wlock'):
            try:
                locks.append(repo.wlock(False))
            except error.LockHeld:
                raise error.Abort(_('wlock is already held'))
        if opts.get(r'set_lock'):
            try:
                locks.append(repo.lock(False))
            except error.LockHeld:
                raise error.Abort(_('lock is already held'))
        if len(locks):
            ui.promptchoice(_("ready to release the lock (y)? $$ &Yes"))
            return 0
    finally:
        release(*locks)

    # Reporting mode (no options given): describe each lock's state.
    now = time.time()
    held = 0

    def report(vfs, name, method):
        # Returns 1 if the named lock is held by someone else, 0 if free.
        # this causes stale locks to get reaped for more accurate reporting
        try:
            l = method(False)
        except error.LockHeld:
            l = None

        if l:
            # we acquired it ourselves, so it was free; release immediately
            l.release()
        else:
            try:
                st = vfs.lstat(name)
                age = now - st[stat.ST_MTIME]
                user = util.username(st.st_uid)
                locker = vfs.readlock(name)
                # lock contents are "host:pid"; only show the host when it
                # differs from the local machine
                if ":" in locker:
                    host, pid = locker.split(':')
                    if host == socket.gethostname():
                        locker = 'user %s, process %s' % (user or b'None', pid)
                    else:
                        locker = ('user %s, process %s, host %s'
                                  % (user or b'None', pid, host))
                ui.write(("%-6s %s (%ds)\n") % (name + ":", locker, age))
                return 1
            except OSError as e:
                # ENOENT means the lock vanished between the probe and the
                # stat — treat it as free; anything else is a real error
                if e.errno != errno.ENOENT:
                    raise

        ui.write(("%-6s free\n") % (name + ":"))
        return 0

    held += report(repo.svfs, "lock", repo.lock)
    held += report(repo.vfs, "wlock", repo.wlock)

    return held
1512 1512
@command('debugmanifestfulltextcache', [
    ('', 'clear', False, _('clear the cache')),
    ('a', 'add', [], _('add the given manifest nodes to the cache'),
     _('NODE'))
    ], '')
def debugmanifestfulltextcache(ui, repo, add=(), **opts):
    """show, clear or amend the contents of the manifest fulltext cache"""

    def getcache():
        # The fulltext cache is an implementation detail of revlog-based
        # manifest storage; abort with a clear message when the configured
        # storage does not provide one.
        r = repo.manifestlog.getstorage(b'')
        try:
            return r._fulltextcache
        except AttributeError:
            msg = _("Current revlog implementation doesn't appear to have a "
                    "manifest fulltext cache\n")
            raise error.Abort(msg)

    if opts.get(r'clear'):
        # wlock because clearing also removes the persisted cache file
        with repo.wlock():
            cache = getcache()
            cache.clear(clear_persisted_data=True)
        return

    if add:
        # Populate the cache by reading each requested manifest node.
        with repo.wlock():
            m = repo.manifestlog
            store = m.getstorage(b'')
            for n in add:
                try:
                    manifest = m[store.lookup(n)]
                except error.LookupError as e:
                    raise error.Abort(e, hint="Check your manifest node id")
                manifest.read() # stores revision in cache too
        return

    # No action requested: dump the cache contents, most recent first.
    cache = getcache()
    if not len(cache):
        ui.write(_('cache empty\n'))
    else:
        ui.write(
            _('cache contains %d manifest entries, in order of most to '
              'least recent:\n') % (len(cache),))
        totalsize = 0
        for nodeid in cache:
            # Use cache.get to not update the LRU order
            data = cache.peek(nodeid)
            size = len(data)
            totalsize += size + 24 # 20 bytes nodeid, 4 bytes size
            ui.write(_('id: %s, size %s\n') % (
                hex(nodeid), util.bytecount(size)))
        ondisk = cache._opener.stat('manifestfulltextcache').st_size
        ui.write(
            _('total cache data size %s, on-disk %s\n') % (
                util.bytecount(totalsize), util.bytecount(ondisk))
        )
1568 1568
@command('debugmergestate', [], '')
def debugmergestate(ui, repo, *args):
    """print merge state

    Use --verbose to print out information about whether v1 or v2 merge state
    was chosen."""
    # Render the null hash as the literal 'null' for readability.
    def _hashornull(h):
        if h == nullhex:
            return 'null'
        else:
            return h

    # Pretty-print the raw record list for one on-disk format version.
    def printrecords(version):
        ui.write(('* version %d records\n') % version)
        if version == 1:
            records = v1records
        else:
            records = v2records

        for rtype, record in records:
            # pretty print some record types
            if rtype == 'L':
                ui.write(('local: %s\n') % record)
            elif rtype == 'O':
                ui.write(('other: %s\n') % record)
            elif rtype == 'm':
                # merge-driver record: NUL-separated driver name and state
                driver, mdstate = record.split('\0', 1)
                ui.write(('merge driver: %s (state "%s")\n')
                         % (driver, mdstate))
            elif rtype in 'FDC':
                # per-file merge record; fields are NUL-separated, and the
                # v1 format stored one fewer field than v2
                r = record.split('\0')
                f, state, hash, lfile, afile, anode, ofile = r[0:7]
                if version == 1:
                    onode = 'not stored in v1 format'
                    flags = r[7]
                else:
                    onode, flags = r[7:9]
                ui.write(('file: %s (record type "%s", state "%s", hash %s)\n')
                         % (f, rtype, state, _hashornull(hash)))
                ui.write((' local path: %s (flags "%s")\n') % (lfile, flags))
                ui.write((' ancestor path: %s (node %s)\n')
                         % (afile, _hashornull(anode)))
                ui.write((' other path: %s (node %s)\n')
                         % (ofile, _hashornull(onode)))
            elif rtype == 'f':
                # file-extras record: filename then alternating key/value
                # pairs, all NUL-separated
                filename, rawextras = record.split('\0', 1)
                extras = rawextras.split('\0')
                i = 0
                extrastrings = []
                while i < len(extras):
                    extrastrings.append('%s = %s' % (extras[i], extras[i + 1]))
                    i += 2

                ui.write(('file extras: %s (%s)\n')
                         % (filename, ', '.join(extrastrings)))
            elif rtype == 'l':
                # conflict-marker labels: local, other, and optional base
                labels = record.split('\0', 2)
                labels = [l for l in labels if len(l) > 0]
                ui.write(('labels:\n'))
                ui.write((' local: %s\n' % labels[0]))
                ui.write((' other: %s\n' % labels[1]))
                if len(labels) > 2:
                    ui.write((' base: %s\n' % labels[2]))
            else:
                ui.write(('unrecognized entry: %s\t%s\n')
                         % (rtype, record.replace('\0', '\t')))

    # Avoid mergestate.read() since it may raise an exception for unsupported
    # merge state records. We shouldn't be doing this, but this is OK since this
    # command is pretty low-level.
    ms = mergemod.mergestate(repo)

    # sort so that reasonable information is on top
    v1records = ms._readrecordsv1()
    v2records = ms._readrecordsv2()
    order = 'LOml'
    def key(r):
        # known record types sort first, in 'LOml' order; unknown types
        # sort afterwards by their payload
        idx = order.find(r[0])
        if idx == -1:
            return (1, r[1])
        else:
            return (0, idx)
    v1records.sort(key=key)
    v2records.sort(key=key)

    if not v1records and not v2records:
        ui.write(('no merge state found\n'))
    elif not v2records:
        ui.note(('no version 2 merge state\n'))
        printrecords(1)
    elif ms._v1v2match(v1records, v2records):
        ui.note(('v1 and v2 states match: using v2\n'))
        printrecords(2)
    else:
        ui.note(('v1 and v2 states mismatch: using v1\n'))
        printrecords(1)
    if ui.verbose:
        printrecords(2)
1667 1667
@command('debugnamecomplete', [], _('NAME...'))
def debugnamecomplete(ui, repo, *args):
    '''complete "names" - tags, open branch names, bookmark names'''

    names = set()
    # 'branches' is handled specially below so that only open branches
    # are offered, preserving this command's historical behaviour.
    for name, ns in repo.names.iteritems():
        if name != 'branches':
            names.update(ns.listnames(repo))
    names.update(tag for (tag, heads, tip, closed)
                 in repo.branchmap().iterbranches() if not closed)
    prefixes = args or ['']
    completions = set()
    for prefix in prefixes:
        completions.update(n for n in names if n.startswith(prefix))
    ui.write('\n'.join(sorted(completions)))
    ui.write('\n')
1687 1687
@command('debugobsolete',
         [('', 'flags', 0, _('markers flag')),
          ('', 'record-parents', False,
           _('record parent information for the precursor')),
          ('r', 'rev', [], _('display markers relevant to REV')),
          ('', 'exclusive', False, _('restrict display to markers only '
                                     'relevant to REV')),
          ('', 'index', False, _('display index of the marker')),
          ('', 'delete', [], _('delete markers specified by indices')),
         ] + cmdutil.commitopts2 + cmdutil.formatteropts,
         _('[OBSOLETED [REPLACEMENT ...]]'))
def debugobsolete(ui, repo, precursor=None, *successors, **opts):
    """create arbitrary obsolete marker

    With no arguments, displays the list of obsolescence markers."""

    opts = pycompat.byteskwargs(opts)

    def parsenodeid(s):
        try:
            # We do not use revsingle/revrange functions here to accept
            # arbitrary node identifiers, possibly not present in the
            # local repository.
            n = bin(s)
            if len(n) != len(nullid):
                raise TypeError()
            return n
        except TypeError:
            raise error.Abort('changeset references must be full hexadecimal '
                              'node identifiers')

    # Deletion mode: remove the markers at the given store indices.
    if opts.get('delete'):
        indices = []
        for v in opts.get('delete'):
            try:
                indices.append(int(v))
            except ValueError:
                raise error.Abort(_('invalid index value: %r') % v,
                                  hint=_('use integers for indices'))

        if repo.currenttransaction():
            raise error.Abort(_('cannot delete obsmarkers in the middle '
                                'of transaction.'))

        with repo.lock():
            n = repair.deleteobsmarkers(repo.obsstore, indices)
            ui.write(_('deleted %i obsolescence markers\n') % n)

        return

    if precursor is not None:
        # Creation mode: record a marker precursor -> successors inside a
        # lock + transaction; both are released/rolled back on failure.
        if opts['rev']:
            raise error.Abort('cannot select revision when creating marker')
        metadata = {}
        metadata['user'] = encoding.fromlocal(opts['user'] or ui.username())
        succs = tuple(parsenodeid(succ) for succ in successors)
        l = repo.lock()
        try:
            tr = repo.transaction('debugobsolete')
            try:
                date = opts.get('date')
                if date:
                    date = dateutil.parsedate(date)
                else:
                    date = None
                prec = parsenodeid(precursor)
                parents = None
                if opts['record_parents']:
                    if prec not in repo.unfiltered():
                        raise error.Abort('cannot used --record-parents on '
                                          'unknown changesets')
                    parents = repo.unfiltered()[prec].parents()
                    parents = tuple(p.node() for p in parents)
                repo.obsstore.create(tr, prec, succs, opts['flags'],
                                     parents=parents, date=date,
                                     metadata=metadata, ui=ui)
                tr.close()
            except ValueError as exc:
                raise error.Abort(_('bad obsmarker input: %s') %
                                  pycompat.bytestr(exc))
            finally:
                tr.release()
        finally:
            l.release()
    else:
        # Display mode: list markers, optionally filtered by --rev.
        if opts['rev']:
            revs = scmutil.revrange(repo, opts['rev'])
            nodes = [repo[r].node() for r in revs]
            markers = list(obsutil.getmarkers(repo, nodes=nodes,
                                              exclusive=opts['exclusive']))
            markers.sort(key=lambda x: x._data)
        else:
            markers = obsutil.getmarkers(repo)

        markerstoiter = markers
        isrelevant = lambda m: True
        if opts.get('rev') and opts.get('index'):
            markerstoiter = obsutil.getmarkers(repo)
            markerset = set(markers)
            isrelevant = lambda m: m in markerset

        fm = ui.formatter('debugobsolete', opts)
        for i, m in enumerate(markerstoiter):
            if not isrelevant(m):
                # marker can be irrelevant when we're iterating over a set
                # of markers (markerstoiter) which is bigger than the set
                # of markers we want to display (markers)
                # this can happen if both --index and --rev options are
                # provided and thus we need to iterate over all of the markers
                # to get the correct indices, but only display the ones that
                # are relevant to --rev value
                continue
            fm.startitem()
            ind = i if opts.get('index') else None
            cmdutil.showmarker(fm, m, index=ind)
        fm.end()
1804 1804
@command('debugp1copies',
         [('r', 'rev', '', _('revision to debug'), _('REV'))],
         _('[-r REV]'))
def debugp1copies(ui, repo, **opts):
    """dump copy information compared to p1"""

    opts = pycompat.byteskwargs(opts)
    ctx = scmutil.revsingle(repo, opts.get('rev'), default=None)
    # One "source -> destination" line per copy recorded against p1.
    copymap = ctx.p1copies()
    for dst, src in copymap.items():
        ui.write('%s -> %s\n' % (src, dst))
1815 1815
@command('debugp2copies',
         [('r', 'rev', '', _('revision to debug'), _('REV'))],
         _('[-r REV]'))
def debugp2copies(ui, repo, **opts):
    """dump copy information compared to p2"""
    # Fix: this function was mistakenly also named debugp1copies (a
    # copy-paste of the p1 variant), which shadowed that function at
    # module level. The registered command name ('debugp2copies') and
    # behavior are unchanged.
    opts = pycompat.byteskwargs(opts)
    ctx = scmutil.revsingle(repo, opts.get('rev'), default=None)
    for dst, src in ctx.p2copies().items():
        ui.write('%s -> %s\n' % (src, dst))
1826 1826
@command('debugpathcomplete',
         [('f', 'full', None, _('complete an entire path')),
          ('n', 'normal', None, _('show only normal files')),
          ('a', 'added', None, _('show only added files')),
          ('r', 'removed', None, _('show only removed files'))],
         _('FILESPEC...'))
def debugpathcomplete(ui, repo, *specs, **opts):
    '''complete part or all of a tracked path

    This command supports shells that offer path name completion. It
    currently completes only files already known to the dirstate.

    Completion extends only to the next path segment unless
    --full is specified, in which case entire paths are used.'''

    def complete(path, acceptable):
        # Return (files, dirs) completion candidates for 'path', limited to
        # dirstate entries whose state character is in 'acceptable'.
        dirstate = repo.dirstate
        spec = os.path.normpath(os.path.join(encoding.getcwd(), path))
        rootdir = repo.root + pycompat.ossep
        if spec != repo.root and not spec.startswith(rootdir):
            # the spec points outside this repository: nothing to complete
            return [], []
        if os.path.isdir(spec):
            spec += '/'
        # make the spec repo-relative; dirstate paths always use '/'
        spec = spec[len(rootdir):]
        fixpaths = pycompat.ossep != '/'
        if fixpaths:
            spec = spec.replace(pycompat.ossep, '/')
        speclen = len(spec)
        fullpaths = opts[r'full']
        files, dirs = set(), set()
        adddir, addfile = dirs.add, files.add
        for f, st in dirstate.iteritems():
            if f.startswith(spec) and st[0] in acceptable:
                if fixpaths:
                    # convert back to the OS path separator for display
                    f = f.replace('/', pycompat.ossep)
                if fullpaths:
                    addfile(f)
                    continue
                # without --full, stop the completion at the next
                # path-segment boundary
                s = f.find(pycompat.ossep, speclen)
                if s >= 0:
                    adddir(f[:s])
                else:
                    addfile(f)
        return files, dirs

    # Build the set of acceptable dirstate state characters from the
    # -n/-a/-r filters; an empty string means "no filter requested".
    acceptable = ''
    if opts[r'normal']:
        acceptable += 'nm'
    if opts[r'added']:
        acceptable += 'a'
    if opts[r'removed']:
        acceptable += 'r'
    cwd = repo.getcwd()
    if not specs:
        specs = ['.']

    files, dirs = set(), set()
    for spec in specs:
        f, d = complete(spec, acceptable or 'nmar')
        files.update(f)
        dirs.update(d)
    files.update(dirs)
    ui.write('\n'.join(repo.pathto(p, cwd) for p in sorted(files)))
    ui.write('\n')
1891 1891
@command('debugpathcopies',
         cmdutil.walkopts,
         'hg debugpathcopies REV1 REV2 [FILE]',
         inferrepo=True)
def debugpathcopies(ui, repo, rev1, rev2, *pats, **opts):
    """show copies between two revisions"""
    # Resolve both endpoints, then print the copy map sorted by entry.
    srcctx = scmutil.revsingle(repo, rev1)
    dstctx = scmutil.revsingle(repo, rev2)
    matcher = scmutil.match(srcctx, pats, opts)
    copymap = copies.pathcopies(srcctx, dstctx, matcher)
    for dst, src in sorted(copymap.items()):
        ui.write('%s -> %s\n' % (src, dst))
1903 1903
@command('debugpeer', [], _('PATH'), norepo=True)
def debugpeer(ui, path):
    """establish a connection to a peer repository"""
    def yn(flag):
        return _('yes') if flag else _('no')

    # Always enable peer request logging. Requires --debug to display
    # though.
    overrides = {
        ('devel', 'debug.peer-request'): True,
    }

    with ui.configoverride(overrides):
        peer = hg.peer(ui, {}, path)

    islocal = peer.local() is not None
    pushable = peer.canpush()

    ui.write(_('url: %s\n') % peer.url())
    ui.write(_('local: %s\n') % yn(islocal))
    ui.write(_('pushable: %s\n') % yn(pushable))
1922 1922
@command('debugpickmergetool',
    [('r', 'rev', '', _('check for files in this revision'), _('REV')),
     ('', 'changedelete', None, _('emulate merging change and delete')),
    ] + cmdutil.walkopts + cmdutil.mergetoolopts,
    _('[PATTERN]...'),
    inferrepo=True)
def debugpickmergetool(ui, repo, *pats, **opts):
    """examine which merge tool is chosen for specified file

    As described in :hg:`help merge-tools`, Mercurial examines
    configurations below in this order to decide which merge tool is
    chosen for specified file.

    1. ``--tool`` option
    2. ``HGMERGE`` environment variable
    3. configurations in ``merge-patterns`` section
    4. configuration of ``ui.merge``
    5. configurations in ``merge-tools`` section
    6. ``hgmerge`` tool (for historical reason only)
    7. default tool for fallback (``:merge`` or ``:prompt``)

    This command writes out examination result in the style below::

        FILE = MERGETOOL

    By default, all files known in the first parent context of the
    working directory are examined. Use file patterns and/or -I/-X
    options to limit target files. -r/--rev is also useful to examine
    files in another context without actual updating to it.

    With --debug, this command shows warning messages while matching
    against ``merge-patterns`` and so on, too. It is recommended to
    use this option with explicit file patterns and/or -I/-X options,
    because this option increases amount of output per file according
    to configurations in hgrc.

    With -v/--verbose, this command shows configurations below at
    first (only if specified).

    - ``--tool`` option
    - ``HGMERGE`` environment variable
    - configuration of ``ui.merge``

    If merge tool is chosen before matching against
    ``merge-patterns``, this command can't show any helpful
    information, even with --debug. In such case, information above is
    useful to know why a merge tool is chosen.
    """
    opts = pycompat.byteskwargs(opts)
    overrides = {}
    if opts['tool']:
        # --tool maps onto the ui.forcemerge override (highest priority)
        overrides[('ui', 'forcemerge')] = opts['tool']
        ui.note(('with --tool %r\n') % (pycompat.bytestr(opts['tool'])))

    with ui.configoverride(overrides, 'debugmergepatterns'):
        # With -v, report which inputs (env var, config) are in effect.
        hgmerge = encoding.environ.get("HGMERGE")
        if hgmerge is not None:
            ui.note(('with HGMERGE=%r\n') % (pycompat.bytestr(hgmerge)))
        uimerge = ui.config("ui", "merge")
        if uimerge:
            ui.note(('with ui.merge=%r\n') % (pycompat.bytestr(uimerge)))

        ctx = scmutil.revsingle(repo, opts.get('rev'))
        m = scmutil.match(ctx, pats, opts)
        changedelete = opts['changedelete']
        for path in ctx.walk(m):
            fctx = ctx[path]
            try:
                if not ui.debugflag:
                    # Without --debug, suppress the warnings _picktool
                    # may emit while matching against merge-patterns.
                    ui.pushbuffer(error=True)
                # 'l' in flags() indicates a symlink
                tool, toolpath = filemerge._picktool(repo, ui, path,
                                                     fctx.isbinary(),
                                                     'l' in fctx.flags(),
                                                     changedelete)
            finally:
                if not ui.debugflag:
                    ui.popbuffer()
            ui.write(('%s = %s\n') % (path, tool))
2001 2001
@command('debugpushkey', [], _('REPO NAMESPACE [KEY OLD NEW]'), norepo=True)
def debugpushkey(ui, repopath, namespace, *keyinfo, **opts):
    '''access the pushkey key/value protocol

    With two args, list the keys in the given namespace.

    With five args, set a key to new if it currently is set to old.
    Reports success or failure.
    '''

    target = hg.peer(ui, {}, repopath)
    if keyinfo:
        # Write mode: conditionally update KEY from OLD to NEW.
        key, old, new = keyinfo
        with target.commandexecutor() as e:
            r = e.callcommand('pushkey', {
                'namespace': namespace,
                'key': key,
                'old': old,
                'new': new,
            }).result()

        ui.status(pycompat.bytestr(r) + '\n')
        # pushkey reports truthy on success; invert for the exit code.
        return not r
    else:
        # Read mode: dump every key/value pair in the namespace.
        for k, v in sorted(target.listkeys(namespace).iteritems()):
            ui.write("%s\t%s\n" % (stringutil.escapestr(k),
                                   stringutil.escapestr(v)))
2029 2029
@command('debugpvec', [], _('A B'))
def debugpvec(ui, repo, a, b=None):
    # Compare the pvecs (parent vectors) of two revisions and print their
    # relation plus depth/distance metrics.
    ca = scmutil.revsingle(repo, a)
    cb = scmutil.revsingle(repo, b)
    pa = pvec.ctxpvec(ca)
    pb = pvec.ctxpvec(cb)
    # NOTE(review): assumes the four pvec relations (equal, descendant,
    # ancestor, conflicting) are exhaustive; otherwise 'rel' stays unbound.
    if pa == pb:
        rel = "="
    elif pa > pb:
        rel = ">"
    elif pa < pb:
        rel = "<"
    elif pa | pb:
        rel = "|"
    ui.write(_("a: %s\n") % pa)
    ui.write(_("b: %s\n") % pb)
    ui.write(_("depth(a): %d depth(b): %d\n") % (pa._depth, pb._depth))
    ui.write(_("delta: %d hdist: %d distance: %d relation: %s\n") %
             (abs(pa._depth - pb._depth), pvec._hamming(pa._vec, pb._vec),
              pa.distance(pb), rel))
2050 2050
@command('debugrebuilddirstate|debugrebuildstate',
    [('r', 'rev', '', _('revision to rebuild to'), _('REV')),
     ('', 'minimal', None, _('only rebuild files that are inconsistent with '
                             'the working copy parent')),
    ],
    _('[-r REV]'))
def debugrebuilddirstate(ui, repo, rev, **opts):
    """rebuild the dirstate as it would look like for the given revision

    If no revision is specified the first current parent will be used.

    The dirstate will be set to the files of the given revision.
    The actual working directory content or existing dirstate
    information such as adds or removes is not considered.

    ``minimal`` will only rebuild the dirstate status for files that claim to be
    tracked but are not in the parent manifest, or that exist in the parent
    manifest but are not in the dirstate. It will not change adds, removes, or
    modified files that are in the working copy parent.

    One use of this command is to make the next :hg:`status` invocation
    check the actual file content.
    """
    ctx = scmutil.revsingle(repo, rev)
    with repo.wlock():
        ds = repo.dirstate
        # None means "rebuild everything"; --minimal narrows this down.
        changedfiles = None
        if opts.get(r'minimal'):
            inmanifest = set(ctx.manifest().keys())
            indirstate = set(ds)
            # files present in the manifest but missing from the dirstate
            manifestonly = inmanifest - indirstate
            # dirstate-only files, except those explicitly marked as added
            notadded = {f for f in indirstate - inmanifest
                        if ds[f] != 'a'}
            changedfiles = manifestonly | notadded
        ds.rebuild(ctx.node(), ctx.manifest(), changedfiles)
2088 2088
@command('debugrebuildfncache', [], '')
def debugrebuildfncache(ui, repo):
    """rebuild the fncache file"""
    # Thin wrapper; the actual work lives in repair.rebuildfncache().
    repair.rebuildfncache(ui, repo)
2093 2093
@command('debugrename',
    [('r', 'rev', '', _('revision to debug'), _('REV'))],
    _('[-r REV] [FILE]...'))
def debugrename(ui, repo, *pats, **opts):
    """dump rename information"""

    opts = pycompat.byteskwargs(opts)
    ctx = scmutil.revsingle(repo, opts.get('rev'))
    matcher = scmutil.match(ctx, pats, opts)
    for path in ctx.walk(matcher):
        fctx = ctx[path]
        # renamed() yields (source path, source node) or a false value.
        renamed = fctx.filelog().renamed(fctx.filenode())
        relpath = repo.pathto(path)
        if not renamed:
            ui.write(_("%s not renamed\n") % relpath)
        else:
            ui.write(_("%s renamed from %s:%s\n")
                     % (relpath, renamed[0], hex(renamed[1])))
2111 2111
@command('debugrevlog', cmdutil.debugrevlogopts +
    [('d', 'dump', False, _('dump index data'))],
    _('-c|-m|FILE'),
    optionalrepo=True)
def debugrevlog(ui, repo, file_=None, **opts):
    """show data and statistics about a revlog"""
    opts = pycompat.byteskwargs(opts)
    r = cmdutil.openrevlog(repo, 'debugrevlog', file_, opts)

    # --dump: emit one raw line of index data per revision, then exit.
    if opts.get("dump"):
        numrevs = len(r)
        ui.write(("# rev p1rev p2rev start end deltastart base p1 p2"
                  " rawsize totalsize compression heads chainlen\n"))
        ts = 0
        heads = set()

        for rev in pycompat.xrange(numrevs):
            dbase = r.deltaparent(rev)
            if dbase == -1:
                # -1 means "no delta parent": the revision is its own base
                dbase = rev
            cbase = r.chainbase(rev)
            clen = r.chainlen(rev)
            p1, p2 = r.parentrevs(rev)
            rs = r.rawsize(rev)
            ts = ts + rs
            # Maintain the running set of heads: parents stop being heads
            # as soon as a child appears.
            heads -= set(r.parentrevs(rev))
            heads.add(rev)
            try:
                compression = ts / r.end(rev)
            except ZeroDivisionError:
                compression = 0
            ui.write("%5d %5d %5d %5d %5d %10d %4d %4d %4d %7d %9d "
                     "%11d %5d %8d\n" %
                     (rev, p1, p2, r.start(rev), r.end(rev),
                      r.start(dbase), r.start(cbase),
                      r.start(p1), r.start(p2),
                      rs, ts, compression, len(heads), clen))
        return 0

    # Decode the revlog version header into a format number and flag names.
    v = r.version
    format = v & 0xFFFF
    flags = []
    gdelta = False
    if v & revlog.FLAG_INLINE_DATA:
        flags.append('inline')
    if v & revlog.FLAG_GENERALDELTA:
        gdelta = True
        flags.append('generaldelta')
    if not flags:
        flags = ['(none)']

    ### tracks merge vs single parent
    nummerges = 0

    ### tracks ways the "delta" are build
    # nodelta
    numempty = 0
    numemptytext = 0
    numemptydelta = 0
    # full file content
    numfull = 0
    # intermediate snapshot against a prior snapshot
    numsemi = 0
    # snapshot count per depth
    numsnapdepth = collections.defaultdict(lambda: 0)
    # delta against previous revision
    numprev = 0
    # delta against first or second parent (not prev)
    nump1 = 0
    nump2 = 0
    # delta against neither prev nor parents
    numother = 0
    # delta against prev that are also first or second parent
    # (details of `numprev`)
    nump1prev = 0
    nump2prev = 0

    # data about delta chain of each revs
    chainlengths = []
    chainbases = []
    chainspans = []

    # data about each revision; each list is [min, max, total]
    datasize = [None, 0, 0]
    fullsize = [None, 0, 0]
    semisize = [None, 0, 0]
    # snapshot count per depth
    snapsizedepth = collections.defaultdict(lambda: [None, 0, 0])
    deltasize = [None, 0, 0]
    chunktypecounts = {}
    chunktypesizes = {}

    def addsize(size, l):
        # Fold `size` into a [min, max, total] accumulator in place.
        if l[0] is None or size < l[0]:
            l[0] = size
        if size > l[1]:
            l[1] = size
        l[2] += size

    # Single pass over all revisions, classifying how each is stored.
    numrevs = len(r)
    for rev in pycompat.xrange(numrevs):
        p1, p2 = r.parentrevs(rev)
        delta = r.deltaparent(rev)
        if format > 0:
            addsize(r.rawsize(rev), datasize)
        if p2 != nullrev:
            nummerges += 1
        size = r.length(rev)
        if delta == nullrev:
            # Full snapshot (no delta parent): starts a new chain.
            chainlengths.append(0)
            chainbases.append(r.start(rev))
            chainspans.append(size)
            if size == 0:
                numempty += 1
                numemptytext += 1
            else:
                numfull += 1
                numsnapdepth[0] += 1
                addsize(size, fullsize)
                addsize(size, snapsizedepth[0])
        else:
            # Delta: extend the delta chain of its base.
            chainlengths.append(chainlengths[delta] + 1)
            baseaddr = chainbases[delta]
            revaddr = r.start(rev)
            chainbases.append(baseaddr)
            chainspans.append((revaddr - baseaddr) + size)
            if size == 0:
                numempty += 1
                numemptydelta += 1
            elif r.issnapshot(rev):
                # Intermediate snapshot: a delta against another snapshot.
                addsize(size, semisize)
                numsemi += 1
                depth = r.snapshotdepth(rev)
                numsnapdepth[depth] += 1
                addsize(size, snapsizedepth[depth])
            else:
                # Regular delta: classify its base relative to prev/p1/p2.
                addsize(size, deltasize)
                if delta == rev - 1:
                    numprev += 1
                    if delta == p1:
                        nump1prev += 1
                    elif delta == p2:
                        nump2prev += 1
                elif delta == p1:
                    nump1 += 1
                elif delta == p2:
                    nump2 += 1
                elif delta != nullrev:
                    numother += 1

        # Obtain data on the raw chunks in the revlog.
        if util.safehasattr(r, '_getsegmentforrevs'):
            segment = r._getsegmentforrevs(rev, rev)[1]
        else:
            segment = r._revlog._getsegmentforrevs(rev, rev)[1]
        if segment:
            # First byte of the chunk identifies its compression engine.
            chunktype = bytes(segment[0:1])
        else:
            chunktype = 'empty'

        if chunktype not in chunktypecounts:
            chunktypecounts[chunktype] = 0
            chunktypesizes[chunktype] = 0

        chunktypecounts[chunktype] += 1
        chunktypesizes[chunktype] += size

    # Adjust size min value for empty cases
    for size in (datasize, fullsize, semisize, deltasize):
        if size[0] is None:
            size[0] = 0

    # Turn the [min, max, total] accumulators' totals into averages.
    numdeltas = numrevs - numfull - numempty - numsemi
    numoprev = numprev - nump1prev - nump2prev
    totalrawsize = datasize[2]
    datasize[2] /= numrevs
    fulltotal = fullsize[2]
    fullsize[2] /= numfull
    semitotal = semisize[2]
    snaptotal = {}
    if numsemi > 0:
        semisize[2] /= numsemi
    for depth in snapsizedepth:
        snaptotal[depth] = snapsizedepth[depth][2]
        snapsizedepth[depth][2] /= numsnapdepth[depth]

    deltatotal = deltasize[2]
    if numdeltas > 0:
        deltasize[2] /= numdeltas
    totalsize = fulltotal + semitotal + deltatotal
    avgchainlen = sum(chainlengths) / numrevs
    maxchainlen = max(chainlengths)
    maxchainspan = max(chainspans)
    compratio = 1
    if totalsize:
        compratio = totalrawsize / totalsize

    # Format-string builders: pad to the widest value being printed.
    basedfmtstr = '%%%dd\n'
    basepcfmtstr = '%%%dd %s(%%5.2f%%%%)\n'

    def dfmtstr(max):
        return basedfmtstr % len(str(max))
    def pcfmtstr(max, padding=0):
        return basepcfmtstr % (len(str(max)), ' ' * padding)

    def pcfmt(value, total):
        # (value, percentage-of-total) pair for the %d (%5.2f%%) format
        if total:
            return (value, 100 * float(value) / total)
        else:
            return value, 100.0

    ui.write(('format : %d\n') % format)
    ui.write(('flags : %s\n') % ', '.join(flags))

    ui.write('\n')
    fmt = pcfmtstr(totalsize)
    fmt2 = dfmtstr(totalsize)
    ui.write(('revisions : ') + fmt2 % numrevs)
    ui.write((' merges : ') + fmt % pcfmt(nummerges, numrevs))
    ui.write((' normal : ') + fmt % pcfmt(numrevs - nummerges, numrevs))
    ui.write(('revisions : ') + fmt2 % numrevs)
    ui.write((' empty : ') + fmt % pcfmt(numempty, numrevs))
    ui.write((' text : ')
             + fmt % pcfmt(numemptytext, numemptytext + numemptydelta))
    ui.write((' delta : ')
             + fmt % pcfmt(numemptydelta, numemptytext + numemptydelta))
    ui.write((' snapshot : ') + fmt % pcfmt(numfull + numsemi, numrevs))
    for depth in sorted(numsnapdepth):
        ui.write((' lvl-%-3d : ' % depth)
                 + fmt % pcfmt(numsnapdepth[depth], numrevs))
    ui.write((' deltas : ') + fmt % pcfmt(numdeltas, numrevs))
    ui.write(('revision size : ') + fmt2 % totalsize)
    ui.write((' snapshot : ')
             + fmt % pcfmt(fulltotal + semitotal, totalsize))
    for depth in sorted(numsnapdepth):
        ui.write((' lvl-%-3d : ' % depth)
                 + fmt % pcfmt(snaptotal[depth], totalsize))
    ui.write((' deltas : ') + fmt % pcfmt(deltatotal, totalsize))

    def fmtchunktype(chunktype):
        # Human-friendly label for a chunk's compression marker byte.
        if chunktype == 'empty':
            return ' %s : ' % chunktype
        elif chunktype in pycompat.bytestr(string.ascii_letters):
            return ' 0x%s (%s) : ' % (hex(chunktype), chunktype)
        else:
            return ' 0x%s : ' % hex(chunktype)

    ui.write('\n')
    ui.write(('chunks : ') + fmt2 % numrevs)
    for chunktype in sorted(chunktypecounts):
        ui.write(fmtchunktype(chunktype))
        ui.write(fmt % pcfmt(chunktypecounts[chunktype], numrevs))
    ui.write(('chunks size : ') + fmt2 % totalsize)
    for chunktype in sorted(chunktypecounts):
        ui.write(fmtchunktype(chunktype))
        ui.write(fmt % pcfmt(chunktypesizes[chunktype], totalsize))

    ui.write('\n')
    fmt = dfmtstr(max(avgchainlen, maxchainlen, maxchainspan, compratio))
    ui.write(('avg chain length : ') + fmt % avgchainlen)
    ui.write(('max chain length : ') + fmt % maxchainlen)
    ui.write(('max chain reach : ') + fmt % maxchainspan)
    ui.write(('compression ratio : ') + fmt % compratio)

    if format > 0:
        ui.write('\n')
        ui.write(('uncompressed data size (min/max/avg) : %d / %d / %d\n')
                 % tuple(datasize))
        ui.write(('full revision size (min/max/avg) : %d / %d / %d\n')
                 % tuple(fullsize))
        ui.write(('inter-snapshot size (min/max/avg) : %d / %d / %d\n')
                 % tuple(semisize))
        for depth in sorted(snapsizedepth):
            if depth == 0:
                continue
            ui.write((' level-%-3d (min/max/avg) : %d / %d / %d\n')
                     % ((depth,) + tuple(snapsizedepth[depth])))
        ui.write(('delta size (min/max/avg) : %d / %d / %d\n')
                 % tuple(deltasize))

    if numdeltas > 0:
        ui.write('\n')
        fmt = pcfmtstr(numdeltas)
        fmt2 = pcfmtstr(numdeltas, 4)
        ui.write(('deltas against prev : ') + fmt % pcfmt(numprev, numdeltas))
        if numprev > 0:
            ui.write((' where prev = p1 : ') + fmt2 % pcfmt(nump1prev,
                                                            numprev))
            ui.write((' where prev = p2 : ') + fmt2 % pcfmt(nump2prev,
                                                            numprev))
            ui.write((' other : ') + fmt2 % pcfmt(numoprev,
                                                  numprev))
        if gdelta:
            ui.write(('deltas against p1 : ')
                     + fmt % pcfmt(nump1, numdeltas))
            ui.write(('deltas against p2 : ')
                     + fmt % pcfmt(nump2, numdeltas))
        ui.write(('deltas against other : ') + fmt % pcfmt(numother,
                                                           numdeltas))
2411 2411
@command('debugrevlogindex', cmdutil.debugrevlogopts +
    [('f', 'format', 0, _('revlog format'), _('FORMAT'))],
    _('[-f FORMAT] -c|-m|FILE'),
    optionalrepo=True)
def debugrevlogindex(ui, repo, file_=None, **opts):
    """dump the contents of a revlog index"""
    opts = pycompat.byteskwargs(opts)
    r = cmdutil.openrevlog(repo, 'debugrevlogindex', file_, opts)
    format = opts.get('format', 0)
    if format not in (0, 1):
        raise error.Abort(_("unknown format %d") % format)

    # full-length hashes with --debug, short hashes otherwise
    if ui.debugflag:
        shortfn = hex
    else:
        shortfn = short

    # There might not be anything in r, so have a sane default
    idlen = 12
    for i in r:
        # width of one formatted node id; same for every revision
        idlen = len(shortfn(r.node(i)))
        break

    # Column headers; the layout depends on format and verbosity.
    if format == 0:
        if ui.verbose:
            ui.write((" rev offset length linkrev"
                      " %s %s p2\n") % ("nodeid".ljust(idlen),
                                        "p1".ljust(idlen)))
        else:
            ui.write((" rev linkrev %s %s p2\n") % (
                "nodeid".ljust(idlen), "p1".ljust(idlen)))
    elif format == 1:
        if ui.verbose:
            ui.write((" rev flag offset length size link p1"
                      " p2 %s\n") % "nodeid".rjust(idlen))
        else:
            ui.write((" rev flag size link p1 p2 %s\n") %
                     "nodeid".rjust(idlen))

    for i in r:
        node = r.node(i)
        if format == 0:
            try:
                pp = r.parents(node)
            except Exception:
                # fall back to null parents if they cannot be resolved
                pp = [nullid, nullid]
            if ui.verbose:
                ui.write("% 6d % 9d % 7d % 7d %s %s %s\n" % (
                    i, r.start(i), r.length(i), r.linkrev(i),
                    shortfn(node), shortfn(pp[0]), shortfn(pp[1])))
            else:
                ui.write("% 6d % 7d %s %s %s\n" % (
                    i, r.linkrev(i), shortfn(node), shortfn(pp[0]),
                    shortfn(pp[1])))
        elif format == 1:
            pr = r.parentrevs(i)
            if ui.verbose:
                ui.write("% 6d %04x % 8d % 8d % 8d % 6d % 6d % 6d %s\n" % (
                    i, r.flags(i), r.start(i), r.length(i), r.rawsize(i),
                    r.linkrev(i), pr[0], pr[1], shortfn(node)))
            else:
                ui.write("% 6d %04x % 8d % 6d % 6d % 6d %s\n" % (
                    i, r.flags(i), r.rawsize(i), r.linkrev(i), pr[0], pr[1],
                    shortfn(node)))
2476 2476
@command('debugrevspec',
    [('', 'optimize', None,
      _('print parsed tree after optimizing (DEPRECATED)')),
     ('', 'show-revs', True, _('print list of result revisions (default)')),
     ('s', 'show-set', None, _('print internal representation of result set')),
     ('p', 'show-stage', [],
      _('print parsed tree at the given stage'), _('NAME')),
     ('', 'no-optimized', False, _('evaluate tree without optimization')),
     ('', 'verify-optimized', False, _('verify optimized result')),
    ],
    ('REVSPEC'))
def debugrevspec(ui, repo, expr, **opts):
    """parse and apply a revision specification

    Use -p/--show-stage option to print the parsed tree at the given stages.
    Use -p all to print tree at every stage.

    Use --no-show-revs option with -s or -p to print only the set
    representation or the parsed tree respectively.

    Use --verify-optimized to compare the optimized result with the unoptimized
    one. Returns 1 if the optimized result differs.
    """
    opts = pycompat.byteskwargs(opts)
    aliases = ui.configitems('revsetalias')
    # The revset compilation pipeline, in order; each stage transforms
    # the tree produced by the previous one.
    stages = [
        ('parsed', lambda tree: tree),
        ('expanded', lambda tree: revsetlang.expandaliases(tree, aliases,
                                                           ui.warn)),
        ('concatenated', revsetlang.foldconcat),
        ('analyzed', revsetlang.analyze),
        ('optimized', revsetlang.optimize),
    ]
    if opts['no_optimized']:
        stages = stages[:-1]
    if opts['verify_optimized'] and opts['no_optimized']:
        raise error.Abort(_('cannot use --verify-optimized with '
                            '--no-optimized'))
    stagenames = set(n for n, f in stages)

    # Decide which stages' trees get printed (always vs only-if-changed).
    showalways = set()
    showchanged = set()
    if ui.verbose and not opts['show_stage']:
        # show parsed tree by --verbose (deprecated)
        showalways.add('parsed')
        showchanged.update(['expanded', 'concatenated'])
    if opts['optimize']:
        showalways.add('optimized')
    if opts['show_stage'] and opts['optimize']:
        raise error.Abort(_('cannot use --optimize with --show-stage'))
    if opts['show_stage'] == ['all']:
        showalways.update(stagenames)
    else:
        for n in opts['show_stage']:
            if n not in stagenames:
                raise error.Abort(_('invalid stage name: %s') % n)
        showalways.update(opts['show_stage'])

    # Run the pipeline, keeping every intermediate tree for later use.
    treebystage = {}
    printedtree = None
    tree = revsetlang.parse(expr, lookup=revset.lookupfn(repo))
    for n, f in stages:
        treebystage[n] = tree = f(tree)
        if n in showalways or (n in showchanged and tree != printedtree):
            if opts['show_stage'] or n != 'parsed':
                ui.write(("* %s:\n") % n)
            ui.write(revsetlang.prettyformat(tree), "\n")
            printedtree = tree

    if opts['verify_optimized']:
        # Evaluate both the analyzed and optimized trees and diff them.
        arevs = revset.makematcher(treebystage['analyzed'])(repo)
        brevs = revset.makematcher(treebystage['optimized'])(repo)
        if opts['show_set'] or (opts['show_set'] is None and ui.verbose):
            ui.write(("* analyzed set:\n"), stringutil.prettyrepr(arevs), "\n")
            ui.write(("* optimized set:\n"), stringutil.prettyrepr(brevs), "\n")
        arevs = list(arevs)
        brevs = list(brevs)
        if arevs == brevs:
            return 0
        # Mismatch: show a unified-diff-style comparison of the two sets.
        ui.write(('--- analyzed\n'), label='diff.file_a')
        ui.write(('+++ optimized\n'), label='diff.file_b')
        sm = difflib.SequenceMatcher(None, arevs, brevs)
        for tag, alo, ahi, blo, bhi in sm.get_opcodes():
            if tag in (r'delete', r'replace'):
                for c in arevs[alo:ahi]:
                    ui.write('-%d\n' % c, label='diff.deleted')
            if tag in (r'insert', r'replace'):
                for c in brevs[blo:bhi]:
                    ui.write('+%d\n' % c, label='diff.inserted')
            if tag == r'equal':
                for c in arevs[alo:ahi]:
                    ui.write(' %d\n' % c)
        return 1

    func = revset.makematcher(tree)
    revs = func(repo)
    if opts['show_set'] or (opts['show_set'] is None and ui.verbose):
        ui.write(("* set:\n"), stringutil.prettyrepr(revs), "\n")
    if not opts['show_revs']:
        return
    for c in revs:
        ui.write("%d\n" % c)
2579 2579
@command('debugserve', [
    ('', 'sshstdio', False, _('run an SSH server bound to process handles')),
    ('', 'logiofd', '', _('file descriptor to log server I/O to')),
    ('', 'logiofile', '', _('file to log server I/O to')),
], '')
def debugserve(ui, repo, **opts):
    """run a server with advanced settings

    This command is similar to :hg:`serve`. It exists partially as a
    workaround to the fact that ``hg serve --stdio`` must have specific
    arguments for security reasons.
    """
    opts = pycompat.byteskwargs(opts)

    if not opts['sshstdio']:
        raise error.Abort(_('only --sshstdio is currently supported'))

    logfh = None

    # --logiofd and --logiofile are mutually exclusive log destinations.
    if opts['logiofd'] and opts['logiofile']:
        raise error.Abort(_('cannot use both --logiofd and --logiofile'))

    if opts['logiofd']:
        # Line buffered because output is line based.
        try:
            logfh = os.fdopen(int(opts['logiofd']), r'ab', 1)
        except OSError as e:
            if e.errno != errno.ESPIPE:
                raise
            # can't seek a pipe, so `ab` mode fails on py3
            logfh = os.fdopen(int(opts['logiofd']), r'wb', 1)
    elif opts['logiofile']:
        # NOTE(review): buffering=1 requests line buffering, which Python 3
        # only honors for text-mode files -- confirm intent for 'ab' here.
        logfh = open(opts['logiofile'], 'ab', 1)

    s = wireprotoserver.sshserver(ui, repo, logfh=logfh)
    s.serve_forever()
2616 2616
@command('debugsetparents', [], _('REV1 [REV2]'))
def debugsetparents(ui, repo, rev1, rev2=None):
    """manually set the parents of the current working directory

    This is useful for writing repository conversion tools, but should
    be used with care. For example, neither the working directory nor the
    dirstate is updated, so file status may be incorrect after running this
    command.

    Returns 0 on success.
    """
    p1 = scmutil.revsingle(repo, rev1).node()
    # The second parent defaults to the null revision when omitted.
    p2 = scmutil.revsingle(repo, rev2, 'null').node()
    with repo.wlock():
        repo.setparents(p1, p2)
2634 2634
@command('debugssl', [], '[SOURCE]', optionalrepo=True)
def debugssl(ui, repo, source=None, **opts):
    '''test a secure connection to a server

    This builds the certificate chain for the server on Windows, installing the
    missing intermediates and trusted root via Windows Update if necessary. It
    does nothing on other platforms.

    If SOURCE is omitted, the 'default' path will be used. If a URL is given,
    that server is used. See :hg:`help urls` for more information.

    If the update succeeds, retry the original operation. Otherwise, the cause
    of the SSL error is likely another issue.
    '''
    if not pycompat.iswindows:
        raise error.Abort(_('certificate chain building is only possible on '
                            'Windows'))

    # Fall back to the repo's 'default' path when no source is given.
    if not source:
        if not repo:
            raise error.Abort(_("there is no Mercurial repository here, and no "
                                "server specified"))
        source = "default"

    source, branches = hg.parseurl(ui.expandpath(source))
    url = util.url(source)

    # Only schemes with a well-known TLS/SSH port are supported.
    defaultport = {'https': 443, 'ssh': 22}
    if url.scheme in defaultport:
        try:
            addr = (url.host, int(url.port or defaultport[url.scheme]))
        except ValueError:
            raise error.Abort(_("malformed port number in URL"))
    else:
        raise error.Abort(_("only https and ssh connections are supported"))

    from . import win32

    # CERT_NONE: we only need the peer's raw certificate bytes here, not
    # validation -- the chain check below is done by the Windows APIs.
    s = ssl.wrap_socket(socket.socket(), ssl_version=ssl.PROTOCOL_TLS,
                        cert_reqs=ssl.CERT_NONE, ca_certs=None)

    try:
        s.connect(addr)
        cert = s.getpeercert(True)

        ui.status(_('checking the certificate chain for %s\n') % url.host)

        # First probe without building; only hit Windows Update if needed.
        complete = win32.checkcertificatechain(cert, build=False)

        if not complete:
            ui.status(_('certificate chain is incomplete, updating... '))

            if not win32.checkcertificatechain(cert):
                ui.status(_('failed.\n'))
            else:
                ui.status(_('done.\n'))
        else:
            ui.status(_('full certificate chain is available\n'))
    finally:
        s.close()
2695 2695
@command('debugsub',
    [('r', 'rev', '',
      _('revision to check'), _('REV'))],
    _('[-r REV] [REV]'))
def debugsub(ui, repo, rev=None):
    # Dump the given (or working) revision's subrepo state, one entry
    # per subrepository, sorted by path.
    ctx = scmutil.revsingle(repo, rev, None)
    for path, state in sorted(ctx.substate.items()):
        ui.write(('path %s\n') % path)
        ui.write((' source %s\n') % state[0])
        ui.write((' revision %s\n') % state[1])
2706 2706
@command('debugsuccessorssets',
    [('', 'closest', False, _('return closest successors sets only'))],
    _('[REV]'))
def debugsuccessorssets(ui, repo, *revs, **opts):
    """show set of successors for revision

    A successors set of changeset A is a consistent group of revisions that
    succeed A. It contains non-obsolete changesets only unless closests
    successors set is set.

    In most cases a changeset A has a single successors set containing a single
    successor (changeset A replaced by A').

    A changeset that is made obsolete with no successors are called "pruned".
    Such changesets have no successors sets at all.

    A changeset that has been "split" will have a successors set containing
    more than one successor.

    A changeset that has been rewritten in multiple different ways is called
    "divergent". Such changesets have multiple successor sets (each of which
    may also be split, i.e. have multiple successors).

    Results are displayed as follows::

        <rev1>
            <successors-1A>
        <rev2>
            <successors-2A>
            <successors-2B1> <successors-2B2> <successors-2B3>

    Here rev2 has two possible (i.e. divergent) successors sets. The first
    holds one element, whereas the second holds three (i.e. the changeset has
    been split).
    """
    # passed to successorssets caching computation from one call to another
    cache = {}
    ctx2str = bytes
    node2str = short
    for rev in scmutil.revrange(repo, revs):
        ctx = repo[rev]
        ui.write('%s\n'% ctx2str(ctx))
        # one output line per successors set; a pruned changeset yields none
        for succsset in obsutil.successorssets(repo, ctx.node(),
                                               closest=opts[r'closest'],
                                               cache=cache):
            if succsset:
                ui.write(' ')
                ui.write(node2str(succsset[0]))
                for node in succsset[1:]:
                    ui.write(' ')
                    ui.write(node2str(node))
            ui.write('\n')
2759 2759
@command('debugtemplate',
    [('r', 'rev', [], _('apply template on changesets'), _('REV')),
     ('D', 'define', [], _('define template keyword'), _('KEY=VALUE'))],
    _('[-r REV]... [-D KEY=VALUE]... TEMPLATE'),
    optionalrepo=True)
def debugtemplate(ui, repo, tmpl, **opts):
    """parse and apply a template

    If -r/--rev is given, the template is processed as a log template and
    applied to the given changesets. Otherwise, it is processed as a generic
    template.

    Use --verbose to print the parsed tree.
    """
    revs = None
    if opts[r'rev']:
        if repo is None:
            raise error.RepoError(_('there is no Mercurial repository here '
                                    '(.hg not found)'))
        revs = scmutil.revrange(repo, opts[r'rev'])

    # Turn -D KEY=VALUE definitions into template properties; a missing '='
    # makes the 2-element unpack raise ValueError, which we report uniformly.
    props = {}
    for spec in opts[r'define']:
        try:
            key, val = (part.strip() for part in spec.split('=', 1))
            if not key or key == 'ui':
                raise ValueError
            props[key] = val
        except ValueError:
            raise error.Abort(_('malformed keyword definition: %s') % spec)

    if ui.verbose:
        aliases = ui.configitems('templatealias')
        tree = templater.parse(tmpl)
        ui.note(templater.prettyformat(tree), '\n')
        newtree = templater.expandaliases(tree, aliases)
        if newtree != tree:
            ui.note(("* expanded:\n"), templater.prettyformat(newtree), '\n')

    def showsymbols(t):
        # List the default keywords/functions the template references.
        kwds, funcs = t.symbolsuseddefault()
        ui.write(("* keywords: %s\n") % ', '.join(sorted(kwds)))
        ui.write(("* functions: %s\n") % ', '.join(sorted(funcs)))

    if revs is None:
        # Generic template: render once with the default resources.
        tres = formatter.templateresources(ui, repo)
        t = formatter.maketemplater(ui, tmpl, resources=tres)
        if ui.verbose:
            showsymbols(t)
        ui.write(t.renderdefault(props))
    else:
        # Log template: render once per requested changeset.
        displayer = logcmdutil.maketemplater(ui, repo, tmpl)
        if ui.verbose:
            showsymbols(displayer.t)
        for rev in revs:
            displayer.show(repo[rev], **pycompat.strkwargs(props))
        displayer.close()
2816 2816
@command('debuguigetpass', [
    ('p', 'prompt', '', _('prompt text'), _('TEXT')),
], _('[-p TEXT]'), norepo=True)
def debuguigetpass(ui, prompt=''):
    """show prompt to type password"""
    r = ui.getpass(prompt)
    # "response:" (was the typo "respose:") to match debuguiprompt's output.
    ui.write(('response: %s\n') % r)
2824 2824
@command('debuguiprompt', [
    ('p', 'prompt', '', _('prompt text'), _('TEXT')),
], _('[-p TEXT]'), norepo=True)
def debuguiprompt(ui, prompt=''):
    """show plain prompt"""
    # Echo back whatever the user typed so tests can assert on it.
    answer = ui.prompt(prompt)
    ui.write(('response: %s\n') % answer)
2832 2832
@command('debugupdatecaches', [])
def debugupdatecaches(ui, repo, *pats, **opts):
    """warm all known caches in the repository"""
    # Hold both the working-directory lock and the store lock (in that
    # order) while the cache files are regenerated.
    with repo.wlock():
        with repo.lock():
            repo.updatecaches(full=True)
2838 2838
@command('debugupgraderepo', [
    ('o', 'optimize', [], _('extra optimization to perform'), _('NAME')),
    ('', 'run', False, _('performs an upgrade')),
    ('', 'backup', True, _('keep the old repository content around')),
])
def debugupgraderepo(ui, repo, run=False, optimize=None, backup=True):
    """upgrade a repository to use different features

    If no arguments are specified, the repository is evaluated for upgrade
    and a list of problems and potential optimizations is printed.

    With ``--run``, a repository upgrade is performed. Behavior of the upgrade
    can be influenced via additional arguments. More details will be provided
    by the command output when run without ``--run``.

    During the upgrade, the repository will be locked and no writes will be
    allowed.

    At the end of the upgrade, the repository may not be readable while new
    repository data is swapped in. This window will be as long as it takes to
    rename some directories inside the ``.hg`` directory. On most machines, this
    should complete almost instantaneously and the chances of a consumer being
    unable to access the repository should be low.
    """
    # All of the heavy lifting lives in the upgrade module; this command is
    # only the CLI entry point.
    return upgrade.upgraderepo(
        ui, repo, run=run, optimize=optimize, backup=backup)
2865 2865
@command('debugwalk', cmdutil.walkopts, _('[OPTION]... [FILE]...'),
         inferrepo=True)
def debugwalk(ui, repo, *pats, **opts):
    """show how files match on given patterns"""
    opts = pycompat.byteskwargs(opts)
    matcher = scmutil.match(repo[None], pats, opts)
    if ui.verbose:
        ui.write(('* matcher:\n'), stringutil.prettyrepr(matcher), '\n')
    files = list(repo[None].walk(matcher))
    if not files:
        return
    # Only rewrite path separators when ui.slash asks for '/' on a platform
    # whose native separator differs.
    if ui.configbool('ui', 'slash') and pycompat.ossep != '/':
        display = util.normpath
    else:
        display = lambda fn: fn
    namewidth = max(len(fname) for fname in files)
    pathwidth = max(len(repo.pathto(fname)) for fname in files)
    fmt = 'f %%-%ds %%-%ds %%s' % (namewidth, pathwidth)
    for fname in files:
        flag = 'exact' if matcher.exact(fname) else ''
        line = fmt % (fname, display(repo.pathto(fname)), flag)
        ui.write("%s\n" % line.rstrip())
2886 2886
@command('debugwhyunstable', [], _('REV'))
def debugwhyunstable(ui, repo, rev):
    """explain instabilities of a changeset"""
    ctx = scmutil.revsingle(repo, rev)
    for entry in obsutil.whyunstable(repo, ctx):
        divergent = entry.get('divergentnodes')
        if divergent:
            # List each divergent node with its phase, followed by a space
            # so the reason text that follows stays separated.
            dnodes = ' '.join('%s (%s)' % (c.hex(), c.phasestr())
                              for c in divergent) + ' '
        else:
            dnodes = ''
        ui.write('%s: %s%s %s\n' % (entry['instability'], dnodes,
                                    entry['reason'], entry['node']))
2897 2897
@command('debugwireargs',
    [('', 'three', '', 'three'),
     ('', 'four', '', 'four'),
     ('', 'five', '', 'five'),
    ] + cmdutil.remoteopts,
    _('REPO [OPTIONS]... [ONE [TWO]]'),
    norepo=True)
def debugwireargs(ui, repopath, *vals, **opts):
    # Round-trip arguments through the remote "debugwireargs" command to
    # exercise wire protocol argument encoding.
    opts = pycompat.byteskwargs(opts)
    peer = hg.peer(ui, opts, repopath)
    # The generic remote options are consumed by hg.peer(); only the test
    # options (three/four/five) are forwarded.
    for remoteopt in cmdutil.remoteopts:
        del opts[remoteopt[1]]
    # Forward only options that were actually given a value.
    args = pycompat.strkwargs(
        {k: v for k, v in opts.iteritems() if v})
    # run twice to check that we don't mess up the stream for the next command
    res1 = peer.debugwireargs(*vals, **args)
    res2 = peer.debugwireargs(*vals, **args)
    ui.write("%s\n" % res1)
    if res1 != res2:
        ui.warn("%s\n" % res2)
2921 2921
2922 2922 def _parsewirelangblocks(fh):
2923 2923 activeaction = None
2924 2924 blocklines = []
2925 2925 lastindent = 0
2926 2926
2927 2927 for line in fh:
2928 2928 line = line.rstrip()
2929 2929 if not line:
2930 2930 continue
2931 2931
2932 2932 if line.startswith(b'#'):
2933 2933 continue
2934 2934
2935 2935 if not line.startswith(b' '):
2936 2936 # New block. Flush previous one.
2937 2937 if activeaction:
2938 2938 yield activeaction, blocklines
2939 2939
2940 2940 activeaction = line
2941 2941 blocklines = []
2942 2942 lastindent = 0
2943 2943 continue
2944 2944
2945 2945 # Else we start with an indent.
2946 2946
2947 2947 if not activeaction:
2948 2948 raise error.Abort(_('indented line outside of block'))
2949 2949
2950 2950 indent = len(line) - len(line.lstrip())
2951 2951
2952 2952 # If this line is indented more than the last line, concatenate it.
2953 2953 if indent > lastindent and blocklines:
2954 2954 blocklines[-1] += line.lstrip()
2955 2955 else:
2956 2956 blocklines.append(line)
2957 2957 lastindent = indent
2958 2958
2959 2959 # Flush last block.
2960 2960 if activeaction:
2961 2961 yield activeaction, blocklines
2962 2962
@command('debugwireproto',
    [
        ('', 'localssh', False, _('start an SSH server for this repo')),
        ('', 'peer', '', _('construct a specific version of the peer')),
        ('', 'noreadstderr', False, _('do not read from stderr of the remote')),
        ('', 'nologhandshake', False,
         _('do not log I/O related to the peer handshake')),
    ] + cmdutil.remoteopts,
    _('[PATH]'),
    optionalrepo=True)
def debugwireproto(ui, repo, path=None, **opts):
    """send wire protocol commands to a server

    This command can be used to issue wire protocol commands to remote
    peers and to debug the raw data being exchanged.

    ``--localssh`` will start an SSH server against the current repository
    and connect to that. By default, the connection will perform a handshake
    and establish an appropriate peer instance.

    ``--peer`` can be used to bypass the handshake protocol and construct a
    peer instance using the specified class type. Valid values are ``raw``,
    ``http2``, ``ssh1``, and ``ssh2``. ``raw`` instances only allow sending
    raw data payloads and don't support higher-level command actions.

    ``--noreadstderr`` can be used to disable automatic reading from stderr
    of the peer (for SSH connections only). Disabling automatic reading of
    stderr is useful for making output more deterministic.

    Commands are issued via a mini language which is specified via stdin.
    The language consists of individual actions to perform. An action is
    defined by a block. A block is defined as a line with no leading
    space followed by 0 or more lines with leading space. Blocks are
    effectively a high-level command with additional metadata.

    Lines beginning with ``#`` are ignored.

    The following sections denote available actions.

    raw
    ---

    Send raw data to the server.

    The block payload contains the raw data to send as one atomic send
    operation. The data may not actually be delivered in a single system
    call: it depends on the abilities of the transport being used.

    Each line in the block is de-indented and concatenated. Then, that
    value is evaluated as a Python b'' literal. This allows the use of
    backslash escaping, etc.

    raw+
    ----

    Behaves like ``raw`` except flushes output afterwards.

    command <X>
    -----------

    Send a request to run a named command, whose name follows the ``command``
    string.

    Arguments to the command are defined as lines in this block. The format of
    each line is ``<key> <value>``. e.g.::

        command listkeys
            namespace bookmarks

    If the value begins with ``eval:``, it will be interpreted as a Python
    literal expression. Otherwise values are interpreted as Python b'' literals.
    This allows sending complex types and encoding special byte sequences via
    backslash escaping.

    The following arguments have special meaning:

    ``PUSHFILE``
        When defined, the *push* mechanism of the peer will be used instead
        of the static request-response mechanism and the content of the
        file specified in the value of this argument will be sent as the
        command payload.

        This can be used to submit a local bundle file to the remote.

    batchbegin
    ----------

    Instruct the peer to begin a batched send.

    All ``command`` blocks are queued for execution until the next
    ``batchsubmit`` block.

    batchsubmit
    -----------

    Submit previously queued ``command`` blocks as a batch request.

    This action MUST be paired with a ``batchbegin`` action.

    httprequest <method> <path>
    ---------------------------

    (HTTP peer only)

    Send an HTTP request to the peer.

    The HTTP request line follows the ``httprequest`` action. e.g. ``GET /foo``.

    Arguments of the form ``<key>: <value>`` are interpreted as HTTP request
    headers to add to the request. e.g. ``Accept: foo``.

    The following arguments are special:

    ``BODYFILE``
        The content of the file defined as the value to this argument will be
        transferred verbatim as the HTTP request body.

    ``frame <type> <flags> <payload>``
        Send a unified protocol frame as part of the request body.

        All frames will be collected and sent as the body to the HTTP
        request.

    close
    -----

    Close the connection to the server.

    flush
    -----

    Flush data written to the server.

    readavailable
    -------------

    Close the write end of the connection and read all available data from
    the server.

    If the connection to the server encompasses multiple pipes, we poll both
    pipes and read available data.

    readline
    --------

    Read a line of output from the server. If there are multiple output
    pipes, reads only the main pipe.

    ereadline
    ---------

    Like ``readline``, but read from the stderr pipe, if available.

    read <X>
    --------

    ``read()`` N bytes from the server's main output pipe.

    eread <X>
    ---------

    ``read()`` N bytes from the server's stderr pipe, if available.

    Specifying Unified Frame-Based Protocol Frames
    ----------------------------------------------

    It is possible to emit a *Unified Frame-Based Protocol* by using special
    syntax.

    A frame is composed as a type, flags, and payload. These can be parsed
    from a string of the form:

       <request-id> <stream-id> <stream-flags> <type> <flags> <payload>

    ``request-id`` and ``stream-id`` are integers defining the request and
    stream identifiers.

    ``type`` can be an integer value for the frame type or the string name
    of the type. The strings are defined in ``wireprotoframing.py``. e.g.
    ``command-name``.

    ``stream-flags`` and ``flags`` are a ``|`` delimited list of flag
    components. Each component (and there can be just one) can be an integer
    or a flag name for stream flags or frame flags, respectively. Values are
    resolved to integers and then bitwise OR'd together.

    ``payload`` represents the raw frame payload. If it begins with
    ``cbor:``, the following string is evaluated as Python code and the
    resulting object is fed into a CBOR encoder. Otherwise it is interpreted
    as a Python byte string literal.
    """
    opts = pycompat.byteskwargs(opts)

    if opts['localssh'] and not repo:
        raise error.Abort(_('--localssh requires a repository'))

    # Note: 'http2' is accepted here, so it must appear in the hint below
    # (the hint previously omitted it).
    if opts['peer'] and opts['peer'] not in ('raw', 'http2', 'ssh1', 'ssh2'):
        raise error.Abort(_('invalid value for --peer'),
                          hint=_('valid values are "raw", "http2", "ssh1", '
                                 'and "ssh2"'))

    if path and opts['localssh']:
        raise error.Abort(_('cannot specify --localssh with an explicit '
                            'path'))

    if ui.interactive():
        ui.write(_('(waiting for commands on stdin)\n'))

    blocks = list(_parsewirelangblocks(ui.fin))

    proc = None
    stdin = None
    stdout = None
    stderr = None
    opener = None

    if opts['localssh']:
        # We start the SSH server in its own process so there is process
        # separation. This prevents a whole class of potential bugs around
        # shared state from interfering with server operation.
        args = procutil.hgcmd() + [
            '-R', repo.root,
            'debugserve', '--sshstdio',
        ]
        proc = subprocess.Popen(pycompat.rapply(procutil.tonativestr, args),
                                stdin=subprocess.PIPE,
                                stdout=subprocess.PIPE, stderr=subprocess.PIPE,
                                bufsize=0)

        stdin = proc.stdin
        stdout = proc.stdout
        stderr = proc.stderr

        # We turn the pipes into observers so we can log I/O.
        if ui.verbose or opts['peer'] == 'raw':
            stdin = util.makeloggingfileobject(ui, proc.stdin, b'i',
                                               logdata=True)
            stdout = util.makeloggingfileobject(ui, proc.stdout, b'o',
                                                logdata=True)
            stderr = util.makeloggingfileobject(ui, proc.stderr, b'e',
                                                logdata=True)

        # --localssh also implies the peer connection settings.

        url = 'ssh://localserver'
        autoreadstderr = not opts['noreadstderr']

        if opts['peer'] == 'ssh1':
            ui.write(_('creating ssh peer for wire protocol version 1\n'))
            peer = sshpeer.sshv1peer(ui, url, proc, stdin, stdout, stderr,
                                     None, autoreadstderr=autoreadstderr)
        elif opts['peer'] == 'ssh2':
            ui.write(_('creating ssh peer for wire protocol version 2\n'))
            peer = sshpeer.sshv2peer(ui, url, proc, stdin, stdout, stderr,
                                     None, autoreadstderr=autoreadstderr)
        elif opts['peer'] == 'raw':
            ui.write(_('using raw connection to peer\n'))
            peer = None
        else:
            ui.write(_('creating ssh peer from handshake results\n'))
            peer = sshpeer.makepeer(ui, url, proc, stdin, stdout, stderr,
                                    autoreadstderr=autoreadstderr)

    elif path:
        # We bypass hg.peer() so we can proxy the sockets.
        # TODO consider not doing this because we skip
        # ``hg.wirepeersetupfuncs`` and potentially other useful functionality.
        u = util.url(path)
        if u.scheme != 'http':
            raise error.Abort(_('only http:// paths are currently supported'))

        url, authinfo = u.authinfo()
        openerargs = {
            r'useragent': b'Mercurial debugwireproto',
        }

        # Turn pipes/sockets into observers so we can log I/O.
        if ui.verbose:
            openerargs.update({
                r'loggingfh': ui,
                r'loggingname': b's',
                r'loggingopts': {
                    r'logdata': True,
                    r'logdataapis': False,
                },
            })

        if ui.debugflag:
            openerargs[r'loggingopts'][r'logdataapis'] = True

        # Don't send default headers when in raw mode. This allows us to
        # bypass most of the behavior of our URL handling code so we can
        # have near complete control over what's sent on the wire.
        if opts['peer'] == 'raw':
            openerargs[r'sendaccept'] = False

        opener = urlmod.opener(ui, authinfo, **openerargs)

        if opts['peer'] == 'http2':
            ui.write(_('creating http peer for wire protocol version 2\n'))
            # We go through makepeer() because we need an API descriptor for
            # the peer instance to be useful.
            with ui.configoverride({
                ('experimental', 'httppeer.advertise-v2'): True}):
                if opts['nologhandshake']:
                    ui.pushbuffer()

                peer = httppeer.makepeer(ui, path, opener=opener)

                if opts['nologhandshake']:
                    ui.popbuffer()

            if not isinstance(peer, httppeer.httpv2peer):
                raise error.Abort(_('could not instantiate HTTP peer for '
                                    'wire protocol version 2'),
                                  hint=_('the server may not have the feature '
                                         'enabled or is not allowing this '
                                         'client version'))

        elif opts['peer'] == 'raw':
            ui.write(_('using raw connection to peer\n'))
            peer = None
        elif opts['peer']:
            raise error.Abort(_('--peer %s not supported with HTTP peers') %
                              opts['peer'])
        else:
            peer = httppeer.makepeer(ui, path, opener=opener)

        # We /could/ populate stdin/stdout with sock.makefile()...
    else:
        raise error.Abort(_('unsupported connection configuration'))

    batchedcommands = None

    # Now perform actions based on the parsed wire language instructions.
    for action, lines in blocks:
        if action in ('raw', 'raw+'):
            if not stdin:
                raise error.Abort(_('cannot call raw/raw+ on this peer'))

            # Concatenate the data together.
            data = ''.join(l.lstrip() for l in lines)
            data = stringutil.unescapestr(data)
            stdin.write(data)

            if action == 'raw+':
                stdin.flush()
        elif action == 'flush':
            if not stdin:
                raise error.Abort(_('cannot call flush on this peer'))
            stdin.flush()
        elif action.startswith('command'):
            if not peer:
                raise error.Abort(_('cannot send commands unless peer instance '
                                    'is available'))

            command = action.split(' ', 1)[1]

            args = {}
            for line in lines:
                # We need to allow empty values.
                fields = line.lstrip().split(' ', 1)
                if len(fields) == 1:
                    key = fields[0]
                    value = ''
                else:
                    key, value = fields

                if value.startswith('eval:'):
                    value = stringutil.evalpythonliteral(value[5:])
                else:
                    value = stringutil.unescapestr(value)

                args[key] = value

            if batchedcommands is not None:
                batchedcommands.append((command, args))
                continue

            ui.status(_('sending %s command\n') % command)

            if 'PUSHFILE' in args:
                with open(args['PUSHFILE'], r'rb') as fh:
                    del args['PUSHFILE']
                    res, output = peer._callpush(command, fh,
                                                 **pycompat.strkwargs(args))
                    ui.status(_('result: %s\n') % stringutil.escapestr(res))
                    ui.status(_('remote output: %s\n') %
                              stringutil.escapestr(output))
            else:
                with peer.commandexecutor() as e:
                    res = e.callcommand(command, args).result()

                if isinstance(res, wireprotov2peer.commandresponse):
                    val = res.objects()
                    ui.status(_('response: %s\n') %
                              stringutil.pprint(val, bprefix=True, indent=2))
                else:
                    ui.status(_('response: %s\n') %
                              stringutil.pprint(res, bprefix=True, indent=2))

        elif action == 'batchbegin':
            if batchedcommands is not None:
                raise error.Abort(_('nested batchbegin not allowed'))

            batchedcommands = []
        elif action == 'batchsubmit':
            # There is a batching API we could go through. But it would be
            # difficult to normalize requests into function calls. It is easier
            # to bypass this layer and normalize to commands + args.
            ui.status(_('sending batch with %d sub-commands\n') %
                      len(batchedcommands))
            for i, chunk in enumerate(peer._submitbatch(batchedcommands)):
                ui.status(_('response #%d: %s\n') %
                          (i, stringutil.escapestr(chunk)))

            batchedcommands = None

        elif action.startswith('httprequest '):
            if not opener:
                raise error.Abort(_('cannot use httprequest without an HTTP '
                                    'peer'))

            request = action.split(' ', 2)
            if len(request) != 3:
                raise error.Abort(_('invalid httprequest: expected format is '
                                    '"httprequest <method> <path>"'))

            method, httppath = request[1:]
            headers = {}
            body = None
            frames = []
            for line in lines:
                line = line.lstrip()
                m = re.match(b'^([a-zA-Z0-9_-]+): (.*)$', line)
                if m:
                    # Headers need to use native strings.
                    key = pycompat.strurl(m.group(1))
                    value = pycompat.strurl(m.group(2))
                    headers[key] = value
                    continue

                if line.startswith(b'BODYFILE '):
                    # split() returns [keyword, path]; open the path (element
                    # 1), not the list itself (passing the list was a bug that
                    # raised TypeError whenever BODYFILE was used).
                    with open(line.split(b' ', 1)[1], 'rb') as fh:
                        body = fh.read()
                elif line.startswith(b'frame '):
                    frame = wireprotoframing.makeframefromhumanstring(
                        line[len(b'frame '):])

                    frames.append(frame)
                else:
                    raise error.Abort(_('unknown argument to httprequest: %s') %
                                      line)

            url = path + httppath

            if frames:
                body = b''.join(bytes(f) for f in frames)

            req = urlmod.urlreq.request(pycompat.strurl(url), body, headers)

            # urllib.Request insists on using has_data() as a proxy for
            # determining the request method. Override that to use our
            # explicitly requested method.
            req.get_method = lambda: pycompat.sysstr(method)

            try:
                res = opener.open(req)
                body = res.read()
            except util.urlerr.urlerror as e:
                # read() method must be called, but only exists in Python 2
                getattr(e, 'read', lambda: None)()
                continue

            ct = res.headers.get(r'Content-Type')
            if ct == r'application/mercurial-cbor':
                ui.write(_('cbor> %s\n') %
                         stringutil.pprint(cborutil.decodeall(body),
                                           bprefix=True,
                                           indent=2))

        elif action == 'close':
            peer.close()
        elif action == 'readavailable':
            if not stdout or not stderr:
                raise error.Abort(_('readavailable not available on this peer'))

            stdin.close()
            stdout.read()
            stderr.read()

        elif action == 'readline':
            if not stdout:
                raise error.Abort(_('readline not available on this peer'))
            stdout.readline()
        elif action == 'ereadline':
            if not stderr:
                raise error.Abort(_('ereadline not available on this peer'))
            stderr.readline()
        elif action.startswith('read '):
            count = int(action.split(' ', 1)[1])
            if not stdout:
                raise error.Abort(_('read not available on this peer'))
            stdout.read(count)
        elif action.startswith('eread '):
            count = int(action.split(' ', 1)[1])
            if not stderr:
                raise error.Abort(_('eread not available on this peer'))
            stderr.read(count)
        else:
            raise error.Abort(_('unknown action: %s') % action)

    if batchedcommands is not None:
        raise error.Abort(_('unclosed "batchbegin" request'))

    if peer:
        peer.close()

    if proc:
        proc.kill()
@@ -1,3316 +1,3325 b''
1 1 #!/usr/bin/env python
2 2 #
3 3 # run-tests.py - Run a set of tests on Mercurial
4 4 #
5 5 # Copyright 2006 Matt Mackall <mpm@selenic.com>
6 6 #
7 7 # This software may be used and distributed according to the terms of the
8 8 # GNU General Public License version 2 or any later version.
9 9
10 10 # Modifying this script is tricky because it has many modes:
11 11 # - serial (default) vs parallel (-jN, N > 1)
12 12 # - no coverage (default) vs coverage (-c, -C, -s)
13 13 # - temp install (default) vs specific hg script (--with-hg, --local)
14 14 # - tests are a mix of shell scripts and Python scripts
15 15 #
16 16 # If you change this script, it is recommended that you ensure you
17 17 # haven't broken it by running it in various modes with a representative
18 18 # sample of test scripts. For example:
19 19 #
20 20 # 1) serial, no coverage, temp install:
21 21 # ./run-tests.py test-s*
22 22 # 2) serial, no coverage, local hg:
23 23 # ./run-tests.py --local test-s*
24 24 # 3) serial, coverage, temp install:
25 25 # ./run-tests.py -c test-s*
26 26 # 4) serial, coverage, local hg:
27 27 # ./run-tests.py -c --local test-s* # unsupported
28 28 # 5) parallel, no coverage, temp install:
29 29 # ./run-tests.py -j2 test-s*
30 30 # 6) parallel, no coverage, local hg:
31 31 # ./run-tests.py -j2 --local test-s*
32 32 # 7) parallel, coverage, temp install:
33 33 # ./run-tests.py -j2 -c test-s* # currently broken
34 34 # 8) parallel, coverage, local install:
35 35 # ./run-tests.py -j2 -c --local test-s* # unsupported (and broken)
36 36 # 9) parallel, custom tmp dir:
37 37 # ./run-tests.py -j2 --tmpdir /tmp/myhgtests
38 38 # 10) parallel, pure, tests that call run-tests:
39 39 # ./run-tests.py --pure `grep -l run-tests.py *.t`
40 40 #
41 41 # (You could use any subset of the tests: test-s* happens to match
42 42 # enough that it's worth doing parallel runs, few enough that it
43 43 # completes fairly quickly, includes both shell and Python scripts, and
44 44 # includes some scripts that run daemon processes.)
45 45
46 46 from __future__ import absolute_import, print_function
47 47
48 48 import argparse
49 49 import collections
50 50 import difflib
51 51 import distutils.version as version
52 52 import errno
53 53 import json
54 54 import multiprocessing
55 55 import os
56 56 import random
57 57 import re
58 58 import shutil
59 59 import signal
60 60 import socket
61 61 import subprocess
62 62 import sys
63 63 import sysconfig
64 64 import tempfile
65 65 import threading
66 66 import time
67 67 import unittest
68 68 import uuid
69 69 import xml.dom.minidom as minidom
70 70
71 71 try:
72 72 import Queue as queue
73 73 except ImportError:
74 74 import queue
75 75
76 76 try:
77 77 import shlex
78 78 shellquote = shlex.quote
79 79 except (ImportError, AttributeError):
80 80 import pipes
81 81 shellquote = pipes.quote
82 82
83 83 if os.environ.get('RTUNICODEPEDANTRY', False):
84 84 try:
85 85 reload(sys)
86 86 sys.setdefaultencoding("undefined")
87 87 except NameError:
88 88 pass
89 89
90 90 processlock = threading.Lock()
91 91
92 92 pygmentspresent = False
93 93 # ANSI color is unsupported prior to Windows 10
94 94 if os.name != 'nt':
95 95 try: # is pygments installed
96 96 import pygments
97 97 import pygments.lexers as lexers
98 98 import pygments.lexer as lexer
99 99 import pygments.formatters as formatters
100 100 import pygments.token as token
101 101 import pygments.style as style
102 102 pygmentspresent = True
103 103 difflexer = lexers.DiffLexer()
104 104 terminal256formatter = formatters.Terminal256Formatter()
105 105 except ImportError:
106 106 pass
107 107
108 108 if pygmentspresent:
109 109 class TestRunnerStyle(style.Style):
110 110 default_style = ""
111 111 skipped = token.string_to_tokentype("Token.Generic.Skipped")
112 112 failed = token.string_to_tokentype("Token.Generic.Failed")
113 113 skippedname = token.string_to_tokentype("Token.Generic.SName")
114 114 failedname = token.string_to_tokentype("Token.Generic.FName")
115 115 styles = {
116 116 skipped: '#e5e5e5',
117 117 skippedname: '#00ffff',
118 118 failed: '#7f0000',
119 119 failedname: '#ff0000',
120 120 }
121 121
122 122 class TestRunnerLexer(lexer.RegexLexer):
123 123 testpattern = r'[\w-]+\.(t|py)(#[a-zA-Z0-9_\-\.]+)?'
124 124 tokens = {
125 125 'root': [
126 126 (r'^Skipped', token.Generic.Skipped, 'skipped'),
127 127 (r'^Failed ', token.Generic.Failed, 'failed'),
128 128 (r'^ERROR: ', token.Generic.Failed, 'failed'),
129 129 ],
130 130 'skipped': [
131 131 (testpattern, token.Generic.SName),
132 132 (r':.*', token.Generic.Skipped),
133 133 ],
134 134 'failed': [
135 135 (testpattern, token.Generic.FName),
136 136 (r'(:| ).*', token.Generic.Failed),
137 137 ]
138 138 }
139 139
140 140 runnerformatter = formatters.Terminal256Formatter(style=TestRunnerStyle)
141 141 runnerlexer = TestRunnerLexer()
142 142
143 143 origenviron = os.environ.copy()
144 144
145 145 if sys.version_info > (3, 5, 0):
146 146 PYTHON3 = True
147 147 xrange = range # we use xrange in one place, and we'd rather not use range
148 148 def _bytespath(p):
149 149 if p is None:
150 150 return p
151 151 return p.encode('utf-8')
152 152
153 153 def _strpath(p):
154 154 if p is None:
155 155 return p
156 156 return p.decode('utf-8')
157 157
158 158 osenvironb = getattr(os, 'environb', None)
159 159 if osenvironb is None:
160 160 # Windows lacks os.environb, for instance. A proxy over the real thing
161 161 # instead of a copy allows the environment to be updated via bytes on
162 162 # all platforms.
163 163 class environbytes(object):
164 164 def __init__(self, strenv):
165 165 self.__len__ = strenv.__len__
166 166 self.clear = strenv.clear
167 167 self._strenv = strenv
168 168 def __getitem__(self, k):
169 169 v = self._strenv.__getitem__(_strpath(k))
170 170 return _bytespath(v)
171 171 def __setitem__(self, k, v):
172 172 self._strenv.__setitem__(_strpath(k), _strpath(v))
173 173 def __delitem__(self, k):
174 174 self._strenv.__delitem__(_strpath(k))
175 175 def __contains__(self, k):
176 176 return self._strenv.__contains__(_strpath(k))
177 177 def __iter__(self):
178 178 return iter([_bytespath(k) for k in iter(self._strenv)])
179 179 def get(self, k, default=None):
180 180 v = self._strenv.get(_strpath(k), _strpath(default))
181 181 return _bytespath(v)
182 182 def pop(self, k, default=None):
183 183 v = self._strenv.pop(_strpath(k), _strpath(default))
184 184 return _bytespath(v)
185 185
186 186 osenvironb = environbytes(os.environ)
187 187
188 188 getcwdb = getattr(os, 'getcwdb')
189 189 if not getcwdb or os.name == 'nt':
190 190 getcwdb = lambda: _bytespath(os.getcwd())
191 191
192 192 elif sys.version_info >= (3, 0, 0):
193 193 print('%s is only supported on Python 3.5+ and 2.7, not %s' %
194 194 (sys.argv[0], '.'.join(str(v) for v in sys.version_info[:3])))
195 195 sys.exit(70) # EX_SOFTWARE from `man 3 sysexit`
196 196 else:
197 197 PYTHON3 = False
198 198
199 199 # In python 2.x, path operations are generally done using
200 200 # bytestrings by default, so we don't have to do any extra
201 201 # fiddling there. We define the wrapper functions anyway just to
202 202 # help keep code consistent between platforms.
203 203 def _bytespath(p):
204 204 return p
205 205
206 206 _strpath = _bytespath
207 207 osenvironb = os.environ
208 208 getcwdb = os.getcwd
209 209
210 210 # For Windows support
211 211 wifexited = getattr(os, "WIFEXITED", lambda x: False)
212 212
213 213 # Whether to use IPv6
214 214 def checksocketfamily(name, port=20058):
215 215 """return true if we can listen on localhost using family=name
216 216
217 217 name should be either 'AF_INET', or 'AF_INET6'.
218 218 port being used is okay - EADDRINUSE is considered as successful.
219 219 """
220 220 family = getattr(socket, name, None)
221 221 if family is None:
222 222 return False
223 223 try:
224 224 s = socket.socket(family, socket.SOCK_STREAM)
225 225 s.bind(('localhost', port))
226 226 s.close()
227 227 return True
228 228 except socket.error as exc:
229 229 if exc.errno == errno.EADDRINUSE:
230 230 return True
231 231 elif exc.errno in (errno.EADDRNOTAVAIL, errno.EPROTONOSUPPORT):
232 232 return False
233 233 else:
234 234 raise
235 235 else:
236 236 return False
237 237
238 238 # useipv6 will be set by parseargs
239 239 useipv6 = None
240 240
241 241 def checkportisavailable(port):
242 242 """return true if a port seems free to bind on localhost"""
243 243 if useipv6:
244 244 family = socket.AF_INET6
245 245 else:
246 246 family = socket.AF_INET
247 247 try:
248 248 s = socket.socket(family, socket.SOCK_STREAM)
249 249 s.bind(('localhost', port))
250 250 s.close()
251 251 return True
252 252 except socket.error as exc:
253 253 if exc.errno not in (errno.EADDRINUSE, errno.EADDRNOTAVAIL,
254 254 errno.EPROTONOSUPPORT):
255 255 raise
256 256 return False
257 257
258 258 closefds = os.name == 'posix'
259 259 def Popen4(cmd, wd, timeout, env=None):
260 260 processlock.acquire()
261 261 p = subprocess.Popen(_strpath(cmd), shell=True, bufsize=-1,
262 262 cwd=_strpath(wd), env=env,
263 263 close_fds=closefds,
264 264 stdin=subprocess.PIPE, stdout=subprocess.PIPE,
265 265 stderr=subprocess.STDOUT)
266 266 processlock.release()
267 267
268 268 p.fromchild = p.stdout
269 269 p.tochild = p.stdin
270 270 p.childerr = p.stderr
271 271
272 272 p.timeout = False
273 273 if timeout:
274 274 def t():
275 275 start = time.time()
276 276 while time.time() - start < timeout and p.returncode is None:
277 277 time.sleep(.1)
278 278 p.timeout = True
279 279 if p.returncode is None:
280 280 terminate(p)
281 281 threading.Thread(target=t).start()
282 282
283 283 return p
284 284
285 PYTHON = _bytespath(sys.executable.replace('\\', '/'))
285 if sys.executable:
286 sysexecutable = sys.executable
287 elif os.environ.get('PYTHONEXECUTABLE'):
288 sysexecutable = os.environ['PYTHONEXECUTABLE']
289 elif os.environ.get('PYTHON'):
290 sysexecutable = os.environ['PYTHON']
291 else:
292 raise AssertionError('Could not find Python interpreter')
293
294 PYTHON = _bytespath(sysexecutable.replace('\\', '/'))
286 295 IMPL_PATH = b'PYTHONPATH'
287 296 if 'java' in sys.platform:
288 297 IMPL_PATH = b'JYTHONPATH'
289 298
290 299 defaults = {
291 300 'jobs': ('HGTEST_JOBS', multiprocessing.cpu_count()),
292 301 'timeout': ('HGTEST_TIMEOUT', 180),
293 302 'slowtimeout': ('HGTEST_SLOWTIMEOUT', 1500),
294 303 'port': ('HGTEST_PORT', 20059),
295 304 'shell': ('HGTEST_SHELL', 'sh'),
296 305 }
297 306
298 307 def canonpath(path):
299 308 return os.path.realpath(os.path.expanduser(path))
300 309
301 310 def parselistfiles(files, listtype, warn=True):
302 311 entries = dict()
303 312 for filename in files:
304 313 try:
305 314 path = os.path.expanduser(os.path.expandvars(filename))
306 315 f = open(path, "rb")
307 316 except IOError as err:
308 317 if err.errno != errno.ENOENT:
309 318 raise
310 319 if warn:
311 320 print("warning: no such %s file: %s" % (listtype, filename))
312 321 continue
313 322
314 323 for line in f.readlines():
315 324 line = line.split(b'#', 1)[0].strip()
316 325 if line:
317 326 entries[line] = filename
318 327
319 328 f.close()
320 329 return entries
321 330
322 331 def parsettestcases(path):
323 332 """read a .t test file, return a set of test case names
324 333
325 334 If path does not exist, return an empty set.
326 335 """
327 336 cases = []
328 337 try:
329 338 with open(path, 'rb') as f:
330 339 for l in f:
331 340 if l.startswith(b'#testcases '):
332 341 cases.append(sorted(l[11:].split()))
333 342 except IOError as ex:
334 343 if ex.errno != errno.ENOENT:
335 344 raise
336 345 return cases
337 346
338 347 def getparser():
339 348 """Obtain the OptionParser used by the CLI."""
340 349 parser = argparse.ArgumentParser(usage='%(prog)s [options] [tests]')
341 350
342 351 selection = parser.add_argument_group('Test Selection')
343 352 selection.add_argument('--allow-slow-tests', action='store_true',
344 353 help='allow extremely slow tests')
345 354 selection.add_argument("--blacklist", action="append",
346 355 help="skip tests listed in the specified blacklist file")
347 356 selection.add_argument("--changed",
348 357 help="run tests that are changed in parent rev or working directory")
349 358 selection.add_argument("-k", "--keywords",
350 359 help="run tests matching keywords")
351 360 selection.add_argument("-r", "--retest", action="store_true",
352 361 help = "retest failed tests")
353 362 selection.add_argument("--test-list", action="append",
354 363 help="read tests to run from the specified file")
355 364 selection.add_argument("--whitelist", action="append",
356 365 help="always run tests listed in the specified whitelist file")
357 366 selection.add_argument('tests', metavar='TESTS', nargs='*',
358 367 help='Tests to run')
359 368
360 369 harness = parser.add_argument_group('Test Harness Behavior')
361 370 harness.add_argument('--bisect-repo',
362 371 metavar='bisect_repo',
363 372 help=("Path of a repo to bisect. Use together with "
364 373 "--known-good-rev"))
365 374 harness.add_argument("-d", "--debug", action="store_true",
366 375 help="debug mode: write output of test scripts to console"
367 376 " rather than capturing and diffing it (disables timeout)")
368 377 harness.add_argument("-f", "--first", action="store_true",
369 378 help="exit on the first test failure")
370 379 harness.add_argument("-i", "--interactive", action="store_true",
371 380 help="prompt to accept changed output")
372 381 harness.add_argument("-j", "--jobs", type=int,
373 382 help="number of jobs to run in parallel"
374 383 " (default: $%s or %d)" % defaults['jobs'])
375 384 harness.add_argument("--keep-tmpdir", action="store_true",
376 385 help="keep temporary directory after running tests")
377 386 harness.add_argument('--known-good-rev',
378 387 metavar="known_good_rev",
379 388 help=("Automatically bisect any failures using this "
380 389 "revision as a known-good revision."))
381 390 harness.add_argument("--list-tests", action="store_true",
382 391 help="list tests instead of running them")
383 392 harness.add_argument("--loop", action="store_true",
384 393 help="loop tests repeatedly")
385 394 harness.add_argument('--random', action="store_true",
386 395 help='run tests in random order')
387 396 harness.add_argument('--order-by-runtime', action="store_true",
388 397 help='run slowest tests first, according to .testtimes')
389 398 harness.add_argument("-p", "--port", type=int,
390 399 help="port on which servers should listen"
391 400 " (default: $%s or %d)" % defaults['port'])
392 401 harness.add_argument('--profile-runner', action='store_true',
393 402 help='run statprof on run-tests')
394 403 harness.add_argument("-R", "--restart", action="store_true",
395 404 help="restart at last error")
396 405 harness.add_argument("--runs-per-test", type=int, dest="runs_per_test",
397 406 help="run each test N times (default=1)", default=1)
398 407 harness.add_argument("--shell",
399 408 help="shell to use (default: $%s or %s)" % defaults['shell'])
400 409 harness.add_argument('--showchannels', action='store_true',
401 410 help='show scheduling channels')
402 411 harness.add_argument("--slowtimeout", type=int,
403 412 help="kill errant slow tests after SLOWTIMEOUT seconds"
404 413 " (default: $%s or %d)" % defaults['slowtimeout'])
405 414 harness.add_argument("-t", "--timeout", type=int,
406 415 help="kill errant tests after TIMEOUT seconds"
407 416 " (default: $%s or %d)" % defaults['timeout'])
408 417 harness.add_argument("--tmpdir",
409 418 help="run tests in the given temporary directory"
410 419 " (implies --keep-tmpdir)")
411 420 harness.add_argument("-v", "--verbose", action="store_true",
412 421 help="output verbose messages")
413 422
414 423 hgconf = parser.add_argument_group('Mercurial Configuration')
415 424 hgconf.add_argument("--chg", action="store_true",
416 425 help="install and use chg wrapper in place of hg")
417 426 hgconf.add_argument("--compiler",
418 427 help="compiler to build with")
419 428 hgconf.add_argument('--extra-config-opt', action="append", default=[],
420 429 help='set the given config opt in the test hgrc')
421 430 hgconf.add_argument("-l", "--local", action="store_true",
422 431 help="shortcut for --with-hg=<testdir>/../hg, "
423 432 "and --with-chg=<testdir>/../contrib/chg/chg if --chg is set")
424 433 hgconf.add_argument("--ipv6", action="store_true",
425 434 help="prefer IPv6 to IPv4 for network related tests")
426 435 hgconf.add_argument("--pure", action="store_true",
427 436 help="use pure Python code instead of C extensions")
428 437 hgconf.add_argument("-3", "--py3-warnings", action="store_true",
429 438 help="enable Py3k warnings on Python 2.7+")
430 439 hgconf.add_argument("--with-chg", metavar="CHG",
431 440 help="use specified chg wrapper in place of hg")
432 441 hgconf.add_argument("--with-hg",
433 442 metavar="HG",
434 443 help="test using specified hg script rather than a "
435 444 "temporary installation")
436 445
437 446 reporting = parser.add_argument_group('Results Reporting')
438 447 reporting.add_argument("-C", "--annotate", action="store_true",
439 448 help="output files annotated with coverage")
440 449 reporting.add_argument("--color", choices=["always", "auto", "never"],
441 450 default=os.environ.get('HGRUNTESTSCOLOR', 'auto'),
442 451 help="colorisation: always|auto|never (default: auto)")
443 452 reporting.add_argument("-c", "--cover", action="store_true",
444 453 help="print a test coverage report")
445 454 reporting.add_argument('--exceptions', action='store_true',
446 455 help='log all exceptions and generate an exception report')
447 456 reporting.add_argument("-H", "--htmlcov", action="store_true",
448 457 help="create an HTML report of the coverage of the files")
449 458 reporting.add_argument("--json", action="store_true",
450 459 help="store test result data in 'report.json' file")
451 460 reporting.add_argument("--outputdir",
452 461 help="directory to write error logs to (default=test directory)")
453 462 reporting.add_argument("-n", "--nodiff", action="store_true",
454 463 help="skip showing test changes")
455 464 reporting.add_argument("-S", "--noskips", action="store_true",
456 465 help="don't report skip tests verbosely")
457 466 reporting.add_argument("--time", action="store_true",
458 467 help="time how long each test takes")
459 468 reporting.add_argument("--view",
460 469 help="external diff viewer")
461 470 reporting.add_argument("--xunit",
462 471 help="record xunit results at specified path")
463 472
464 473 for option, (envvar, default) in defaults.items():
465 474 defaults[option] = type(default)(os.environ.get(envvar, default))
466 475 parser.set_defaults(**defaults)
467 476
468 477 return parser
469 478
470 479 def parseargs(args, parser):
471 480 """Parse arguments with our OptionParser and validate results."""
472 481 options = parser.parse_args(args)
473 482
474 483 # jython is always pure
475 484 if 'java' in sys.platform or '__pypy__' in sys.modules:
476 485 options.pure = True
477 486
478 487 if options.with_hg:
479 488 options.with_hg = canonpath(_bytespath(options.with_hg))
480 489 if not (os.path.isfile(options.with_hg) and
481 490 os.access(options.with_hg, os.X_OK)):
482 491 parser.error('--with-hg must specify an executable hg script')
483 492 if os.path.basename(options.with_hg) not in [b'hg', b'hg.exe']:
484 493 sys.stderr.write('warning: --with-hg should specify an hg script\n')
485 494 sys.stderr.flush()
486 495 if options.local:
487 496 testdir = os.path.dirname(_bytespath(canonpath(sys.argv[0])))
488 497 reporootdir = os.path.dirname(testdir)
489 498 pathandattrs = [(b'hg', 'with_hg')]
490 499 if options.chg:
491 500 pathandattrs.append((b'contrib/chg/chg', 'with_chg'))
492 501 for relpath, attr in pathandattrs:
493 502 binpath = os.path.join(reporootdir, relpath)
494 503 if os.name != 'nt' and not os.access(binpath, os.X_OK):
495 504 parser.error('--local specified, but %r not found or '
496 505 'not executable' % binpath)
497 506 setattr(options, attr, binpath)
498 507
499 508 if (options.chg or options.with_chg) and os.name == 'nt':
500 509 parser.error('chg does not work on %s' % os.name)
501 510 if options.with_chg:
502 511 options.chg = False # no installation to temporary location
503 512 options.with_chg = canonpath(_bytespath(options.with_chg))
504 513 if not (os.path.isfile(options.with_chg) and
505 514 os.access(options.with_chg, os.X_OK)):
506 515 parser.error('--with-chg must specify a chg executable')
507 516 if options.chg and options.with_hg:
508 517 # chg shares installation location with hg
509 518 parser.error('--chg does not work when --with-hg is specified '
510 519 '(use --with-chg instead)')
511 520
512 521 if options.color == 'always' and not pygmentspresent:
513 522 sys.stderr.write('warning: --color=always ignored because '
514 523 'pygments is not installed\n')
515 524
516 525 if options.bisect_repo and not options.known_good_rev:
517 526 parser.error("--bisect-repo cannot be used without --known-good-rev")
518 527
519 528 global useipv6
520 529 if options.ipv6:
521 530 useipv6 = checksocketfamily('AF_INET6')
522 531 else:
523 532 # only use IPv6 if IPv4 is unavailable and IPv6 is available
524 533 useipv6 = ((not checksocketfamily('AF_INET'))
525 534 and checksocketfamily('AF_INET6'))
526 535
527 536 options.anycoverage = options.cover or options.annotate or options.htmlcov
528 537 if options.anycoverage:
529 538 try:
530 539 import coverage
531 540 covver = version.StrictVersion(coverage.__version__).version
532 541 if covver < (3, 3):
533 542 parser.error('coverage options require coverage 3.3 or later')
534 543 except ImportError:
535 544 parser.error('coverage options now require the coverage package')
536 545
537 546 if options.anycoverage and options.local:
538 547 # this needs some path mangling somewhere, I guess
539 548 parser.error("sorry, coverage options do not work when --local "
540 549 "is specified")
541 550
542 551 if options.anycoverage and options.with_hg:
543 552 parser.error("sorry, coverage options do not work when --with-hg "
544 553 "is specified")
545 554
546 555 global verbose
547 556 if options.verbose:
548 557 verbose = ''
549 558
550 559 if options.tmpdir:
551 560 options.tmpdir = canonpath(options.tmpdir)
552 561
553 562 if options.jobs < 1:
554 563 parser.error('--jobs must be positive')
555 564 if options.interactive and options.debug:
556 565 parser.error("-i/--interactive and -d/--debug are incompatible")
557 566 if options.debug:
558 567 if options.timeout != defaults['timeout']:
559 568 sys.stderr.write(
560 569 'warning: --timeout option ignored with --debug\n')
561 570 if options.slowtimeout != defaults['slowtimeout']:
562 571 sys.stderr.write(
563 572 'warning: --slowtimeout option ignored with --debug\n')
564 573 options.timeout = 0
565 574 options.slowtimeout = 0
566 575 if options.py3_warnings:
567 576 if PYTHON3:
568 577 parser.error(
569 578 '--py3-warnings can only be used on Python 2.7')
570 579
571 580 if options.blacklist:
572 581 options.blacklist = parselistfiles(options.blacklist, 'blacklist')
573 582 if options.whitelist:
574 583 options.whitelisted = parselistfiles(options.whitelist, 'whitelist')
575 584 else:
576 585 options.whitelisted = {}
577 586
578 587 if options.showchannels:
579 588 options.nodiff = True
580 589
581 590 return options
582 591
583 592 def rename(src, dst):
584 593 """Like os.rename(), trade atomicity and opened files friendliness
585 594 for existing destination support.
586 595 """
587 596 shutil.copy(src, dst)
588 597 os.remove(src)
589 598
590 599 def makecleanable(path):
591 600 """Try to fix directory permission recursively so that the entire tree
592 601 can be deleted"""
593 602 for dirpath, dirnames, _filenames in os.walk(path, topdown=True):
594 603 for d in dirnames:
595 604 p = os.path.join(dirpath, d)
596 605 try:
597 606 os.chmod(p, os.stat(p).st_mode & 0o777 | 0o700) # chmod u+rwx
598 607 except OSError:
599 608 pass
600 609
601 610 _unified_diff = difflib.unified_diff
602 611 if PYTHON3:
603 612 import functools
604 613 _unified_diff = functools.partial(difflib.diff_bytes, difflib.unified_diff)
605 614
606 615 def getdiff(expected, output, ref, err):
607 616 servefail = False
608 617 lines = []
609 618 for line in _unified_diff(expected, output, ref, err):
610 619 if line.startswith(b'+++') or line.startswith(b'---'):
611 620 line = line.replace(b'\\', b'/')
612 621 if line.endswith(b' \n'):
613 622 line = line[:-2] + b'\n'
614 623 lines.append(line)
615 624 if not servefail and line.startswith(
616 625 b'+ abort: child process failed to start'):
617 626 servefail = True
618 627
619 628 return servefail, lines
620 629
621 630 verbose = False
622 631 def vlog(*msg):
623 632 """Log only when in verbose mode."""
624 633 if verbose is False:
625 634 return
626 635
627 636 return log(*msg)
628 637
629 638 # Bytes that break XML even in a CDATA block: control characters 0-31
630 639 # sans \t, \n and \r
631 640 CDATA_EVIL = re.compile(br"[\000-\010\013\014\016-\037]")
632 641
633 642 # Match feature conditionalized output lines in the form, capturing the feature
634 643 # list in group 2, and the preceeding line output in group 1:
635 644 #
636 645 # output..output (feature !)\n
637 646 optline = re.compile(br'(.*) \((.+?) !\)\n$')
638 647
639 648 def cdatasafe(data):
640 649 """Make a string safe to include in a CDATA block.
641 650
642 651 Certain control characters are illegal in a CDATA block, and
643 652 there's no way to include a ]]> in a CDATA either. This function
644 653 replaces illegal bytes with ? and adds a space between the ]] so
645 654 that it won't break the CDATA block.
646 655 """
647 656 return CDATA_EVIL.sub(b'?', data).replace(b']]>', b'] ]>')
648 657
649 658 def log(*msg):
650 659 """Log something to stdout.
651 660
652 661 Arguments are strings to print.
653 662 """
654 663 with iolock:
655 664 if verbose:
656 665 print(verbose, end=' ')
657 666 for m in msg:
658 667 print(m, end=' ')
659 668 print()
660 669 sys.stdout.flush()
661 670
662 671 def highlightdiff(line, color):
663 672 if not color:
664 673 return line
665 674 assert pygmentspresent
666 675 return pygments.highlight(line.decode('latin1'), difflexer,
667 676 terminal256formatter).encode('latin1')
668 677
669 678 def highlightmsg(msg, color):
670 679 if not color:
671 680 return msg
672 681 assert pygmentspresent
673 682 return pygments.highlight(msg, runnerlexer, runnerformatter)
674 683
675 684 def terminate(proc):
676 685 """Terminate subprocess"""
677 686 vlog('# Terminating process %d' % proc.pid)
678 687 try:
679 688 proc.terminate()
680 689 except OSError:
681 690 pass
682 691
683 692 def killdaemons(pidfile):
684 693 import killdaemons as killmod
685 694 return killmod.killdaemons(pidfile, tryhard=False, remove=True,
686 695 logfn=vlog)
687 696
688 697 class Test(unittest.TestCase):
689 698 """Encapsulates a single, runnable test.
690 699
691 700 While this class conforms to the unittest.TestCase API, it differs in that
692 701 instances need to be instantiated manually. (Typically, unittest.TestCase
693 702 classes are instantiated automatically by scanning modules.)
694 703 """
695 704
696 705 # Status code reserved for skipped tests (used by hghave).
697 706 SKIPPED_STATUS = 80
698 707
699 708 def __init__(self, path, outputdir, tmpdir, keeptmpdir=False,
700 709 debug=False,
701 710 first=False,
702 711 timeout=None,
703 712 startport=None, extraconfigopts=None,
704 713 py3warnings=False, shell=None, hgcommand=None,
705 714 slowtimeout=None, usechg=False,
706 715 useipv6=False):
707 716 """Create a test from parameters.
708 717
709 718 path is the full path to the file defining the test.
710 719
711 720 tmpdir is the main temporary directory to use for this test.
712 721
713 722 keeptmpdir determines whether to keep the test's temporary directory
714 723 after execution. It defaults to removal (False).
715 724
716 725 debug mode will make the test execute verbosely, with unfiltered
717 726 output.
718 727
719 728 timeout controls the maximum run time of the test. It is ignored when
720 729 debug is True. See slowtimeout for tests with #require slow.
721 730
722 731 slowtimeout overrides timeout if the test has #require slow.
723 732
724 733 startport controls the starting port number to use for this test. Each
725 734 test will reserve 3 port numbers for execution. It is the caller's
726 735 responsibility to allocate a non-overlapping port range to Test
727 736 instances.
728 737
729 738 extraconfigopts is an iterable of extra hgrc config options. Values
730 739 must have the form "key=value" (something understood by hgrc). Values
731 740 of the form "foo.key=value" will result in "[foo] key=value".
732 741
733 742 py3warnings enables Py3k warnings.
734 743
735 744 shell is the shell to execute tests in.
736 745 """
737 746 if timeout is None:
738 747 timeout = defaults['timeout']
739 748 if startport is None:
740 749 startport = defaults['port']
741 750 if slowtimeout is None:
742 751 slowtimeout = defaults['slowtimeout']
743 752 self.path = path
744 753 self.bname = os.path.basename(path)
745 754 self.name = _strpath(self.bname)
746 755 self._testdir = os.path.dirname(path)
747 756 self._outputdir = outputdir
748 757 self._tmpname = os.path.basename(path)
749 758 self.errpath = os.path.join(self._outputdir, b'%s.err' % self.bname)
750 759
751 760 self._threadtmp = tmpdir
752 761 self._keeptmpdir = keeptmpdir
753 762 self._debug = debug
754 763 self._first = first
755 764 self._timeout = timeout
756 765 self._slowtimeout = slowtimeout
757 766 self._startport = startport
758 767 self._extraconfigopts = extraconfigopts or []
759 768 self._py3warnings = py3warnings
760 769 self._shell = _bytespath(shell)
761 770 self._hgcommand = hgcommand or b'hg'
762 771 self._usechg = usechg
763 772 self._useipv6 = useipv6
764 773
765 774 self._aborted = False
766 775 self._daemonpids = []
767 776 self._finished = None
768 777 self._ret = None
769 778 self._out = None
770 779 self._skipped = None
771 780 self._testtmp = None
772 781 self._chgsockdir = None
773 782
774 783 self._refout = self.readrefout()
775 784
776 785 def readrefout(self):
777 786 """read reference output"""
778 787 # If we're not in --debug mode and reference output file exists,
779 788 # check test output against it.
780 789 if self._debug:
781 790 return None # to match "out is None"
782 791 elif os.path.exists(self.refpath):
783 792 with open(self.refpath, 'rb') as f:
784 793 return f.read().splitlines(True)
785 794 else:
786 795 return []
787 796
788 797 # needed to get base class __repr__ running
789 798 @property
790 799 def _testMethodName(self):
791 800 return self.name
792 801
793 802 def __str__(self):
794 803 return self.name
795 804
796 805 def shortDescription(self):
797 806 return self.name
798 807
799 808 def setUp(self):
800 809 """Tasks to perform before run()."""
801 810 self._finished = False
802 811 self._ret = None
803 812 self._out = None
804 813 self._skipped = None
805 814
806 815 try:
807 816 os.mkdir(self._threadtmp)
808 817 except OSError as e:
809 818 if e.errno != errno.EEXIST:
810 819 raise
811 820
812 821 name = self._tmpname
813 822 self._testtmp = os.path.join(self._threadtmp, name)
814 823 os.mkdir(self._testtmp)
815 824
816 825 # Remove any previous output files.
817 826 if os.path.exists(self.errpath):
818 827 try:
819 828 os.remove(self.errpath)
820 829 except OSError as e:
821 830 # We might have raced another test to clean up a .err
822 831 # file, so ignore ENOENT when removing a previous .err
823 832 # file.
824 833 if e.errno != errno.ENOENT:
825 834 raise
826 835
827 836 if self._usechg:
828 837 self._chgsockdir = os.path.join(self._threadtmp,
829 838 b'%s.chgsock' % name)
830 839 os.mkdir(self._chgsockdir)
831 840
    def run(self, result):
        """Run this test and report results against a TestResult instance."""
        # This function is extremely similar to unittest.TestCase.run(). Once
        # we require Python 2.7 (or at least its version of unittest), this
        # function can largely go away.
        self._result = result
        result.startTest(self)
        try:
            try:
                self.setUp()
            except (KeyboardInterrupt, SystemExit):
                self._aborted = True
                raise
            except Exception:
                # A broken setUp is an error, not a failure.
                result.addError(self, sys.exc_info())
                return

            success = False
            try:
                self.runTest()
            except KeyboardInterrupt:
                self._aborted = True
                raise
            except unittest.SkipTest as e:
                result.addSkip(self, str(e))
                # The base class will have already counted this as a
                # test we "ran", but we want to exclude skipped tests
                # from those we count towards those run.
                result.testsRun -= 1
            except self.failureException as e:
                # This differs from unittest in that we don't capture
                # the stack trace. This is for historical reasons and
                # this decision could be revisited in the future,
                # especially for PythonTest instances.
                if result.addFailure(self, str(e)):
                    success = True
            except Exception:
                result.addError(self, sys.exc_info())
            else:
                success = True

            try:
                self.tearDown()
            except (KeyboardInterrupt, SystemExit):
                self._aborted = True
                raise
            except Exception:
                # A failing tearDown turns an otherwise green test into
                # an error.
                result.addError(self, sys.exc_info())
                success = False

            if success:
                result.addSuccess(self)
        finally:
            # Always notify the result object, even when aborted.
            result.stopTest(self, interrupted=self._aborted)
886 895
    def runTest(self):
        """Run this test instance.

        This will return a tuple describing the result of the test.
        """
        env = self._getenv()
        self._genrestoreenv(env)
        self._daemonpids.append(env['DAEMON_PIDS'])
        self._createhgrc(env['HGRCPATH'])

        vlog('# Test', self.name)

        ret, out = self._run(env)
        self._finished = True
        self._ret = ret
        self._out = out

        def describe(ret):
            # Human-readable description of a non-zero return code.
            if ret < 0:
                return 'killed by signal: %d' % -ret
            return 'returned error code %d' % ret

        self._skipped = False

        if ret == self.SKIPPED_STATUS:
            if out is None: # Debug mode, nothing to parse.
                missing = ['unknown']
                failed = None
            else:
                missing, failed = TTest.parsehghaveoutput(out)

            if not missing:
                missing = ['skipped']

            if failed:
                self.fail('hg have failed checking for %s' % failed[-1])
            else:
                self._skipped = True
                raise unittest.SkipTest(missing[-1])
        elif ret == 'timeout':
            self.fail('timed out')
        elif ret is False:
            self.fail('no result code from test')
        elif out != self._refout:
            # Diff generation may rely on written .err file.
            if ((ret != 0 or out != self._refout) and not self._skipped
                and not self._debug):
                with open(self.errpath, 'wb') as f:
                    for line in out:
                        f.write(line)

            # The result object handles diff calculation for us.
            with firstlock:
                if self._result.addOutputMismatch(self, ret, out, self._refout):
                    # change was accepted, skip failing
                    return
                if self._first:
                    global firsterror
                    firsterror = True

            if ret:
                msg = 'output changed and ' + describe(ret)
            else:
                msg = 'output changed'

            self.fail(msg)
        elif ret:
            self.fail(describe(ret))
955 964
    def tearDown(self):
        """Tasks to perform after run()."""
        # Make sure no daemons spawned by the test outlive it.
        for entry in self._daemonpids:
            killdaemons(entry)
        self._daemonpids = []

        if self._keeptmpdir:
            log('\nKeeping testtmp dir: %s\nKeeping threadtmp dir: %s' %
                (self._testtmp.decode('utf-8'),
                 self._threadtmp.decode('utf-8')))
        else:
            try:
                shutil.rmtree(self._testtmp)
            except OSError:
                # unreadable directory may be left in $TESTTMP; fix permission
                # and try again
                makecleanable(self._testtmp)
                shutil.rmtree(self._testtmp, True)
            shutil.rmtree(self._threadtmp, True)

        if self._usechg:
            # chgservers will stop automatically after they find the socket
            # files are deleted
            shutil.rmtree(self._chgsockdir, True)

        # Persist mismatching output as <test>.err for later inspection.
        if ((self._ret != 0 or self._out != self._refout) and not self._skipped
            and not self._debug and self._out):
            with open(self.errpath, 'wb') as f:
                for line in self._out:
                    f.write(line)

        vlog("# Ret was:", self._ret, '(%s)' % self.name)
988 997
    def _run(self, env):
        """Execute the test with ``env``; returns (exitcode, output).

        Subclass hook: the base class has no test type of its own.
        """
        # This should be implemented in child classes to run tests.
        raise unittest.SkipTest('unknown test type')
992 1001
993 1002 def abort(self):
994 1003 """Terminate execution of this test."""
995 1004 self._aborted = True
996 1005
997 1006 def _portmap(self, i):
998 1007 offset = b'' if i == 0 else b'%d' % i
999 1008 return (br':%d\b' % (self._startport + i), b':$HGPORT%s' % offset)
1000 1009
    def _getreplacements(self):
        """Obtain a mapping of text replacements to apply to test output.

        Test output needs to be normalized so it can be compared to expected
        output. This function defines how some of that normalization will
        occur.
        """
        r = [
            # This list should be parallel to defineport in _getenv
            self._portmap(0),
            self._portmap(1),
            self._portmap(2),
            (br'([^0-9])%s' % re.escape(self._localip(), ), br'\1$LOCALIP'),
            (br'\bHG_TXNID=TXN:[a-f0-9]{40}\b', br'HG_TXNID=TXN:$ID$'),
        ]
        r.append((self._escapepath(self._testtmp), b'$TESTTMP'))

        # Site-wide extra patterns may be provided next to the tests.
        replacementfile = os.path.join(self._testdir, b'common-pattern.py')

        if os.path.exists(replacementfile):
            data = {}
            with open(replacementfile, mode='rb') as source:
                # the intermediate 'compile' step help with debugging
                code = compile(source.read(), replacementfile, 'exec')
                exec(code, data)
            for value in data.get('substitutions', ()):
                if len(value) != 2:
                    msg = 'malformatted substitution in %s: %r'
                    msg %= (replacementfile, value)
                    raise ValueError(msg)
                r.append(value)
        return r
1033 1042
    def _escapepath(self, p):
        # Turn path ``p`` into a regex fragment matching it in output.
        # On Windows each letter matches case-insensitively and / or \
        # match either separator; elsewhere a plain re.escape suffices.
        if os.name == 'nt':
            return (
                (b''.join(c.isalpha() and b'[%s%s]' % (c.lower(), c.upper()) or
                          c in b'/\\' and br'[/\\]' or c.isdigit() and c or
                          b'\\' + c
                          for c in [p[i:i + 1] for i in range(len(p))]))
            )
        else:
            return re.escape(p)
1043 1052
1044 1053 def _localip(self):
1045 1054 if self._useipv6:
1046 1055 return b'::1'
1047 1056 else:
1048 1057 return b'127.0.0.1'
1049 1058
    def _genrestoreenv(self, testenv):
        """Generate a script that can be used by tests to restore the original
        environment."""
        # Put the restoreenv script inside self._threadtmp
        scriptpath = os.path.join(self._threadtmp, b'restoreenv.sh')
        testenv['HGTEST_RESTOREENV'] = _strpath(scriptpath)

        # Only restore environment variable names that the shell allows
        # us to export.
        name_regex = re.compile('^[a-zA-Z][a-zA-Z0-9_]*$')

        # Do not restore these variables; otherwise tests would fail.
        reqnames = {'PYTHON', 'TESTDIR', 'TESTTMP'}

        with open(scriptpath, 'w') as envf:
            for name, value in origenviron.items():
                if not name_regex.match(name):
                    # Skip environment variables with unusual names not
                    # allowed by most shells.
                    continue
                if name in reqnames:
                    continue
                envf.write('%s=%s\n' % (name, shellquote(value)))

            # Variables that exist only in the test environment must be
            # unset so sourcing the script restores the pre-test state.
            for name in testenv:
                if name in origenviron or name in reqnames:
                    continue
                envf.write('unset %s\n' % (name,))
1078 1087
1079 1088 def _getenv(self):
1080 1089 """Obtain environment variables to use during test execution."""
1081 1090 def defineport(i):
1082 1091 offset = '' if i == 0 else '%s' % i
1083 1092 env["HGPORT%s" % offset] = '%s' % (self._startport + i)
1084 1093 env = os.environ.copy()
1085 1094 env['PYTHONUSERBASE'] = sysconfig.get_config_var('userbase') or ''
1086 1095 env['HGEMITWARNINGS'] = '1'
1087 1096 env['TESTTMP'] = _strpath(self._testtmp)
1088 1097 env['TESTNAME'] = self.name
1089 1098 env['HOME'] = _strpath(self._testtmp)
1090 1099 # This number should match portneeded in _getport
1091 1100 for port in xrange(3):
1092 1101 # This list should be parallel to _portmap in _getreplacements
1093 1102 defineport(port)
1094 1103 env["HGRCPATH"] = _strpath(os.path.join(self._threadtmp, b'.hgrc'))
1095 1104 env["DAEMON_PIDS"] = _strpath(os.path.join(self._threadtmp,
1096 1105 b'daemon.pids'))
1097 env["HGEDITOR"] = ('"' + sys.executable + '"'
1106 env["HGEDITOR"] = ('"' + sysexecutable + '"'
1098 1107 + ' -c "import sys; sys.exit(0)"')
1099 1108 env["HGUSER"] = "test"
1100 1109 env["HGENCODING"] = "ascii"
1101 1110 env["HGENCODINGMODE"] = "strict"
1102 1111 env["HGHOSTNAME"] = "test-hostname"
1103 1112 env['HGIPV6'] = str(int(self._useipv6))
1104 1113 # See contrib/catapipe.py for how to use this functionality.
1105 1114 if 'HGTESTCATAPULTSERVERPIPE' not in env:
1106 1115 # If we don't have HGTESTCATAPULTSERVERPIPE explicitly set, pull the
1107 1116 # non-test one in as a default, otherwise set to devnull
1108 1117 env['HGTESTCATAPULTSERVERPIPE'] = env.get(
1109 1118 'HGCATAPULTSERVERPIPE', os.devnull)
1110 1119
1111 1120 extraextensions = []
1112 1121 for opt in self._extraconfigopts:
1113 1122 section, key = opt.encode('utf-8').split(b'.', 1)
1114 1123 if section != 'extensions':
1115 1124 continue
1116 1125 name = key.split(b'=', 1)[0]
1117 1126 extraextensions.append(name)
1118 1127
1119 1128 if extraextensions:
1120 1129 env['HGTESTEXTRAEXTENSIONS'] = b' '.join(extraextensions)
1121 1130
1122 1131 # LOCALIP could be ::1 or 127.0.0.1. Useful for tests that require raw
1123 1132 # IP addresses.
1124 1133 env['LOCALIP'] = _strpath(self._localip())
1125 1134
1126 1135 # This has the same effect as Py_LegacyWindowsStdioFlag in exewrapper.c,
1127 1136 # but this is needed for testing python instances like dummyssh,
1128 1137 # dummysmtpd.py, and dumbhttp.py.
1129 1138 if PYTHON3 and os.name == 'nt':
1130 1139 env['PYTHONLEGACYWINDOWSSTDIO'] = '1'
1131 1140
1132 1141 # Reset some environment variables to well-known values so that
1133 1142 # the tests produce repeatable output.
1134 1143 env['LANG'] = env['LC_ALL'] = env['LANGUAGE'] = 'C'
1135 1144 env['TZ'] = 'GMT'
1136 1145 env["EMAIL"] = "Foo Bar <foo.bar@example.com>"
1137 1146 env['COLUMNS'] = '80'
1138 1147 env['TERM'] = 'xterm'
1139 1148
1140 1149 dropped = [
1141 1150 'CDPATH',
1142 1151 'CHGDEBUG',
1143 1152 'EDITOR',
1144 1153 'GREP_OPTIONS',
1145 1154 'HG',
1146 1155 'HGMERGE',
1147 1156 'HGPLAIN',
1148 1157 'HGPLAINEXCEPT',
1149 1158 'HGPROF',
1150 1159 'http_proxy',
1151 1160 'no_proxy',
1152 1161 'NO_PROXY',
1153 1162 'PAGER',
1154 1163 'VISUAL',
1155 1164 ]
1156 1165
1157 1166 for k in dropped:
1158 1167 if k in env:
1159 1168 del env[k]
1160 1169
1161 1170 # unset env related to hooks
1162 1171 for k in list(env):
1163 1172 if k.startswith('HG_'):
1164 1173 del env[k]
1165 1174
1166 1175 if self._usechg:
1167 1176 env['CHGSOCKNAME'] = os.path.join(self._chgsockdir, b'server')
1168 1177
1169 1178 return env
1170 1179
1171 1180 def _createhgrc(self, path):
1172 1181 """Create an hgrc file for this test."""
1173 1182 with open(path, 'wb') as hgrc:
1174 1183 hgrc.write(b'[ui]\n')
1175 1184 hgrc.write(b'slash = True\n')
1176 1185 hgrc.write(b'interactive = False\n')
1177 1186 hgrc.write(b'merge = internal:merge\n')
1178 1187 hgrc.write(b'mergemarkers = detailed\n')
1179 1188 hgrc.write(b'promptecho = True\n')
1180 1189 hgrc.write(b'[defaults]\n')
1181 1190 hgrc.write(b'[devel]\n')
1182 1191 hgrc.write(b'all-warnings = true\n')
1183 1192 hgrc.write(b'default-date = 0 0\n')
1184 1193 hgrc.write(b'[largefiles]\n')
1185 1194 hgrc.write(b'usercache = %s\n' %
1186 1195 (os.path.join(self._testtmp, b'.cache/largefiles')))
1187 1196 hgrc.write(b'[lfs]\n')
1188 1197 hgrc.write(b'usercache = %s\n' %
1189 1198 (os.path.join(self._testtmp, b'.cache/lfs')))
1190 1199 hgrc.write(b'[web]\n')
1191 1200 hgrc.write(b'address = localhost\n')
1192 1201 hgrc.write(b'ipv6 = %s\n' % str(self._useipv6).encode('ascii'))
1193 1202 hgrc.write(b'server-header = testing stub value\n')
1194 1203
1195 1204 for opt in self._extraconfigopts:
1196 1205 section, key = opt.encode('utf-8').split(b'.', 1)
1197 1206 assert b'=' in key, ('extra config opt %s must '
1198 1207 'have an = for assignment' % opt)
1199 1208 hgrc.write(b'[%s]\n%s\n' % (section, key))
1200 1209
1201 1210 def fail(self, msg):
1202 1211 # unittest differentiates between errored and failed.
1203 1212 # Failed is denoted by AssertionError (by default at least).
1204 1213 raise AssertionError(msg)
1205 1214
    def _runcommand(self, cmd, env, normalizenewlines=False):
        """Run command in a sub-process, capturing the output (stdout and
        stderr).

        Return a tuple (exitcode, output). output is None in debug mode.
        """
        if self._debug:
            # Debug mode: let the child inherit our stdio so the user can
            # watch it run; nothing is captured or diffed.
            proc = subprocess.Popen(_strpath(cmd), shell=True,
                                    cwd=_strpath(self._testtmp),
                                    env=env)
            ret = proc.wait()
            return (ret, None)

        proc = Popen4(cmd, self._testtmp, self._timeout, env)
        def cleanup():
            # Terminate the child and any daemons it started; report a
            # SIGTERM-ish status if the child claimed success anyway.
            terminate(proc)
            ret = proc.wait()
            if ret == 0:
                ret = signal.SIGTERM << 8
            killdaemons(env['DAEMON_PIDS'])
            return ret

        proc.tochild.close()

        try:
            output = proc.fromchild.read()
        except KeyboardInterrupt:
            vlog('# Handling keyboard interrupt')
            cleanup()
            raise

        ret = proc.wait()
        if wifexited(ret):
            ret = os.WEXITSTATUS(ret)

        if proc.timeout:
            ret = 'timeout'

        if ret:
            killdaemons(env['DAEMON_PIDS'])

        # Normalize output (ports, $TESTTMP, ...) before comparison.
        for s, r in self._getreplacements():
            output = re.sub(s, r, output)

        if normalizenewlines:
            output = output.replace(b'\r\n', b'\n')

        return ret, output.splitlines(True)
1254 1263
class PythonTest(Test):
    """A Python-based test."""

    @property
    def refpath(self):
        # Expected output lives next to the test as <name>.out.
        return os.path.join(self._testdir, b'%s.out' % self.bname)

    def _run(self, env):
        # Optionally ask Python 2 to emit py3 compatibility warnings.
        py3switch = b' -3' if self._py3warnings else b''
        # Quote the python(3) executable for Windows
        cmd = b'"%s"%s "%s"' % (PYTHON, py3switch, self.path)
        vlog("# Running", cmd)
        # Windows Python writes \r\n; normalize so output matches *.out.
        normalizenewlines = os.name == 'nt'
        result = self._runcommand(cmd, env,
                                  normalizenewlines=normalizenewlines)
        if self._aborted:
            raise KeyboardInterrupt()

        return result
1274 1283
# Some glob patterns apply only in some circumstances, so the script
# might want to remove (glob) annotations that otherwise should be
# retained.  Lines matching one of these always keep their marker.
checkcodeglobpats = [
    # On Windows it looks like \ doesn't require a (glob), but we know
    # better.
    re.compile(br'^pushing to \$TESTTMP/.*[^)]$'),
    re.compile(br'^moving \S+/.*[^)]$'),
    re.compile(br'^pulling from \$TESTTMP/.*[^)]$'),
    # Not all platforms have 127.0.0.1 as loopback (though most do),
    # so we always glob that too.
    re.compile(br'.*\$LOCALIP.*$'),
]
1288 1297
# bchr(i) turns a small integer into a one-byte bytes object on both
# major Python versions.
if PYTHON3:
    # Indexing bytes yields ints on py3; wrap them back into bytes.
    def bchr(i):
        return bytes([i])
else:
    # On py2 bytes is str, so chr already produces a one-byte string.
    bchr = chr
1292 1301
class TTest(Test):
    """A "t test" is a test backed by a .t file."""

    # Markers emitted by hghave in skipped-test output.
    SKIPPED_PREFIX = b'skipped: '
    FAILED_PREFIX = b'hghave check failed: '
    # Detects bytes that need (esc) encoding in expected output.
    NEEDESCAPE = re.compile(br'[\x00-\x08\x0b-\x1f\x7f-\xff]').search

    ESCAPESUB = re.compile(br'[\x00-\x08\x0b-\x1f\\\x7f-\xff]').sub
    # Map each byte to its \xNN escape; backslash and CR get readable forms.
    ESCAPEMAP = dict((bchr(i), br'\x%02x' % i) for i in range(256))
    ESCAPEMAP.update({b'\\': b'\\\\', b'\r': br'\r'})
1303 1312
    def __init__(self, path, *args, **kwds):
        # accept an extra "case" parameter
        case = kwds.pop('case', [])
        self._case = case
        # All case names declared by the file's "#testcases" lines.
        self._allcases = {x for y in parsettestcases(path) for x in y}
        super(TTest, self).__init__(path, *args, **kwds)
        if case:
            # Qualify name, .err path and tmp dir with the active case so
            # the variants of one .t file don't collide.
            casepath = b'#'.join(case)
            self.name = '%s#%s' % (self.name, _strpath(casepath))
            self.errpath = b'%s#%s.err' % (self.errpath[:-4], casepath)
            self._tmpname += b'-%s' % casepath
        # Cache of hghave results, keyed by requirement string.
        self._have = {}
1316 1325
    @property
    def refpath(self):
        # A .t file is both the test script and its own reference
        # output, so the reference path is the test file itself.
        return os.path.join(self._testdir, self.bname)
1320 1329
    def _run(self, env):
        with open(self.path, 'rb') as f:
            lines = f.readlines()

        # .t file is both reference output and the test input, keep reference
        # output updated with the test input. This avoids some race
        # conditions where the reference output does not match the actual test.
        if self._refout is not None:
            self._refout = lines

        salt, script, after, expected = self._parsetest(lines)

        # Write out the generated script.
        fname = b'%s.sh' % self._testtmp
        with open(fname, 'wb') as f:
            for l in script:
                f.write(l)

        cmd = b'%s "%s"' % (self._shell, fname)
        vlog("# Running", cmd)

        exitcode, output = self._runcommand(cmd, env)

        if self._aborted:
            raise KeyboardInterrupt()

        # Do not merge output if skipped. Return hghave message instead.
        # Similarly, with --debug, output is None.
        if exitcode == self.SKIPPED_STATUS or output is None:
            return exitcode, output

        return self._processoutput(exitcode, output, salt, after, expected)
1353 1362
    def _hghave(self, reqs):
        """Check the requirement list ``reqs`` via tests/hghave.

        Returns (available, message) where ``message`` is hghave's output
        when a requirement is missing.  Results are memoized per
        requirement string in self._have.
        """
        allreqs = b' '.join(reqs)

        self._detectslow(reqs)

        if allreqs in self._have:
            return self._have.get(allreqs)

        # TODO do something smarter when all other uses of hghave are gone.
        runtestdir = os.path.abspath(os.path.dirname(_bytespath(__file__)))
        tdir = runtestdir.replace(b'\\', b'/')
        proc = Popen4(b'%s -c "%s/hghave %s"' %
                      (self._shell, tdir, allreqs),
                      self._testtmp, 0, self._getenv())
        stdout, stderr = proc.communicate()
        ret = proc.wait()
        if wifexited(ret):
            ret = os.WEXITSTATUS(ret)
        if ret == 2:
            # hghave itself is broken; abort the whole run loudly.
            print(stdout.decode('utf-8'))
            sys.exit(1)

        if ret != 0:
            self._have[allreqs] = (False, stdout)
            return False, stdout

        self._have[allreqs] = (True, None)
        return True, None
1382 1391
1383 1392 def _detectslow(self, reqs):
1384 1393 """update the timeout of slow test when appropriate"""
1385 1394 if b'slow' in reqs:
1386 1395 self._timeout = self._slowtimeout
1387 1396
1388 1397 def _iftest(self, args):
1389 1398 # implements "#if"
1390 1399 reqs = []
1391 1400 for arg in args:
1392 1401 if arg.startswith(b'no-') and arg[3:] in self._allcases:
1393 1402 if arg[3:] in self._case:
1394 1403 return False
1395 1404 elif arg in self._allcases:
1396 1405 if arg not in self._case:
1397 1406 return False
1398 1407 else:
1399 1408 reqs.append(arg)
1400 1409 self._detectslow(reqs)
1401 1410 return self._hghave(reqs)[0]
1402 1411
1403 1412 def _parsetest(self, lines):
1404 1413 # We generate a shell script which outputs unique markers to line
1405 1414 # up script results with our source. These markers include input
1406 1415 # line number and the last return code.
1407 1416 salt = b"SALT%d" % time.time()
1408 1417 def addsalt(line, inpython):
1409 1418 if inpython:
1410 1419 script.append(b'%s %d 0\n' % (salt, line))
1411 1420 else:
1412 1421 script.append(b'echo %s %d $?\n' % (salt, line))
1413 1422 activetrace = []
1414 1423 session = str(uuid.uuid4())
1415 1424 if PYTHON3:
1416 1425 session = session.encode('ascii')
1417 1426 hgcatapult = (os.getenv('HGTESTCATAPULTSERVERPIPE') or
1418 1427 os.getenv('HGCATAPULTSERVERPIPE'))
1419 1428 def toggletrace(cmd=None):
1420 1429 if not hgcatapult or hgcatapult == os.devnull:
1421 1430 return
1422 1431
1423 1432 if activetrace:
1424 1433 script.append(
1425 1434 b'echo END %s %s >> "$HGTESTCATAPULTSERVERPIPE"\n' % (
1426 1435 session, activetrace[0]))
1427 1436 if cmd is None:
1428 1437 return
1429 1438
1430 1439 if isinstance(cmd, str):
1431 1440 quoted = shellquote(cmd.strip())
1432 1441 else:
1433 1442 quoted = shellquote(cmd.strip().decode('utf8')).encode('utf8')
1434 1443 quoted = quoted.replace(b'\\', b'\\\\')
1435 1444 script.append(
1436 1445 b'echo START %s %s >> "$HGTESTCATAPULTSERVERPIPE"\n' % (
1437 1446 session, quoted))
1438 1447 activetrace[0:] = [quoted]
1439 1448
1440 1449 script = []
1441 1450
1442 1451 # After we run the shell script, we re-unify the script output
1443 1452 # with non-active parts of the source, with synchronization by our
1444 1453 # SALT line number markers. The after table contains the non-active
1445 1454 # components, ordered by line number.
1446 1455 after = {}
1447 1456
1448 1457 # Expected shell script output.
1449 1458 expected = {}
1450 1459
1451 1460 pos = prepos = -1
1452 1461
1453 1462 # True or False when in a true or false conditional section
1454 1463 skipping = None
1455 1464
1456 1465 # We keep track of whether or not we're in a Python block so we
1457 1466 # can generate the surrounding doctest magic.
1458 1467 inpython = False
1459 1468
1460 1469 if self._debug:
1461 1470 script.append(b'set -x\n')
1462 1471 if self._hgcommand != b'hg':
1463 1472 script.append(b'alias hg="%s"\n' % self._hgcommand)
1464 1473 if os.getenv('MSYSTEM'):
1465 1474 script.append(b'alias pwd="pwd -W"\n')
1466 1475
1467 1476 if hgcatapult and hgcatapult != os.devnull:
1468 1477 # Kludge: use a while loop to keep the pipe from getting
1469 1478 # closed by our echo commands. The still-running file gets
1470 1479 # reaped at the end of the script, which causes the while
1471 1480 # loop to exit and closes the pipe. Sigh.
1472 1481 script.append(
1473 1482 b'rtendtracing() {\n'
1474 1483 b' echo END %(session)s %(name)s >> %(catapult)s\n'
1475 1484 b' rm -f "$TESTTMP/.still-running"\n'
1476 1485 b'}\n'
1477 1486 b'trap "rtendtracing" 0\n'
1478 1487 b'touch "$TESTTMP/.still-running"\n'
1479 1488 b'while [ -f "$TESTTMP/.still-running" ]; do sleep 1; done '
1480 1489 b'> %(catapult)s &\n'
1481 1490 b'HGCATAPULTSESSION=%(session)s ; export HGCATAPULTSESSION\n'
1482 1491 b'echo START %(session)s %(name)s >> %(catapult)s\n'
1483 1492 % {
1484 1493 'name': self.name,
1485 1494 'session': session,
1486 1495 'catapult': hgcatapult,
1487 1496 }
1488 1497 )
1489 1498
1490 1499 if self._case:
1491 1500 casestr = b'#'.join(self._case)
1492 1501 if isinstance(self._case, str):
1493 1502 quoted = shellquote(casestr)
1494 1503 else:
1495 1504 quoted = shellquote(casestr.decode('utf8')).encode('utf8')
1496 1505 script.append(b'TESTCASE=%s\n' % quoted)
1497 1506 script.append(b'export TESTCASE\n')
1498 1507
1499 1508 n = 0
1500 1509 for n, l in enumerate(lines):
1501 1510 if not l.endswith(b'\n'):
1502 1511 l += b'\n'
1503 1512 if l.startswith(b'#require'):
1504 1513 lsplit = l.split()
1505 1514 if len(lsplit) < 2 or lsplit[0] != b'#require':
1506 1515 after.setdefault(pos, []).append(' !!! invalid #require\n')
1507 1516 if not skipping:
1508 1517 haveresult, message = self._hghave(lsplit[1:])
1509 1518 if not haveresult:
1510 1519 script = [b'echo "%s"\nexit 80\n' % message]
1511 1520 break
1512 1521 after.setdefault(pos, []).append(l)
1513 1522 elif l.startswith(b'#if'):
1514 1523 lsplit = l.split()
1515 1524 if len(lsplit) < 2 or lsplit[0] != b'#if':
1516 1525 after.setdefault(pos, []).append(' !!! invalid #if\n')
1517 1526 if skipping is not None:
1518 1527 after.setdefault(pos, []).append(' !!! nested #if\n')
1519 1528 skipping = not self._iftest(lsplit[1:])
1520 1529 after.setdefault(pos, []).append(l)
1521 1530 elif l.startswith(b'#else'):
1522 1531 if skipping is None:
1523 1532 after.setdefault(pos, []).append(' !!! missing #if\n')
1524 1533 skipping = not skipping
1525 1534 after.setdefault(pos, []).append(l)
1526 1535 elif l.startswith(b'#endif'):
1527 1536 if skipping is None:
1528 1537 after.setdefault(pos, []).append(' !!! missing #if\n')
1529 1538 skipping = None
1530 1539 after.setdefault(pos, []).append(l)
1531 1540 elif skipping:
1532 1541 after.setdefault(pos, []).append(l)
1533 1542 elif l.startswith(b' >>> '): # python inlines
1534 1543 after.setdefault(pos, []).append(l)
1535 1544 prepos = pos
1536 1545 pos = n
1537 1546 if not inpython:
1538 1547 # We've just entered a Python block. Add the header.
1539 1548 inpython = True
1540 1549 addsalt(prepos, False) # Make sure we report the exit code.
1541 1550 script.append(b'"%s" -m heredoctest <<EOF\n' % PYTHON)
1542 1551 addsalt(n, True)
1543 1552 script.append(l[2:])
1544 1553 elif l.startswith(b' ... '): # python inlines
1545 1554 after.setdefault(prepos, []).append(l)
1546 1555 script.append(l[2:])
1547 1556 elif l.startswith(b' $ '): # commands
1548 1557 if inpython:
1549 1558 script.append(b'EOF\n')
1550 1559 inpython = False
1551 1560 after.setdefault(pos, []).append(l)
1552 1561 prepos = pos
1553 1562 pos = n
1554 1563 addsalt(n, False)
1555 1564 rawcmd = l[4:]
1556 1565 cmd = rawcmd.split()
1557 1566 toggletrace(rawcmd)
1558 1567 if len(cmd) == 2 and cmd[0] == b'cd':
1559 1568 l = b' $ cd %s || exit 1\n' % cmd[1]
1560 1569 script.append(rawcmd)
1561 1570 elif l.startswith(b' > '): # continuations
1562 1571 after.setdefault(prepos, []).append(l)
1563 1572 script.append(l[4:])
1564 1573 elif l.startswith(b' '): # results
1565 1574 # Queue up a list of expected results.
1566 1575 expected.setdefault(pos, []).append(l[2:])
1567 1576 else:
1568 1577 if inpython:
1569 1578 script.append(b'EOF\n')
1570 1579 inpython = False
1571 1580 # Non-command/result. Queue up for merged output.
1572 1581 after.setdefault(pos, []).append(l)
1573 1582
1574 1583 if inpython:
1575 1584 script.append(b'EOF\n')
1576 1585 if skipping is not None:
1577 1586 after.setdefault(pos, []).append(' !!! missing #endif\n')
1578 1587 addsalt(n + 1, False)
1579 1588 # Need to end any current per-command trace
1580 1589 if activetrace:
1581 1590 toggletrace()
1582 1591 return salt, script, after, expected
1583 1592
    def _processoutput(self, exitcode, output, salt, after, expected):
        """Merge the script output back into a unified test.

        Returns (exitcode, postout) where ``postout`` is the regenerated
        .t content; ``exitcode`` becomes False when only warn-only lines
        (e.g. unnecessary globs) mismatched.
        """
        # warnonly states: 1 = undecided, 2 = warn only, 3 = definitely not.
        warnonly = 1 # 1: not yet; 2: yes; 3: for sure not
        if exitcode != 0:
            warnonly = 3

        pos = -1
        postout = []
        for l in output:
            lout, lcmd = l, None
            if salt in l:
                # Marker line: text before the salt is real output, after
                # it comes "<source line> <return code>".
                lout, lcmd = l.split(salt, 1)

            while lout:
                if not lout.endswith(b'\n'):
                    lout += b' (no-eol)\n'

                # Find the expected output at the current position.
                els = [None]
                if expected.get(pos, None):
                    els = expected[pos]

                optional = []
                for i, el in enumerate(els):
                    r = False
                    if el:
                        r, exact = self.linematch(el, lout)
                    if isinstance(r, str):
                        # NOTE(review): under py3 globmatch returns
                        # b'-glob' (bytes), so this str branch is only
                        # reachable on py2 — confirm intended.
                        if r == '-glob':
                            lout = ''.join(el.rsplit(' (glob)', 1))
                            r = '' # Warn only this line.
                        elif r == "retry":
                            postout.append(b'  ' + el)
                        else:
                            log('\ninfo, unknown linematch result: %r\n' % r)
                            r = False
                    if r:
                        els.pop(i)
                        break
                    if el:
                        # Track optional "(?)" and false-conditioned lines
                        # so they can be re-emitted without failing.
                        if el.endswith(b" (?)\n"):
                            optional.append(i)
                        else:
                            m = optline.match(el)
                            if m:
                                conditions = [
                                    c for c in m.group(2).split(b' ')]

                                if not self._iftest(conditions):
                                    optional.append(i)
                        if exact:
                            # Don't allow line to be matches against a later
                            # line in the output
                            els.pop(i)
                            break

                if r:
                    if r == "retry":
                        continue
                    # clean up any optional leftovers
                    for i in optional:
                        postout.append(b'  ' + els[i])
                    for i in reversed(optional):
                        del els[i]
                    postout.append(b'  ' + el)
                else:
                    if self.NEEDESCAPE(lout):
                        # Encode control/high bytes with an (esc) marker.
                        lout = TTest._stringescape(b'%s (esc)\n' %
                                                   lout.rstrip(b'\n'))
                    postout.append(b'  ' + lout) # Let diff deal with it.
                    if r != '': # If line failed.
                        warnonly = 3 # for sure not
                    elif warnonly == 1: # Is "not yet" and line is warn only.
                        warnonly = 2 # Yes do warn.
                break
            else:
                # clean up any optional leftovers
                while expected.get(pos, None):
                    el = expected[pos].pop(0)
                    if el:
                        if not el.endswith(b" (?)\n"):
                            m = optline.match(el)
                            if m:
                                conditions = [c for c in m.group(2).split(b' ')]

                                if self._iftest(conditions):
                                    # Don't append as optional line
                                    continue
                            else:
                                continue
                    postout.append(b'  ' + el)

            if lcmd:
                # Add on last return code.
                ret = int(lcmd.split()[1])
                if ret != 0:
                    postout.append(b'  [%d]\n' % ret)
                if pos in after:
                    # Merge in non-active test bits.
                    postout += after.pop(pos)
                pos = int(lcmd.split()[0])

        if pos in after:
            postout += after.pop(pos)

        if warnonly == 2:
            exitcode = False # Set exitcode to warned.

        return exitcode, postout
1693 1702
1694 1703 @staticmethod
1695 1704 def rematch(el, l):
1696 1705 try:
1697 1706 el = b'(?:' + el + b')'
1698 1707 # use \Z to ensure that the regex matches to the end of the string
1699 1708 if os.name == 'nt':
1700 1709 return re.match(el + br'\r?\n\Z', l)
1701 1710 return re.match(el + br'\n\Z', l)
1702 1711 except re.error:
1703 1712 # el is an invalid regex
1704 1713 return False
1705 1714
1706 1715 @staticmethod
1707 1716 def globmatch(el, l):
1708 1717 # The only supported special characters are * and ? plus / which also
1709 1718 # matches \ on windows. Escaping of these characters is supported.
1710 1719 if el + b'\n' == l:
1711 1720 if os.altsep:
1712 1721 # matching on "/" is not needed for this line
1713 1722 for pat in checkcodeglobpats:
1714 1723 if pat.match(el):
1715 1724 return True
1716 1725 return b'-glob'
1717 1726 return True
1718 1727 el = el.replace(b'$LOCALIP', b'*')
1719 1728 i, n = 0, len(el)
1720 1729 res = b''
1721 1730 while i < n:
1722 1731 c = el[i:i + 1]
1723 1732 i += 1
1724 1733 if c == b'\\' and i < n and el[i:i + 1] in b'*?\\/':
1725 1734 res += el[i - 1:i + 1]
1726 1735 i += 1
1727 1736 elif c == b'*':
1728 1737 res += b'.*'
1729 1738 elif c == b'?':
1730 1739 res += b'.'
1731 1740 elif c == b'/' and os.altsep:
1732 1741 res += b'[/\\\\]'
1733 1742 else:
1734 1743 res += re.escape(c)
1735 1744 return TTest.rematch(res, l)
1736 1745
    def linematch(self, el, l):
        """Match expected line el against actual output line l.

        Returns a 2-tuple (match, exact):
        - match: truthy on a match; "retry" when the line is optional and
          did not match; False otherwise
        - exact: True when no special matching annotation was involved
        """
        if el == l: # perfect match (fast)
            return True, True
        retry = False
        if el.endswith(b" (?)\n"):
            # optional line: a non-match is retried rather than fatal
            retry = "retry"
            el = el[:-5] + b"\n"
        else:
            m = optline.match(el)
            if m:
                conditions = [c for c in m.group(2).split(b' ')]

                el = m.group(1) + b"\n"
                if not self._iftest(conditions):
                    # listed feature missing, should not match
                    return "retry", False

        if el.endswith(b" (esc)\n"):
            # decode escape sequences embedded in the expected output
            if PYTHON3:
                el = el[:-7].decode('unicode_escape') + '\n'
                el = el.encode('utf-8')
            else:
                el = el[:-7].decode('string-escape') + '\n'
        if el == l or os.name == 'nt' and el[:-1] + b'\r\n' == l:
            return True, True
        if el.endswith(b" (re)\n"):
            return (TTest.rematch(el[:-6], l) or retry), False
        if el.endswith(b" (glob)\n"):
            # ignore '(glob)' added to l by 'replacements'
            if l.endswith(b" (glob)\n"):
                l = l[:-8] + b"\n"
            return (TTest.globmatch(el[:-8], l) or retry), False
        if os.altsep:
            # on Windows-like systems, also try with forward slashes
            _l = l.replace(b'\\', b'/')
            if el == _l or os.name == 'nt' and el[:-1] + b'\r\n' == _l:
                return True, True
        return retry, True
1774 1783
1775 1784 @staticmethod
1776 1785 def parsehghaveoutput(lines):
1777 1786 '''Parse hghave log lines.
1778 1787
1779 1788 Return tuple of lists (missing, failed):
1780 1789 * the missing/unknown features
1781 1790 * the features for which existence check failed'''
1782 1791 missing = []
1783 1792 failed = []
1784 1793 for line in lines:
1785 1794 if line.startswith(TTest.SKIPPED_PREFIX):
1786 1795 line = line.splitlines()[0]
1787 1796 missing.append(line[len(TTest.SKIPPED_PREFIX):].decode('utf-8'))
1788 1797 elif line.startswith(TTest.FAILED_PREFIX):
1789 1798 line = line.splitlines()[0]
1790 1799 failed.append(line[len(TTest.FAILED_PREFIX):].decode('utf-8'))
1791 1800
1792 1801 return missing, failed
1793 1802
    @staticmethod
    def _escapef(m):
        # re.sub callback: map the matched character to its escape sequence
        return TTest.ESCAPEMAP[m.group(0)]
1797 1806
    @staticmethod
    def _stringescape(s):
        # escape characters in s via the ESCAPESUB pattern/ESCAPEMAP table
        return TTest.ESCAPESUB(TTest._escapef, s)
1801 1810
# serializes console output among concurrent test threads
iolock = threading.RLock()
# lock/flag pair around first-failure reporting
# NOTE(review): firstlock's acquisition sites are outside this chunk — confirm
firstlock = threading.RLock()
firsterror = False
1805 1814
class TestResult(unittest._TextTestResult):
    """Holds results when executing via unittest."""
    # Don't worry too much about accessing the non-public _TextTestResult.
    # It is relatively common in Python testing tools.
    def __init__(self, options, *args, **kwargs):
        super(TestResult, self).__init__(*args, **kwargs)

        self._options = options

        # unittest.TestResult didn't have skipped until 2.7. We need to
        # polyfill it.
        self.skipped = []

        # We have a custom "ignored" result that isn't present in any Python
        # unittest implementation. It is very similar to skipped. It may make
        # sense to map it into skip some day.
        self.ignored = []

        # (name, cuser, csys, real, start, end) tuples; see stopTest()
        self.times = []
        self._firststarttime = None
        # Data stored for the benefit of generating xunit reports.
        self.successes = []
        self.faildata = {}

        if options.color == 'auto':
            self.color = pygmentspresent and self.stream.isatty()
        elif options.color == 'never':
            self.color = False
        else: # 'always', for testing purposes
            self.color = pygmentspresent

    def onStart(self, test):
        """ Can be overriden by custom TestResult
        """

    def onEnd(self):
        """ Can be overriden by custom TestResult
        """

    def addFailure(self, test, reason):
        """Record a failure and emit progress output (or stop on --first)."""
        self.failures.append((test, reason))

        if self._options.first:
            self.stop()
        else:
            with iolock:
                if reason == "timed out":
                    self.stream.write('t')
                else:
                    if not self._options.nodiff:
                        self.stream.write('\n')
                        # Exclude the '\n' from highlighting to lex correctly
                        formatted = 'ERROR: %s output changed\n' % test
                        self.stream.write(highlightmsg(formatted, self.color))
                    self.stream.write('!')

                self.stream.flush()

    def addSuccess(self, test):
        # iolock only guards the console output done by the superclass
        with iolock:
            super(TestResult, self).addSuccess(test)
        self.successes.append(test)

    def addError(self, test, err):
        super(TestResult, self).addError(test, err)
        if self._options.first:
            self.stop()

    # Polyfill.
    def addSkip(self, test, reason):
        self.skipped.append((test, reason))
        with iolock:
            if self.showAll:
                self.stream.writeln('skipped %s' % reason)
            else:
                self.stream.write('s')
                self.stream.flush()

    def addIgnore(self, test, reason):
        """Record a test that was ignored (filtered out) rather than run."""
        self.ignored.append((test, reason))
        with iolock:
            if self.showAll:
                self.stream.writeln('ignored %s' % reason)
            else:
                if reason not in ('not retesting', "doesn't match keyword"):
                    self.stream.write('i')
                else:
                    self.testsRun += 1
                self.stream.flush()

    def addOutputMismatch(self, test, ret, got, expected):
        """Record a mismatch in test output for a particular test.

        Prints a diff (or launches the configured viewer), optionally
        prompts interactively to accept the new output, and returns
        whether the change was accepted.
        """
        if self.shouldStop or firsterror:
            # don't print, some other test case already failed and
            # printed, we're just stale and probably failed due to our
            # temp dir getting cleaned up.
            return

        accepted = False
        lines = []

        with iolock:
            if self._options.nodiff:
                pass
            elif self._options.view:
                v = self._options.view
                subprocess.call(r'"%s" "%s" "%s"' %
                                (v, _strpath(test.refpath),
                                 _strpath(test.errpath)), shell=True)
            else:
                servefail, lines = getdiff(expected, got,
                                           test.refpath, test.errpath)
                self.stream.write('\n')
                for line in lines:
                    line = highlightdiff(line, self.color)
                    if PYTHON3:
                        self.stream.flush()
                        # diff lines are bytes; bypass the text wrapper
                        self.stream.buffer.write(line)
                        self.stream.buffer.flush()
                    else:
                        self.stream.write(line)
                        self.stream.flush()

                if servefail:
                    raise test.failureException(
                        'server failed to start (HGPORT=%s)' % test._startport)

            # handle interactive prompt without releasing iolock
            if self._options.interactive:
                if test.readrefout() != expected:
                    self.stream.write(
                        'Reference output has changed (run again to prompt '
                        'changes)')
                else:
                    self.stream.write('Accept this change? [n] ')
                    self.stream.flush()
                    answer = sys.stdin.readline().strip()
                    if answer.lower() in ('y', 'yes'):
                        if test.path.endswith(b'.t'):
                            rename(test.errpath, test.path)
                        else:
                            rename(test.errpath, '%s.out' % test.path)
                        accepted = True
            if not accepted:
                self.faildata[test.name] = b''.join(lines)

        return accepted

    def startTest(self, test):
        super(TestResult, self).startTest(test)

        # os.times module computes the user time and system time spent by
        # child's processes along with real elapsed time taken by a process.
        # This module has one limitation. It can only work for Linux user
        # and not for Windows.
        test.started = os.times()
        if self._firststarttime is None: # thread racy but irrelevant
            self._firststarttime = test.started[4]

    def stopTest(self, test, interrupted=False):
        super(TestResult, self).stopTest(test)

        test.stopped = os.times()

        starttime = test.started
        endtime = test.stopped
        origin = self._firststarttime
        self.times.append((test.name,
                           endtime[2] - starttime[2], # user space CPU time
                           endtime[3] - starttime[3], # sys space CPU time
                           endtime[4] - starttime[4], # real time
                           starttime[4] - origin, # start date in run context
                           endtime[4] - origin, # end date in run context
                           ))

        if interrupted:
            with iolock:
                self.stream.writeln('INTERRUPTED: %s (after %d seconds)' % (
                    test.name, self.times[-1][3]))
1985 1994
def getTestResult():
    """Return the TestResult class to instantiate.

    If the CUSTOM_TEST_RESULT environment variable is set, import the
    module it names and use that module's TestResult attribute; otherwise
    use this module's TestResult.
    """
    if "CUSTOM_TEST_RESULT" not in os.environ:
        return TestResult
    custom = __import__(os.environ["CUSTOM_TEST_RESULT"])
    return custom.TestResult
1995 2004
class TestSuite(unittest.TestSuite):
    """Custom unittest TestSuite that knows how to execute Mercurial tests."""

    def __init__(self, testdir, jobs=1, whitelist=None, blacklist=None,
                 retest=False, keywords=None, loop=False, runs_per_test=1,
                 loadtest=None, showchannels=False,
                 *args, **kwargs):
        """Create a new instance that can run tests with a configuration.

        testdir specifies the directory where tests are executed from. This
        is typically the ``tests`` directory from Mercurial's source
        repository.

        jobs specifies the number of jobs to run concurrently. Each test
        executes on its own thread. Tests actually spawn new processes, so
        state mutation should not be an issue.

        If there is only one job, it will use the main thread.

        whitelist and blacklist denote tests that have been whitelisted and
        blacklisted, respectively. These arguments don't belong in TestSuite.
        Instead, whitelist and blacklist should be handled by the thing that
        populates the TestSuite with tests. They are present to preserve
        backwards compatible behavior which reports skipped tests as part
        of the results.

        retest denotes whether to retest failed tests. This arguably belongs
        outside of TestSuite.

        keywords denotes key words that will be used to filter which tests
        to execute. This arguably belongs outside of TestSuite.

        loop denotes whether to loop over tests forever.
        """
        super(TestSuite, self).__init__(*args, **kwargs)

        self._jobs = jobs
        self._whitelist = whitelist
        self._blacklist = blacklist
        self._retest = retest
        self._keywords = keywords
        self._loop = loop
        self._runs_per_test = runs_per_test
        self._loadtest = loadtest
        self._showchannels = showchannels

    def run(self, result):
        # We have a number of filters that need to be applied. We do this
        # here instead of inside Test because it makes the running logic for
        # Test simpler.
        tests = []
        num_tests = [0]
        for test in self._tests:
            def get():
                # hand out a fresh Test instance when requested (late
                # binding of `test` is intentional: called before next loop)
                num_tests[0] += 1
                if getattr(test, 'should_reload', False):
                    return self._loadtest(test, num_tests[0])
                return test
            if not os.path.exists(test.path):
                result.addSkip(test, "Doesn't exist")
                continue

            # whitelisted tests bypass the blacklist/retest/keyword filters
            if not (self._whitelist and test.bname in self._whitelist):
                if self._blacklist and test.bname in self._blacklist:
                    result.addSkip(test, 'blacklisted')
                    continue

                if self._retest and not os.path.exists(test.errpath):
                    result.addIgnore(test, 'not retesting')
                    continue

                if self._keywords:
                    with open(test.path, 'rb') as f:
                        t = f.read().lower() + test.bname.lower()
                    ignored = False
                    for k in self._keywords.lower().split():
                        if k not in t:
                            result.addIgnore(test, "doesn't match keyword")
                            ignored = True
                            break

                    if ignored:
                        continue
            for _ in xrange(self._runs_per_test):
                tests.append(get())

        runtests = list(tests)
        done = queue.Queue()
        running = 0

        channels = [""] * self._jobs

        def job(test, result):
            """Run one test on a free output channel; signal via `done`."""
            for n, v in enumerate(channels):
                if not v:
                    channel = n
                    break
            else:
                raise ValueError('Could not find output channel')
            channels[channel] = "=" + test.name[5:].split(".")[0]
            try:
                test(result)
                done.put(None)
            except KeyboardInterrupt:
                pass
            except: # re-raises
                done.put(('!', test, 'run-test raised an error, see traceback'))
                raise
            finally:
                try:
                    channels[channel] = ''
                except IndexError:
                    pass

        def stat():
            """Periodically print the per-channel activity display."""
            count = 0
            while channels:
                d = '\n%03s  ' % count
                for n, v in enumerate(channels):
                    if v:
                        d += v[0]
                        channels[n] = v[1:] or '.'
                    else:
                        d += ' '
                    d += ' '
                with iolock:
                    sys.stdout.write(d + '  ')
                    sys.stdout.flush()
                for x in xrange(10):
                    if channels:
                        time.sleep(.1)
                count += 1

        stoppedearly = False

        if self._showchannels:
            statthread = threading.Thread(target=stat, name="stat")
            statthread.start()

        try:
            while tests or running:
                # drain completions when saturated or out of new tests
                if not done.empty() or running == self._jobs or not tests:
                    try:
                        done.get(True, 1)
                        running -= 1
                        if result and result.shouldStop:
                            stoppedearly = True
                            break
                    except queue.Empty:
                        continue
                if tests and not running == self._jobs:
                    test = tests.pop(0)
                    if self._loop:
                        if getattr(test, 'should_reload', False):
                            num_tests[0] += 1
                            tests.append(
                                self._loadtest(test, num_tests[0]))
                        else:
                            tests.append(test)
                    if self._jobs == 1:
                        job(test, result)
                    else:
                        t = threading.Thread(target=job, name=test.name,
                                             args=(test, result))
                        t.start()
                    running += 1

            # If we stop early we still need to wait on started tests to
            # finish. Otherwise, there is a race between the test completing
            # and the test's cleanup code running. This could result in the
            # test reporting incorrect.
            if stoppedearly:
                while running:
                    try:
                        done.get(True, 1)
                        running -= 1
                    except queue.Empty:
                        continue
        except KeyboardInterrupt:
            for test in runtests:
                test.abort()

        channels = []

        return result
2181 2190
2182 2191 # Save the most recent 5 wall-clock runtimes of each test to a
2183 2192 # human-readable text file named .testtimes. Tests are sorted
2184 2193 # alphabetically, while times for each test are listed from oldest to
2185 2194 # newest.
2186 2195
def loadtimes(outputdir):
    """Load per-test runtime history from outputdir/.testtimes.

    Returns a list of (testname, [runtime, ...]) tuples, runtimes ordered
    oldest to newest. A missing file yields an empty list; malformed lines
    are skipped instead of raising.
    """
    times = []
    try:
        with open(os.path.join(outputdir, b'.testtimes')) as fp:
            for line in fp:
                m = re.match('(.*?) ([0-9. ]+)', line)
                if m is None:
                    # Fix: a corrupt/truncated line used to raise
                    # AttributeError on m.group(); tolerate it instead.
                    continue
                times.append((m.group(1),
                              [float(t) for t in m.group(2).split()]))
    except IOError as err:
        if err.errno != errno.ENOENT:
            raise
    return times
2199 2208
def savetimes(outputdir, result):
    """Persist wall-clock runtimes from `result` into outputdir/.testtimes.

    Keeps at most the 5 most recent runtimes per test, skips tests that
    were skipped this run, and replaces the file via a temporary file.
    """
    maxruns = 5
    saved = dict(loadtimes(outputdir))
    skipped = {str(t[0]) for t in result.skipped}
    for tdata in result.times:
        name, real = tdata[0], tdata[3]
        if name in skipped:
            continue
        history = saved.setdefault(name, [])
        history.append(real)
        # retain only the most recent runs
        history[:] = history[-maxruns:]

    fd, tmpname = tempfile.mkstemp(prefix=b'.testtimes',
                                   dir=outputdir, text=True)
    with os.fdopen(fd, 'w') as fp:
        for name, history in sorted(saved.items()):
            cols = ' '.join('%.3f' % (t,) for t in history)
            fp.write('%s %s\n' % (name, cols))
    timepath = os.path.join(outputdir, b'.testtimes')
    try:
        os.unlink(timepath)
    except OSError:
        pass
    try:
        os.rename(tmpname, timepath)
    except OSError:
        pass
2225 2234
class TextTestRunner(unittest.TextTestRunner):
    """Custom unittest test runner that uses appropriate settings."""

    def __init__(self, runner, *args, **kwargs):
        super(TextTestRunner, self).__init__(*args, **kwargs)

        self._runner = runner

        self._result = getTestResult()(self._runner.options, self.stream,
                                       self.descriptions, self.verbosity)

    def listtests(self, test):
        """Print test names instead of running them; still emits reports."""
        test = sorted(test, key=lambda t: t.name)

        self._result.onStart(test)

        for t in test:
            print(t.name)
            self._result.addSuccess(t)

        if self._runner.options.xunit:
            with open(self._runner.options.xunit, "wb") as xuf:
                self._writexunit(self._result, xuf)

        if self._runner.options.json:
            jsonpath = os.path.join(self._runner._outputdir, b'report.json')
            with open(jsonpath, 'w') as fp:
                self._writejson(self._result, fp)

        return self._result

    def run(self, test):
        """Run the suite, then print summaries and write requested reports."""
        self._result.onStart(test)
        test(self._result)

        failed = len(self._result.failures)
        skipped = len(self._result.skipped)
        ignored = len(self._result.ignored)

        with iolock:
            self.stream.writeln('')

            if not self._runner.options.noskips:
                for test, msg in sorted(self._result.skipped,
                                        key=lambda s: s[0].name):
                    formatted = 'Skipped %s: %s\n' % (test.name, msg)
                    msg = highlightmsg(formatted, self._result.color)
                    self.stream.write(msg)
            for test, msg in sorted(self._result.failures,
                                    key=lambda f: f[0].name):
                formatted = 'Failed %s: %s\n' % (test.name, msg)
                self.stream.write(highlightmsg(formatted, self._result.color))
            for test, msg in sorted(self._result.errors,
                                    key=lambda e: e[0].name):
                self.stream.writeln('Errored %s: %s' % (test.name, msg))

            if self._runner.options.xunit:
                with open(self._runner.options.xunit, "wb") as xuf:
                    self._writexunit(self._result, xuf)

            if self._runner.options.json:
                jsonpath = os.path.join(self._runner._outputdir, b'report.json')
                with open(jsonpath, 'w') as fp:
                    self._writejson(self._result, fp)

            self._runner._checkhglib('Tested')

            savetimes(self._runner._outputdir, self._result)

            if failed and self._runner.options.known_good_rev:
                self._bisecttests(t for t, m in self._result.failures)
            self.stream.writeln(
                '# Ran %d tests, %d skipped, %d failed.'
                % (self._result.testsRun, skipped + ignored, failed))
            if failed:
                self.stream.writeln('python hash seed: %s' %
                                    os.environ['PYTHONHASHSEED'])
            if self._runner.options.time:
                self.printtimes(self._result.times)

            if self._runner.options.exceptions:
                exceptions = aggregateexceptions(
                    os.path.join(self._runner._outputdir, b'exceptions'))

                self.stream.writeln('Exceptions Report:')
                self.stream.writeln('%d total from %d frames' %
                                    (exceptions['total'],
                                     len(exceptions['exceptioncounts'])))
                combined = exceptions['combined']
                for key in sorted(combined, key=combined.get, reverse=True):
                    frame, line, exc = key
                    totalcount, testcount, leastcount, leasttest = combined[key]

                    self.stream.writeln('%d (%d tests)\t%s: %s (%s - %d total)'
                                        % (totalcount,
                                           testcount,
                                           frame, exc,
                                           leasttest, leastcount))

            self.stream.flush()

        return self._result

    def _bisecttests(self, tests):
        """Use `hg bisect` to locate the changeset that broke each test."""
        bisectcmd = ['hg', 'bisect']
        bisectrepo = self._runner.options.bisect_repo
        if bisectrepo:
            bisectcmd.extend(['-R', os.path.abspath(bisectrepo)])
        def pread(args):
            # run args with HGPLAIN=1 and return combined stdout/stderr
            env = os.environ.copy()
            env['HGPLAIN'] = '1'
            p = subprocess.Popen(args, stderr=subprocess.STDOUT,
                                 stdout=subprocess.PIPE, env=env)
            data = p.stdout.read()
            p.wait()
            return data
        for test in tests:
            pread(bisectcmd + ['--reset']),
            pread(bisectcmd + ['--bad', '.'])
            pread(bisectcmd + ['--good', self._runner.options.known_good_rev])
            # TODO: we probably need to forward more options
            # that alter hg's behavior inside the tests.
            opts = ''
            withhg = self._runner.options.with_hg
            if withhg:
                opts += ' --with-hg=%s ' % shellquote(_strpath(withhg))
            rtc = '%s %s %s %s' % (sysexecutable, sys.argv[0], opts,
                                   test)
            data = pread(bisectcmd + ['--command', rtc])
            m = re.search(
                (br'\nThe first (?P<goodbad>bad|good) revision '
                 br'is:\nchangeset: +\d+:(?P<node>[a-f0-9]+)\n.*\n'
                 br'summary: +(?P<summary>[^\n]+)\n'),
                data, (re.MULTILINE | re.DOTALL))
            if m is None:
                self.stream.writeln(
                    'Failed to identify failure point for %s' % test)
                continue
            dat = m.groupdict()
            verb = 'broken' if dat['goodbad'] == b'bad' else 'fixed'
            self.stream.writeln(
                '%s %s by %s (%s)' % (
                    test, verb, dat['node'].decode('ascii'),
                    dat['summary'].decode('utf8', 'ignore')))

    def printtimes(self, times):
        # iolock held by run
        self.stream.writeln('# Producing time report')
        times.sort(key=lambda t: (t[3]))
        cols = '%7.3f %7.3f %7.3f %7.3f %7.3f   %s'
        self.stream.writeln('%-7s %-7s %-7s %-7s %-7s   %s' %
                            ('start', 'end', 'cuser', 'csys', 'real', 'Test'))
        for tdata in times:
            test = tdata[0]
            cuser, csys, real, start, end = tdata[1:6]
            self.stream.writeln(cols % (start, end, cuser, csys, real, test))

    @staticmethod
    def _writexunit(result, outf):
        """Write an xunit-style XML report for `result` to `outf`."""
        # See http://llg.cubic.org/docs/junit/ for a reference.
        timesd = dict((t[0], t[3]) for t in result.times)
        doc = minidom.Document()
        s = doc.createElement('testsuite')
        s.setAttribute('errors', "0") # TODO
        s.setAttribute('failures', str(len(result.failures)))
        s.setAttribute('name', 'run-tests')
        s.setAttribute('skipped', str(len(result.skipped) +
                                      len(result.ignored)))
        s.setAttribute('tests', str(result.testsRun))
        doc.appendChild(s)
        for tc in result.successes:
            t = doc.createElement('testcase')
            t.setAttribute('name', tc.name)
            tctime = timesd.get(tc.name)
            if tctime is not None:
                t.setAttribute('time', '%.3f' % tctime)
            s.appendChild(t)
        for tc, err in sorted(result.faildata.items()):
            t = doc.createElement('testcase')
            t.setAttribute('name', tc)
            tctime = timesd.get(tc)
            if tctime is not None:
                t.setAttribute('time', '%.3f' % tctime)
            # createCDATASection expects a unicode or it will
            # convert using default conversion rules, which will
            # fail if string isn't ASCII.
            err = cdatasafe(err).decode('utf-8', 'replace')
            cd = doc.createCDATASection(err)
            # Use 'failure' here instead of 'error' to match errors = 0,
            # failures = len(result.failures) in the testsuite element.
            failelem = doc.createElement('failure')
            failelem.setAttribute('message', 'output changed')
            failelem.setAttribute('type', 'output-mismatch')
            failelem.appendChild(cd)
            t.appendChild(failelem)
            s.appendChild(t)
        for tc, message in result.skipped:
            # According to the schema, 'skipped' has no attributes. So store
            # the skip message as a text node instead.
            t = doc.createElement('testcase')
            t.setAttribute('name', tc.name)
            binmessage = message.encode('utf-8')
            message = cdatasafe(binmessage).decode('utf-8', 'replace')
            cd = doc.createCDATASection(message)
            skipelem = doc.createElement('skipped')
            skipelem.appendChild(cd)
            t.appendChild(skipelem)
            s.appendChild(t)
        outf.write(doc.toprettyxml(indent='  ', encoding='utf-8'))

    @staticmethod
    def _writejson(result, outf):
        """Write a JSON report ("testreport = {...}") for `result` to `outf`."""
        timesd = {}
        for tdata in result.times:
            test = tdata[0]
            timesd[test] = tdata[1:]

        outcome = {}
        groups = [('success', ((tc, None)
                   for tc in result.successes)),
                  ('failure', result.failures),
                  ('skip', result.skipped)]
        for res, testcases in groups:
            for tc, __ in testcases:
                if tc.name in timesd:
                    diff = result.faildata.get(tc.name, b'')
                    try:
                        diff = diff.decode('unicode_escape')
                    except UnicodeDecodeError as e:
                        diff = '%r decoding diff, sorry' % e
                    tres = {'result': res,
                            'time': ('%0.3f' % timesd[tc.name][2]),
                            'cuser': ('%0.3f' % timesd[tc.name][0]),
                            'csys': ('%0.3f' % timesd[tc.name][1]),
                            'start': ('%0.3f' % timesd[tc.name][3]),
                            'end': ('%0.3f' % timesd[tc.name][4]),
                            'diff': diff,
                            }
                else:
                    # blacklisted test
                    tres = {'result': res}

                outcome[tc.name] = tres
        jsonout = json.dumps(outcome, sort_keys=True, indent=4,
                             separators=(',', ': '))
        outf.writelines(("testreport =", jsonout))
2472 2481
2473 2482 def sorttests(testdescs, previoustimes, shuffle=False):
2474 2483 """Do an in-place sort of tests."""
2475 2484 if shuffle:
2476 2485 random.shuffle(testdescs)
2477 2486 return
2478 2487
2479 2488 if previoustimes:
2480 2489 def sortkey(f):
2481 2490 f = f['path']
2482 2491 if f in previoustimes:
2483 2492 # Use most recent time as estimate
2484 2493 return -previoustimes[f][-1]
2485 2494 else:
2486 2495 # Default to a rather arbitrary value of 1 second for new tests
2487 2496 return -1.0
2488 2497 else:
2489 2498 # keywords for slow tests
2490 2499 slow = {b'svn': 10,
2491 2500 b'cvs': 10,
2492 2501 b'hghave': 10,
2493 2502 b'largefiles-update': 10,
2494 2503 b'run-tests': 10,
2495 2504 b'corruption': 10,
2496 2505 b'race': 10,
2497 2506 b'i18n': 10,
2498 2507 b'check': 100,
2499 2508 b'gendoc': 100,
2500 2509 b'contrib-perf': 200,
2501 2510 }
2502 2511 perf = {}
2503 2512
2504 2513 def sortkey(f):
2505 2514 # run largest tests first, as they tend to take the longest
2506 2515 f = f['path']
2507 2516 try:
2508 2517 return perf[f]
2509 2518 except KeyError:
2510 2519 try:
2511 2520 val = -os.stat(f).st_size
2512 2521 except OSError as e:
2513 2522 if e.errno != errno.ENOENT:
2514 2523 raise
2515 2524 perf[f] = -1e9 # file does not exist, tell early
2516 2525 return -1e9
2517 2526 for kw, mul in slow.items():
2518 2527 if kw in f:
2519 2528 val *= mul
2520 2529 if f.endswith(b'.py'):
2521 2530 val /= 10.0
2522 2531 perf[f] = val / 1000.0
2523 2532 return perf[f]
2524 2533
2525 2534 testdescs.sort(key=sortkey)
2526 2535
2527 2536 class TestRunner(object):
2528 2537 """Holds context for executing tests.
2529 2538
2530 2539 Tests rely on a lot of state. This object holds it for them.
2531 2540 """
2532 2541
    # Programs required to run tests.
    # NOTE(review): presumably validated by _checktools() before a run —
    # confirm against the rest of the file.
    REQUIREDTOOLS = [
        b'diff',
        b'grep',
        b'unzip',
        b'gunzip',
        b'bunzip2',
        b'sed',
    ]

    # Maps file extensions to test class.
    TESTTYPES = [
        (b'.py', PythonTest),
        (b'.t', TTest),
    ]
2548 2557
    def __init__(self):
        # Parsed command line options; populated by run().
        self.options = None
        self._hgroot = None
        self._testdir = None
        self._outputdir = None
        self._hgtmp = None
        self._installdir = None
        self._bindir = None
        self._tmpbinddir = None
        self._pythondir = None
        self._coveragefile = None
        # Files created during the run that should be cleaned up afterwards.
        self._createdfiles = []
        self._hgcommand = None
        self._hgpath = None
        # Port allocation state for tests needing network ports.
        self._portoffset = 0
        self._ports = {}
2565 2574
    def run(self, args, parser=None):
        """Run the test suite."""
        # force a predictable umask for files the harness creates
        oldmask = os.umask(0o22)
        try:
            parser = parser or getparser()
            options = parseargs(args, parser)
            tests = [_bytespath(a) for a in options.tests]
            if options.test_list is not None:
                # --test-list files contain one test name per line
                for listfile in options.test_list:
                    with open(listfile, 'rb') as f:
                        tests.extend(t for t in f.read().splitlines() if t)
            self.options = options

            self._checktools()
            testdescs = self.findtests(tests)
            if options.profile_runner:
                import statprof
                statprof.start()
            result = self._run(testdescs)
            if options.profile_runner:
                statprof.stop()
                statprof.display()
            return result

        finally:
            os.umask(oldmask)
2592 2601
    def _run(self, testdescs):
        """Prepare the test environment and run ``testdescs``.

        Sets up TESTDIR/HGTMP/BINDIR/PATH/PYTHONPATH and the hg (or chg)
        binary to test, then delegates to ``_runtests``. Returns the exit
        code (0 on success, 1 on error/failure). All environment mutation
        here is order-sensitive: later steps read variables set earlier.
        """
        testdir = getcwdb()
        self._testdir = osenvironb[b'TESTDIR'] = getcwdb()
        # assume all tests in same folder for now
        if testdescs:
            pathname = os.path.dirname(testdescs[0]['path'])
            if pathname:
                testdir = os.path.join(testdir, pathname)
                self._testdir = osenvironb[b'TESTDIR'] = testdir
        if self.options.outputdir:
            self._outputdir = canonpath(_bytespath(self.options.outputdir))
        else:
            self._outputdir = getcwdb()
            if testdescs and pathname:
                self._outputdir = os.path.join(self._outputdir, pathname)
        # Optionally order tests by how long they took on a previous run.
        previoustimes = {}
        if self.options.order_by_runtime:
            previoustimes = dict(loadtimes(self._outputdir))
        sorttests(testdescs, previoustimes, shuffle=self.options.random)

        if 'PYTHONHASHSEED' not in os.environ:
            # use a random python hash seed all the time
            # we do the randomness ourself to know what seed is used
            os.environ['PYTHONHASHSEED'] = str(random.getrandbits(32))

        if self.options.tmpdir:
            self.options.keep_tmpdir = True
            tmpdir = _bytespath(self.options.tmpdir)
            if os.path.exists(tmpdir):
                # Meaning of tmpdir has changed since 1.3: we used to create
                # HGTMP inside tmpdir; now HGTMP is tmpdir. So fail if
                # tmpdir already exists.
                print("error: temp dir %r already exists" % tmpdir)
                return 1

            os.makedirs(tmpdir)
        else:
            d = None
            if os.name == 'nt':
                # without this, we get the default temp dir location, but
                # in all lowercase, which causes troubles with paths (issue3490)
                d = osenvironb.get(b'TMP', None)
            tmpdir = tempfile.mkdtemp(b'', b'hgtests.', d)

        self._hgtmp = osenvironb[b'HGTMP'] = (
            os.path.realpath(tmpdir))

        if self.options.with_hg:
            # Test an externally-provided hg binary; no install step.
            self._installdir = None
            whg = self.options.with_hg
            self._bindir = os.path.dirname(os.path.realpath(whg))
            assert isinstance(self._bindir, bytes)
            self._hgcommand = os.path.basename(whg)
            self._tmpbindir = os.path.join(self._hgtmp, b'install', b'bin')
            os.makedirs(self._tmpbindir)

            normbin = os.path.normpath(os.path.abspath(whg))
            normbin = normbin.replace(os.sep.encode('ascii'), b'/')

            # Other Python scripts in the test harness need to
            # `import mercurial`. If `hg` is a Python script, we assume
            # the Mercurial modules are relative to its path and tell the tests
            # to load Python modules from its directory.
            with open(whg, 'rb') as fh:
                initial = fh.read(1024)

            if re.match(b'#!.*python', initial):
                self._pythondir = self._bindir
            # If it looks like our in-repo Rust binary, use the source root.
            # This is a bit hacky. But rhg is still not supported outside the
            # source directory. So until it is, do the simple thing.
            elif re.search(b'/rust/target/[^/]+/hg', normbin):
                self._pythondir = os.path.dirname(self._testdir)
            # Fall back to the legacy behavior.
            else:
                self._pythondir = self._bindir

        else:
            # Default: install hg into a temporary prefix under HGTMP.
            self._installdir = os.path.join(self._hgtmp, b"install")
            self._bindir = os.path.join(self._installdir, b"bin")
            self._hgcommand = b'hg'
            self._tmpbindir = self._bindir
            self._pythondir = os.path.join(self._installdir, b"lib", b"python")

        # Force the use of hg.exe instead of relying on MSYS to recognize hg is
        # a python script and feed it to python.exe. Legacy stdio is force
        # enabled by hg.exe, and this is a more realistic way to launch hg
        # anyway.
        if os.name == 'nt' and not self._hgcommand.endswith(b'.exe'):
            self._hgcommand += b'.exe'

        # set CHGHG, then replace "hg" command by "chg"
        chgbindir = self._bindir
        if self.options.chg or self.options.with_chg:
            osenvironb[b'CHGHG'] = os.path.join(self._bindir, self._hgcommand)
        else:
            osenvironb.pop(b'CHGHG', None)  # drop flag for hghave
        if self.options.chg:
            self._hgcommand = b'chg'
        elif self.options.with_chg:
            chgbindir = os.path.dirname(os.path.realpath(self.options.with_chg))
            self._hgcommand = os.path.basename(self.options.with_chg)

        osenvironb[b"BINDIR"] = self._bindir
        osenvironb[b"PYTHON"] = PYTHON

        fileb = _bytespath(__file__)
        runtestdir = os.path.abspath(os.path.dirname(fileb))
        osenvironb[b'RUNTESTDIR'] = runtestdir
        if PYTHON3:
            sepb = _bytespath(os.pathsep)
        else:
            sepb = os.pathsep
        # Build PATH so the hg under test and the harness helpers win.
        path = [self._bindir, runtestdir] + osenvironb[b"PATH"].split(sepb)
        if os.path.islink(__file__):
            # test helper will likely be at the end of the symlink
            realfile = os.path.realpath(fileb)
            realdir = os.path.abspath(os.path.dirname(realfile))
            path.insert(2, realdir)
        if chgbindir != self._bindir:
            path.insert(1, chgbindir)
        if self._testdir != runtestdir:
            path = [self._testdir] + path
        if self._tmpbindir != self._bindir:
            path = [self._tmpbindir] + path
        osenvironb[b"PATH"] = sepb.join(path)

        # Include TESTDIR in PYTHONPATH so that out-of-tree extensions
        # can run .../tests/run-tests.py test-foo where test-foo
        # adds an extension to HGRC. Also include run-test.py directory to
        # import modules like heredoctest.
        pypath = [self._pythondir, self._testdir, runtestdir]
        # We have to augment PYTHONPATH, rather than simply replacing
        # it, in case external libraries are only available via current
        # PYTHONPATH. (In particular, the Subversion bindings on OS X
        # are in /opt/subversion.)
        oldpypath = osenvironb.get(IMPL_PATH)
        if oldpypath:
            pypath.append(oldpypath)
        osenvironb[IMPL_PATH] = sepb.join(pypath)

        if self.options.pure:
            os.environ["HGTEST_RUN_TESTS_PURE"] = "--pure"
            os.environ["HGMODULEPOLICY"] = "py"

        if self.options.allow_slow_tests:
            os.environ["HGTEST_SLOW"] = "slow"
        elif 'HGTEST_SLOW' in os.environ:
            del os.environ['HGTEST_SLOW']

        self._coveragefile = os.path.join(self._testdir, b'.coverage')

        if self.options.exceptions:
            # Collect per-test exception reports via the logexceptions
            # extension; start from an empty directory each run.
            exceptionsdir = os.path.join(self._outputdir, b'exceptions')
            try:
                os.makedirs(exceptionsdir)
            except OSError as e:
                if e.errno != errno.EEXIST:
                    raise

            # Remove all existing exception reports.
            for f in os.listdir(exceptionsdir):
                os.unlink(os.path.join(exceptionsdir, f))

            osenvironb[b'HGEXCEPTIONSDIR'] = exceptionsdir
            logexceptions = os.path.join(self._testdir, b'logexceptions.py')
            self.options.extra_config_opt.append(
                'extensions.logexceptions=%s' % logexceptions.decode('utf-8'))

        vlog("# Using TESTDIR", self._testdir)
        vlog("# Using RUNTESTDIR", osenvironb[b'RUNTESTDIR'])
        vlog("# Using HGTMP", self._hgtmp)
        vlog("# Using PATH", os.environ["PATH"])
        vlog("# Using", IMPL_PATH, osenvironb[IMPL_PATH])
        vlog("# Writing to directory", self._outputdir)

        try:
            return self._runtests(testdescs) or 0
        finally:
            # Give child processes a moment to exit before cleanup.
            time.sleep(.1)
            self._cleanup()
2774 2783
    def findtests(self, args):
        """Finds possible test files from arguments.

        If you wish to inject custom tests into the test harness, this would
        be a good function to monkeypatch or override in a derived class.

        Returns a list of dicts with a 'path' key (bytes) and, for .t files
        with test cases, a 'case' key holding a list of case name components.
        """
        if not args:
            if self.options.changed:
                # Only run tests touched by the given revset
                # (via `hg status`, NUL-separated output).
                proc = Popen4(b'hg st --rev "%s" -man0 .' %
                              _bytespath(self.options.changed), None, 0)
                stdout, stderr = proc.communicate()
                args = stdout.strip(b'\0').split(b'\0')
            else:
                args = os.listdir(b'.')

        # Expand directory arguments into their entries.
        expanded_args = []
        for arg in args:
            if os.path.isdir(arg):
                if not arg.endswith(b'/'):
                    arg += b'/'
                expanded_args.extend([arg + a for a in os.listdir(arg)])
            else:
                expanded_args.append(arg)
        args = expanded_args

        # Matches e.g. "test-foo.t#case1#case2" -> (basename, "case1#case2").
        testcasepattern = re.compile(
            br'([\w-]+\.t|py)(?:#([a-zA-Z0-9_\-\.#]+))')
        tests = []
        for t in args:
            case = []

            if not (os.path.basename(t).startswith(b'test-')
                    and (t.endswith(b'.py') or t.endswith(b'.t'))):

                # Not a plain test file name; try the "#case" syntax,
                # otherwise skip the argument entirely.
                m = testcasepattern.match(os.path.basename(t))
                if m is not None:
                    t_basename, casestr = m.groups()
                    t = os.path.join(os.path.dirname(t), t_basename)
                    if casestr:
                        case = casestr.split(b'#')
                else:
                    continue

            if t.endswith(b'.t'):
                # .t file may contain multiple test cases
                casedimensions = parsettestcases(t)
                if casedimensions:
                    # Compute the cartesian product of all case dimensions.
                    cases = []
                    def addcases(case, casedimensions):
                        if not casedimensions:
                            cases.append(case)
                        else:
                            for c in casedimensions[0]:
                                addcases(case + [c], casedimensions[1:])
                    addcases([], casedimensions)
                    # If a specific case was requested, restrict to it
                    # (if valid) or drop all cases (if invalid).
                    if case and case in cases:
                        cases = [case]
                    elif case:
                        # Ignore invalid cases
                        cases = []
                    else:
                        pass
                    tests += [{'path': t, 'case': c} for c in sorted(cases)]
                else:
                    tests.append({'path': t})
            else:
                tests.append({'path': t})
        return tests
2843 2852
    def _runtests(self, testdescs):
        """Install hg if needed, build the test suite and execute it.

        Returns 1 if any test failed/errored or the run was interrupted,
        otherwise returns None (the caller maps that to exit code 0).
        """
        def _reloadtest(test, i):
            # convert a test back to its description dict
            desc = {'path': test.path}
            case = getattr(test, '_case', [])
            if case:
                desc['case'] = case
            return self._gettest(desc, i)

        try:
            if self.options.restart:
                # Skip tests that already passed on a previous run: drop
                # leading tests until one with an .err file is found.
                orig = list(testdescs)
                while testdescs:
                    desc = testdescs[0]
                    # desc['path'] is a relative path
                    if 'case' in desc:
                        casestr = b'#'.join(desc['case'])
                        errpath = b'%s#%s.err' % (desc['path'], casestr)
                    else:
                        errpath = b'%s.err' % desc['path']
                    errpath = os.path.join(self._outputdir, errpath)
                    if os.path.exists(errpath):
                        break
                    testdescs.pop(0)
                if not testdescs:
                    print("running all tests")
                    testdescs = orig

            tests = [self._gettest(d, i) for i, d in enumerate(testdescs)]
            num_tests = len(tests) * self.options.runs_per_test

            jobs = min(num_tests, self.options.jobs)

            failed = False
            kws = self.options.keywords
            if kws is not None and PYTHON3:
                kws = kws.encode('utf-8')

            suite = TestSuite(self._testdir,
                              jobs=jobs,
                              whitelist=self.options.whitelisted,
                              blacklist=self.options.blacklist,
                              retest=self.options.retest,
                              keywords=kws,
                              loop=self.options.loop,
                              runs_per_test=self.options.runs_per_test,
                              showchannels=self.options.showchannels,
                              tests=tests, loadtest=_reloadtest)
            verbosity = 1
            if self.options.list_tests:
                verbosity = 0
            elif self.options.verbose:
                verbosity = 2
            runner = TextTestRunner(self, verbosity=verbosity)

            if self.options.list_tests:
                result = runner.listtests(suite)
            else:
                # Install (or locate) the hg under test before running.
                if self._installdir:
                    self._installhg()
                    self._checkhglib("Testing")
                else:
                    self._usecorrectpython()
                if self.options.chg:
                    assert self._installdir
                    self._installchg()

                log('running %d tests using %d parallel processes' % (
                    num_tests, jobs))

                result = runner.run(suite)

            if result.failures or result.errors:
                failed = True

            result.onEnd()

            if self.options.anycoverage:
                self._outputcoverage()
        except KeyboardInterrupt:
            failed = True
            print("\ninterrupted!")

        if failed:
            return 1
2929 2938
2930 2939 def _getport(self, count):
2931 2940 port = self._ports.get(count) # do we have a cached entry?
2932 2941 if port is None:
2933 2942 portneeded = 3
2934 2943 # above 100 tries we just give up and let test reports failure
2935 2944 for tries in xrange(100):
2936 2945 allfree = True
2937 2946 port = self.options.port + self._portoffset
2938 2947 for idx in xrange(portneeded):
2939 2948 if not checkportisavailable(port + idx):
2940 2949 allfree = False
2941 2950 break
2942 2951 self._portoffset += portneeded
2943 2952 if allfree:
2944 2953 break
2945 2954 self._ports[count] = port
2946 2955 return port
2947 2956
2948 2957 def _gettest(self, testdesc, count):
2949 2958 """Obtain a Test by looking at its filename.
2950 2959
2951 2960 Returns a Test instance. The Test may not be runnable if it doesn't
2952 2961 map to a known type.
2953 2962 """
2954 2963 path = testdesc['path']
2955 2964 lctest = path.lower()
2956 2965 testcls = Test
2957 2966
2958 2967 for ext, cls in self.TESTTYPES:
2959 2968 if lctest.endswith(ext):
2960 2969 testcls = cls
2961 2970 break
2962 2971
2963 2972 refpath = os.path.join(getcwdb(), path)
2964 2973 tmpdir = os.path.join(self._hgtmp, b'child%d' % count)
2965 2974
2966 2975 # extra keyword parameters. 'case' is used by .t tests
2967 2976 kwds = dict((k, testdesc[k]) for k in ['case'] if k in testdesc)
2968 2977
2969 2978 t = testcls(refpath, self._outputdir, tmpdir,
2970 2979 keeptmpdir=self.options.keep_tmpdir,
2971 2980 debug=self.options.debug,
2972 2981 first=self.options.first,
2973 2982 timeout=self.options.timeout,
2974 2983 startport=self._getport(count),
2975 2984 extraconfigopts=self.options.extra_config_opt,
2976 2985 py3warnings=self.options.py3_warnings,
2977 2986 shell=self.options.shell,
2978 2987 hgcommand=self._hgcommand,
2979 2988 usechg=bool(self.options.with_chg or self.options.chg),
2980 2989 useipv6=useipv6, **kwds)
2981 2990 t.should_reload = True
2982 2991 return t
2983 2992
2984 2993 def _cleanup(self):
2985 2994 """Clean up state from this test invocation."""
2986 2995 if self.options.keep_tmpdir:
2987 2996 return
2988 2997
2989 2998 vlog("# Cleaning up HGTMP", self._hgtmp)
2990 2999 shutil.rmtree(self._hgtmp, True)
2991 3000 for f in self._createdfiles:
2992 3001 try:
2993 3002 os.remove(f)
2994 3003 except OSError:
2995 3004 pass
2996 3005
2997 3006 def _usecorrectpython(self):
2998 3007 """Configure the environment to use the appropriate Python in tests."""
2999 3008 # Tests must use the same interpreter as us or bad things will happen.
3000 3009 pyexename = sys.platform == 'win32' and b'python.exe' or b'python'
3001 3010
3002 3011 # os.symlink() is a thing with py3 on Windows, but it requires
3003 3012 # Administrator rights.
3004 3013 if getattr(os, 'symlink', None) and os.name != 'nt':
3005 3014 vlog("# Making python executable in test path a symlink to '%s'" %
3006 sys.executable)
3015 sysexecutable)
3007 3016 mypython = os.path.join(self._tmpbindir, pyexename)
3008 3017 try:
3009 if os.readlink(mypython) == sys.executable:
3018 if os.readlink(mypython) == sysexecutable:
3010 3019 return
3011 3020 os.unlink(mypython)
3012 3021 except OSError as err:
3013 3022 if err.errno != errno.ENOENT:
3014 3023 raise
3015 if self._findprogram(pyexename) != sys.executable:
3024 if self._findprogram(pyexename) != sysexecutable:
3016 3025 try:
3017 os.symlink(sys.executable, mypython)
3026 os.symlink(sysexecutable, mypython)
3018 3027 self._createdfiles.append(mypython)
3019 3028 except OSError as err:
3020 3029 # child processes may race, which is harmless
3021 3030 if err.errno != errno.EEXIST:
3022 3031 raise
3023 3032 else:
3024 exedir, exename = os.path.split(sys.executable)
3033 exedir, exename = os.path.split(sysexecutable)
3025 3034 vlog("# Modifying search path to find %s as %s in '%s'" %
3026 3035 (exename, pyexename, exedir))
3027 3036 path = os.environ['PATH'].split(os.pathsep)
3028 3037 while exedir in path:
3029 3038 path.remove(exedir)
3030 3039 os.environ['PATH'] = os.pathsep.join([exedir] + path)
3031 3040 if not self._findprogram(pyexename):
3032 3041 print("WARNING: Cannot find %s in search path" % pyexename)
3033 3042
3034 3043 def _installhg(self):
3035 3044 """Install hg into the test environment.
3036 3045
3037 3046 This will also configure hg with the appropriate testing settings.
3038 3047 """
3039 3048 vlog("# Performing temporary installation of HG")
3040 3049 installerrs = os.path.join(self._hgtmp, b"install.err")
3041 3050 compiler = ''
3042 3051 if self.options.compiler:
3043 3052 compiler = '--compiler ' + self.options.compiler
3044 3053 if self.options.pure:
3045 3054 pure = b"--pure"
3046 3055 else:
3047 3056 pure = b""
3048 3057
3049 3058 # Run installer in hg root
3050 3059 script = os.path.realpath(sys.argv[0])
3051 exe = sys.executable
3060 exe = sysexecutable
3052 3061 if PYTHON3:
3053 3062 compiler = _bytespath(compiler)
3054 3063 script = _bytespath(script)
3055 3064 exe = _bytespath(exe)
3056 3065 hgroot = os.path.dirname(os.path.dirname(script))
3057 3066 self._hgroot = hgroot
3058 3067 os.chdir(hgroot)
3059 3068 nohome = b'--home=""'
3060 3069 if os.name == 'nt':
3061 3070 # The --home="" trick works only on OS where os.sep == '/'
3062 3071 # because of a distutils convert_path() fast-path. Avoid it at
3063 3072 # least on Windows for now, deal with .pydistutils.cfg bugs
3064 3073 # when they happen.
3065 3074 nohome = b''
3066 3075 cmd = (b'"%(exe)s" setup.py %(pure)s clean --all'
3067 3076 b' build %(compiler)s --build-base="%(base)s"'
3068 3077 b' install --force --prefix="%(prefix)s"'
3069 3078 b' --install-lib="%(libdir)s"'
3070 3079 b' --install-scripts="%(bindir)s" %(nohome)s >%(logfile)s 2>&1'
3071 3080 % {b'exe': exe, b'pure': pure,
3072 3081 b'compiler': compiler,
3073 3082 b'base': os.path.join(self._hgtmp, b"build"),
3074 3083 b'prefix': self._installdir, b'libdir': self._pythondir,
3075 3084 b'bindir': self._bindir,
3076 3085 b'nohome': nohome, b'logfile': installerrs})
3077 3086
3078 3087 # setuptools requires install directories to exist.
3079 3088 def makedirs(p):
3080 3089 try:
3081 3090 os.makedirs(p)
3082 3091 except OSError as e:
3083 3092 if e.errno != errno.EEXIST:
3084 3093 raise
3085 3094 makedirs(self._pythondir)
3086 3095 makedirs(self._bindir)
3087 3096
3088 3097 vlog("# Running", cmd)
3089 3098 if subprocess.call(_strpath(cmd), shell=True) == 0:
3090 3099 if not self.options.verbose:
3091 3100 try:
3092 3101 os.remove(installerrs)
3093 3102 except OSError as e:
3094 3103 if e.errno != errno.ENOENT:
3095 3104 raise
3096 3105 else:
3097 3106 with open(installerrs, 'rb') as f:
3098 3107 for line in f:
3099 3108 if PYTHON3:
3100 3109 sys.stdout.buffer.write(line)
3101 3110 else:
3102 3111 sys.stdout.write(line)
3103 3112 sys.exit(1)
3104 3113 os.chdir(self._testdir)
3105 3114
3106 3115 self._usecorrectpython()
3107 3116
3108 3117 if self.options.py3_warnings and not self.options.anycoverage:
3109 3118 vlog("# Updating hg command to enable Py3k Warnings switch")
3110 3119 with open(os.path.join(self._bindir, 'hg'), 'rb') as f:
3111 3120 lines = [line.rstrip() for line in f]
3112 3121 lines[0] += ' -3'
3113 3122 with open(os.path.join(self._bindir, 'hg'), 'wb') as f:
3114 3123 for line in lines:
3115 3124 f.write(line + '\n')
3116 3125
3117 3126 hgbat = os.path.join(self._bindir, b'hg.bat')
3118 3127 if os.path.isfile(hgbat):
3119 3128 # hg.bat expects to be put in bin/scripts while run-tests.py
3120 3129 # installation layout put it in bin/ directly. Fix it
3121 3130 with open(hgbat, 'rb') as f:
3122 3131 data = f.read()
3123 3132 if br'"%~dp0..\python" "%~dp0hg" %*' in data:
3124 3133 data = data.replace(br'"%~dp0..\python" "%~dp0hg" %*',
3125 3134 b'"%~dp0python" "%~dp0hg" %*')
3126 3135 with open(hgbat, 'wb') as f:
3127 3136 f.write(data)
3128 3137 else:
3129 3138 print('WARNING: cannot fix hg.bat reference to python.exe')
3130 3139
3131 3140 if self.options.anycoverage:
3132 3141 custom = os.path.join(self._testdir, 'sitecustomize.py')
3133 3142 target = os.path.join(self._pythondir, 'sitecustomize.py')
3134 3143 vlog('# Installing coverage trigger to %s' % target)
3135 3144 shutil.copyfile(custom, target)
3136 3145 rc = os.path.join(self._testdir, '.coveragerc')
3137 3146 vlog('# Installing coverage rc to %s' % rc)
3138 3147 os.environ['COVERAGE_PROCESS_START'] = rc
3139 3148 covdir = os.path.join(self._installdir, '..', 'coverage')
3140 3149 try:
3141 3150 os.mkdir(covdir)
3142 3151 except OSError as e:
3143 3152 if e.errno != errno.EEXIST:
3144 3153 raise
3145 3154
3146 3155 os.environ['COVERAGE_DIR'] = covdir
3147 3156
3148 3157 def _checkhglib(self, verb):
3149 3158 """Ensure that the 'mercurial' package imported by python is
3150 3159 the one we expect it to be. If not, print a warning to stderr."""
3151 3160 if ((self._bindir == self._pythondir) and
3152 3161 (self._bindir != self._tmpbindir)):
3153 3162 # The pythondir has been inferred from --with-hg flag.
3154 3163 # We cannot expect anything sensible here.
3155 3164 return
3156 3165 expecthg = os.path.join(self._pythondir, b'mercurial')
3157 3166 actualhg = self._gethgpath()
3158 3167 if os.path.abspath(actualhg) != os.path.abspath(expecthg):
3159 3168 sys.stderr.write('warning: %s with unexpected mercurial lib: %s\n'
3160 3169 ' (expected %s)\n'
3161 3170 % (verb, actualhg, expecthg))
3162 3171 def _gethgpath(self):
3163 3172 """Return the path to the mercurial package that is actually found by
3164 3173 the current Python interpreter."""
3165 3174 if self._hgpath is not None:
3166 3175 return self._hgpath
3167 3176
3168 3177 cmd = b'"%s" -c "import mercurial; print (mercurial.__path__[0])"'
3169 3178 cmd = cmd % PYTHON
3170 3179 if PYTHON3:
3171 3180 cmd = _strpath(cmd)
3172 3181
3173 3182 p = subprocess.Popen(cmd, stdout=subprocess.PIPE, shell=True)
3174 3183 out, err = p.communicate()
3175 3184
3176 3185 self._hgpath = out.strip()
3177 3186
3178 3187 return self._hgpath
3179 3188
3180 3189 def _installchg(self):
3181 3190 """Install chg into the test environment"""
3182 3191 vlog('# Performing temporary installation of CHG')
3183 3192 assert os.path.dirname(self._bindir) == self._installdir
3184 3193 assert self._hgroot, 'must be called after _installhg()'
3185 3194 cmd = (b'"%(make)s" clean install PREFIX="%(prefix)s"'
3186 3195 % {b'make': b'make', # TODO: switch by option or environment?
3187 3196 b'prefix': self._installdir})
3188 3197 cwd = os.path.join(self._hgroot, b'contrib', b'chg')
3189 3198 vlog("# Running", cmd)
3190 3199 proc = subprocess.Popen(cmd, shell=True, cwd=cwd,
3191 3200 stdin=subprocess.PIPE, stdout=subprocess.PIPE,
3192 3201 stderr=subprocess.STDOUT)
3193 3202 out, _err = proc.communicate()
3194 3203 if proc.returncode != 0:
3195 3204 if PYTHON3:
3196 3205 sys.stdout.buffer.write(out)
3197 3206 else:
3198 3207 sys.stdout.write(out)
3199 3208 sys.exit(1)
3200 3209
3201 3210 def _outputcoverage(self):
3202 3211 """Produce code coverage output."""
3203 3212 import coverage
3204 3213 coverage = coverage.coverage
3205 3214
3206 3215 vlog('# Producing coverage report')
3207 3216 # chdir is the easiest way to get short, relative paths in the
3208 3217 # output.
3209 3218 os.chdir(self._hgroot)
3210 3219 covdir = os.path.join(self._installdir, '..', 'coverage')
3211 3220 cov = coverage(data_file=os.path.join(covdir, 'cov'))
3212 3221
3213 3222 # Map install directory paths back to source directory.
3214 3223 cov.config.paths['srcdir'] = ['.', self._pythondir]
3215 3224
3216 3225 cov.combine()
3217 3226
3218 3227 omit = [os.path.join(x, '*') for x in [self._bindir, self._testdir]]
3219 3228 cov.report(ignore_errors=True, omit=omit)
3220 3229
3221 3230 if self.options.htmlcov:
3222 3231 htmldir = os.path.join(self._outputdir, 'htmlcov')
3223 3232 cov.html_report(directory=htmldir, omit=omit)
3224 3233 if self.options.annotate:
3225 3234 adir = os.path.join(self._outputdir, 'annotated')
3226 3235 if not os.path.isdir(adir):
3227 3236 os.mkdir(adir)
3228 3237 cov.annotate(directory=adir, omit=omit)
3229 3238
3230 3239 def _findprogram(self, program):
3231 3240 """Search PATH for a executable program"""
3232 3241 dpb = _bytespath(os.defpath)
3233 3242 sepb = _bytespath(os.pathsep)
3234 3243 for p in osenvironb.get(b'PATH', dpb).split(sepb):
3235 3244 name = os.path.join(p, program)
3236 3245 if os.name == 'nt' or os.access(name, os.X_OK):
3237 3246 return name
3238 3247 return None
3239 3248
3240 3249 def _checktools(self):
3241 3250 """Ensure tools required to run tests are present."""
3242 3251 for p in self.REQUIREDTOOLS:
3243 3252 if os.name == 'nt' and not p.endswith(b'.exe'):
3244 3253 p += b'.exe'
3245 3254 found = self._findprogram(p)
3246 3255 if found:
3247 3256 vlog("# Found prerequisite", p, "at", found)
3248 3257 else:
3249 3258 print("WARNING: Did not find prerequisite tool: %s " %
3250 3259 p.decode("utf-8"))
3251 3260
def aggregateexceptions(path):
    """Aggregate the exception report files found in directory ``path``.

    Each report file contains 5 NUL-separated fields: exception name, main
    frame, hg frame, hg line, and test name; malformed files are skipped.
    Returns a dict with per-exception counts, total count, tests grouped by
    failure (and vice versa), the least-failing test per failure, and a
    combined mapping suitable for sorting.
    """
    exceptioncounts = collections.Counter()
    testsbyfailure = collections.defaultdict(set)
    failuresbytest = collections.defaultdict(set)

    for filename in os.listdir(path):
        with open(os.path.join(path, filename), 'rb') as fh:
            fields = fh.read().split(b'\0')
            # Skip anything that doesn't have exactly the 5 expected fields.
            if len(fields) != 5:
                continue

            exc, mainframe, hgframe, hgline, testname = (
                f.decode('utf-8') for f in fields)

            key = (hgframe, hgline, exc)
            exceptioncounts[key] += 1
            testsbyfailure[key].add(testname)
            failuresbytest[testname].add(key)

    # Find test having fewest failures for each failure.
    # min() on (count, name) tuples breaks count ties by the
    # lexicographically smallest test name.
    leastfailing = {}
    for key, tests in testsbyfailure.items():
        leastfailing[key] = min(
            (len(failuresbytest[t]), t) for t in tests)

    # Create a combined counter so we can sort by total occurrences and
    # impacted tests.
    combined = {
        key: (exceptioncounts[key],
              len(testsbyfailure[key]),
              leastfailing[key][0],
              leastfailing[key][1])
        for key in exceptioncounts
    }

    return {
        'exceptioncounts': exceptioncounts,
        'total': sum(exceptioncounts.values()),
        'combined': combined,
        'leastfailing': leastfailing,
        'byfailure': testsbyfailure,
        'bytest': failuresbytest,
    }
3304 3313
if __name__ == '__main__':
    runner = TestRunner()

    try:
        # On Windows, put the standard streams into binary mode so test
        # output is not mangled by CRLF translation; msvcrt is absent on
        # other platforms, where this is a no-op.
        import msvcrt
        msvcrt.setmode(sys.stdin.fileno(), os.O_BINARY)
        msvcrt.setmode(sys.stdout.fileno(), os.O_BINARY)
        msvcrt.setmode(sys.stderr.fileno(), os.O_BINARY)
    except ImportError:
        pass

    # Exit with the runner's status code.
    sys.exit(runner.run(sys.argv[1:]))
General Comments 0
You need to be logged in to leave comments. Login now