statprof: use context manager when reading source from file...
Martin von Zweigbergk
r43102:c085cb13 default
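The patch touches CodeSite.getsource(): the manual open/try/finally/close sequence is replaced with a with-statement, so the file handle is released on every exit path, including when the read raises. A minimal standalone sketch of the pattern the hunk below adopts (the read_source_line helper is illustrative only, not part of statprof.py):

    def read_source_line(path, lineno):
        # Read a single 1-based line from a file, tolerating missing or
        # unreadable files, and let the context manager close the handle.
        source = None
        try:
            with open(path, 'rb') as fp:
                for i, line in enumerate(fp):
                    if i == lineno - 1:
                        source = line.strip()
                        break
        except (IOError, OSError):
            pass
        return source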
@@ -1,973 +1,969 @@
1 1 #!/usr/bin/env python
2 2 ## statprof.py
3 3 ## Copyright (C) 2012 Bryan O'Sullivan <bos@serpentine.com>
4 4 ## Copyright (C) 2011 Alex Fraser <alex at phatcore dot com>
5 5 ## Copyright (C) 2004,2005 Andy Wingo <wingo at pobox dot com>
6 6 ## Copyright (C) 2001 Rob Browning <rlb at defaultvalue dot org>
7 7
8 8 ## This library is free software; you can redistribute it and/or
9 9 ## modify it under the terms of the GNU Lesser General Public
10 10 ## License as published by the Free Software Foundation; either
11 11 ## version 2.1 of the License, or (at your option) any later version.
12 12 ##
13 13 ## This library is distributed in the hope that it will be useful,
14 14 ## but WITHOUT ANY WARRANTY; without even the implied warranty of
15 15 ## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
16 16 ## Lesser General Public License for more details.
17 17 ##
18 18 ## You should have received a copy of the GNU Lesser General Public
19 19 ## License along with this program; if not, contact:
20 20 ##
21 21 ## Free Software Foundation Voice: +1-617-542-5942
22 22 ## 59 Temple Place - Suite 330 Fax: +1-617-542-2652
23 23 ## Boston, MA 02111-1307, USA gnu@gnu.org
24 24
25 25 """
26 26 statprof is intended to be a fairly simple statistical profiler for
27 27 python. It was ported directly from a statistical profiler for guile,
28 28 also named statprof, available from guile-lib [0].
29 29
30 30 [0] http://wingolog.org/software/guile-lib/statprof/
31 31
32 32 To start profiling, call statprof.start():
33 33 >>> start()
34 34
35 35 Then run whatever it is that you want to profile, for example:
36 36 >>> import test.pystone; test.pystone.pystones()
37 37
38 38 Then stop the profiling and print out the results:
39 39 >>> stop()
40 40 >>> display()
41 41 % cumulative self
42 42 time seconds seconds name
43 43 26.72 1.40 0.37 pystone.py:79:Proc0
44 44 13.79 0.56 0.19 pystone.py:133:Proc1
45 45 13.79 0.19 0.19 pystone.py:208:Proc8
46 46 10.34 0.16 0.14 pystone.py:229:Func2
47 47 6.90 0.10 0.10 pystone.py:45:__init__
48 48 4.31 0.16 0.06 pystone.py:53:copy
49 49 ...
50 50
51 51 All of the numerical data is statistically approximate. In the
52 52 following column descriptions, and in all of statprof, "time" refers
53 53 to execution time (both user and system), not wall clock time.
54 54
55 55 % time
56 56 The percent of the time spent inside the procedure itself (not
57 57 counting children).
58 58
59 59 cumulative seconds
60 60 The total number of seconds spent in the procedure, including
61 61 children.
62 62
63 63 self seconds
64 64 The total number of seconds spent in the procedure itself (not
65 65 counting children).
66 66
67 67 name
68 68 The name of the procedure.
69 69
70 70 By default statprof keeps the data collected from previous runs. If you
71 71 want to clear the collected data, call reset():
72 72 >>> reset()
73 73
74 74 reset() can also be used to change the sampling frequency from the
75 75 default of 1000 Hz. For example, to tell statprof to sample 50 times a
76 76 second:
77 77 >>> reset(50)
78 78
79 79 This means that statprof will sample the call stack after every 1/50 of
80 80 a second of user + system time spent running on behalf of the python
81 81 process. When your process is idle (for example, blocking in a read(),
82 82 as is the case at the listener), the clock does not advance. For this
83 83 reason statprof is not currently suitable for profiling io-bound
84 84 operations.
85 85
86 86 The profiler uses the hash of the code object itself to identify the
87 87 procedures, so it won't confuse different procedures with the same name.
88 88 They will show up as two different rows in the output.
89 89
90 90 Right now the profiler is quite simplistic. I cannot provide
91 91 call-graphs or other higher level information. What you see in the
92 92 table is pretty much all there is. Patches are welcome :-)
93 93
94 94
95 95 Threading
96 96 ---------
97 97
98 98 Because signals only get delivered to the main thread in Python,
99 99 statprof only profiles the main thread. However, because the time
100 100 reporting function uses per-process timers, the results can be
101 101 significantly off if other threads' work patterns are not similar to the
102 102 main thread's work patterns.
103 103 """
104 104 # no-check-code
105 105 from __future__ import absolute_import, division, print_function
106 106
107 107 import collections
108 108 import contextlib
109 109 import getopt
110 110 import inspect
111 111 import json
112 112 import os
113 113 import signal
114 114 import sys
115 115 import threading
116 116 import time
117 117
118 118 from . import (
119 119 encoding,
120 120 pycompat,
121 121 )
122 122
123 123 defaultdict = collections.defaultdict
124 124 contextmanager = contextlib.contextmanager
125 125
126 126 __all__ = ['start', 'stop', 'reset', 'display', 'profile']
127 127
128 128 skips = {
129 129 r"util.py:check",
130 130 r"extensions.py:closure",
131 131 r"color.py:colorcmd",
132 132 r"dispatch.py:checkargs",
133 133 r"dispatch.py:<lambda>",
134 134 r"dispatch.py:_runcatch",
135 135 r"dispatch.py:_dispatch",
136 136 r"dispatch.py:_runcommand",
137 137 r"pager.py:pagecmd",
138 138 r"dispatch.py:run",
139 139 r"dispatch.py:dispatch",
140 140 r"dispatch.py:runcommand",
141 141 r"hg.py:<module>",
142 142 r"evolve.py:warnobserrors",
143 143 }
144 144
145 145 ###########################################################################
146 146 ## Utils
147 147
148 148 def clock():
149 149 times = os.times()
150 150 return (times[0] + times[1], times[4])
151 151
152 152
153 153 ###########################################################################
154 154 ## Collection data structures
155 155
156 156 class ProfileState(object):
157 157 def __init__(self, frequency=None):
158 158 self.reset(frequency)
159 159 self.track = 'cpu'
160 160
161 161 def reset(self, frequency=None):
162 162 # total so far
163 163 self.accumulated_time = (0.0, 0.0)
164 164 # start_time when timer is active
165 165 self.last_start_time = None
166 166 # a float
167 167 if frequency:
168 168 self.sample_interval = 1.0 / frequency
169 169 elif not hasattr(self, 'sample_interval'):
170 170 # default to 1000 Hz
171 171 self.sample_interval = 1.0 / 1000.0
172 172 else:
173 173 # leave the frequency as it was
174 174 pass
175 175 self.remaining_prof_time = None
176 176 # for user start/stop nesting
177 177 self.profile_level = 0
178 178
179 179 self.samples = []
180 180
181 181 def accumulate_time(self, stop_time):
182 182 increment = (
183 183 stop_time[0] - self.last_start_time[0],
184 184 stop_time[1] - self.last_start_time[1],
185 185 )
186 186 self.accumulated_time = (
187 187 self.accumulated_time[0] + increment[0],
188 188 self.accumulated_time[1] + increment[1],
189 189 )
190 190
191 191 def seconds_per_sample(self):
192 192 return self.accumulated_time[self.timeidx] / len(self.samples)
193 193
194 194 @property
195 195 def timeidx(self):
196 196 if self.track == 'real':
197 197 return 1
198 198 return 0
199 199
200 200 state = ProfileState()
201 201
202 202
203 203 class CodeSite(object):
204 204 cache = {}
205 205
206 206 __slots__ = (r'path', r'lineno', r'function', r'source')
207 207
208 208 def __init__(self, path, lineno, function):
209 209 assert isinstance(path, bytes)
210 210 self.path = path
211 211 self.lineno = lineno
212 212 assert isinstance(function, bytes)
213 213 self.function = function
214 214 self.source = None
215 215
216 216 def __eq__(self, other):
217 217 try:
218 218 return (self.lineno == other.lineno and
219 219 self.path == other.path)
220 220 except:
221 221 return False
222 222
223 223 def __hash__(self):
224 224 return hash((self.lineno, self.path))
225 225
226 226 @classmethod
227 227 def get(cls, path, lineno, function):
228 228 k = (path, lineno)
229 229 try:
230 230 return cls.cache[k]
231 231 except KeyError:
232 232 v = cls(path, lineno, function)
233 233 cls.cache[k] = v
234 234 return v
235 235
236 236 def getsource(self, length):
237 237 if self.source is None:
238 238 lineno = self.lineno - 1
239 fp = None
240 239 try:
241 fp = open(self.path, 'rb')
240 with open(self.path, 'rb') as fp:
242 241 for i, line in enumerate(fp):
243 242 if i == lineno:
244 243 self.source = line.strip()
245 244 break
246 245 except:
247 246 pass
248 finally:
249 if fp:
250 fp.close()
251 247 if self.source is None:
252 248 self.source = ''
253 249
254 250 source = self.source
255 251 if len(source) > length:
256 252 source = source[:(length - 3)] + "..."
257 253 return source
258 254
259 255 def filename(self):
260 256 return os.path.basename(self.path)
261 257
262 258 def skipname(self):
263 259 return r'%s:%s' % (self.filename(), self.function)
264 260
265 261 class Sample(object):
266 262 __slots__ = (r'stack', r'time')
267 263
268 264 def __init__(self, stack, time):
269 265 self.stack = stack
270 266 self.time = time
271 267
272 268 @classmethod
273 269 def from_frame(cls, frame, time):
274 270 stack = []
275 271
276 272 while frame:
277 273 stack.append(CodeSite.get(
278 274 pycompat.sysbytes(frame.f_code.co_filename),
279 275 frame.f_lineno,
280 276 pycompat.sysbytes(frame.f_code.co_name)))
281 277 frame = frame.f_back
282 278
283 279 return Sample(stack, time)
284 280
285 281 ###########################################################################
286 282 ## SIGPROF handler
287 283
288 284 def profile_signal_handler(signum, frame):
289 285 if state.profile_level > 0:
290 286 now = clock()
291 287 state.accumulate_time(now)
292 288
293 289 timestamp = state.accumulated_time[state.timeidx]
294 290 state.samples.append(Sample.from_frame(frame, timestamp))
295 291
296 292 signal.setitimer(signal.ITIMER_PROF,
297 293 state.sample_interval, 0.0)
298 294 state.last_start_time = now
299 295
300 296 stopthread = threading.Event()
301 297 def samplerthread(tid):
302 298 while not stopthread.is_set():
303 299 now = clock()
304 300 state.accumulate_time(now)
305 301
306 302 frame = sys._current_frames()[tid]
307 303
308 304 timestamp = state.accumulated_time[state.timeidx]
309 305 state.samples.append(Sample.from_frame(frame, timestamp))
310 306
311 307 state.last_start_time = now
312 308 time.sleep(state.sample_interval)
313 309
314 310 stopthread.clear()
315 311
316 312 ###########################################################################
317 313 ## Profiling API
318 314
319 315 def is_active():
320 316 return state.profile_level > 0
321 317
322 318 lastmechanism = None
323 319 def start(mechanism='thread', track='cpu'):
324 320 '''Install the profiling signal handler, and start profiling.'''
325 321 state.track = track # note: nesting different mode won't work
326 322 state.profile_level += 1
327 323 if state.profile_level == 1:
328 324 state.last_start_time = clock()
329 325 rpt = state.remaining_prof_time
330 326 state.remaining_prof_time = None
331 327
332 328 global lastmechanism
333 329 lastmechanism = mechanism
334 330
335 331 if mechanism == 'signal':
336 332 signal.signal(signal.SIGPROF, profile_signal_handler)
337 333 signal.setitimer(signal.ITIMER_PROF,
338 334 rpt or state.sample_interval, 0.0)
339 335 elif mechanism == 'thread':
340 336 frame = inspect.currentframe()
341 337 tid = [k for k, f in sys._current_frames().items() if f == frame][0]
342 338 state.thread = threading.Thread(target=samplerthread,
343 339 args=(tid,), name="samplerthread")
344 340 state.thread.start()
345 341
346 342 def stop():
347 343 '''Stop profiling, and uninstall the profiling signal handler.'''
348 344 state.profile_level -= 1
349 345 if state.profile_level == 0:
350 346 if lastmechanism == 'signal':
351 347 rpt = signal.setitimer(signal.ITIMER_PROF, 0.0, 0.0)
352 348 signal.signal(signal.SIGPROF, signal.SIG_IGN)
353 349 state.remaining_prof_time = rpt[0]
354 350 elif lastmechanism == 'thread':
355 351 stopthread.set()
356 352 state.thread.join()
357 353
358 354 state.accumulate_time(clock())
359 355 state.last_start_time = None
360 356 statprofpath = encoding.environ.get('STATPROF_DEST')
361 357 if statprofpath:
362 358 save_data(statprofpath)
363 359
364 360 return state
365 361
366 362 def save_data(path):
367 363 with open(path, 'w+') as file:
368 364 file.write("%f %f\n" % state.accumulated_time)
369 365 for sample in state.samples:
370 366 time = sample.time
371 367 stack = sample.stack
372 368 sites = ['\1'.join([s.path, b'%d' % s.lineno, s.function])
373 369 for s in stack]
374 370 file.write("%d\0%s\n" % (time, '\0'.join(sites)))
375 371
376 372 def load_data(path):
377 373 lines = open(path, 'rb').read().splitlines()
378 374
379 375 state.accumulated_time = [float(value) for value in lines[0].split()]
380 376 state.samples = []
381 377 for line in lines[1:]:
382 378 parts = line.split('\0')
383 379 time = float(parts[0])
384 380 rawsites = parts[1:]
385 381 sites = []
386 382 for rawsite in rawsites:
387 383 siteparts = rawsite.split('\1')
388 384 sites.append(CodeSite.get(siteparts[0], int(siteparts[1]),
389 385 siteparts[2]))
390 386
391 387 state.samples.append(Sample(sites, time))
392 388
393 389
394 390
395 391 def reset(frequency=None):
396 392 '''Clear out the state of the profiler. Do not call while the
397 393 profiler is running.
398 394
399 395 The optional frequency argument specifies the number of samples to
400 396 collect per second.'''
401 397 assert state.profile_level == 0, "Can't reset() while statprof is running"
402 398 CodeSite.cache.clear()
403 399 state.reset(frequency)
404 400
405 401
406 402 @contextmanager
407 403 def profile():
408 404 start()
409 405 try:
410 406 yield
411 407 finally:
412 408 stop()
413 409 display()
414 410
415 411
416 412 ###########################################################################
417 413 ## Reporting API
418 414
419 415 class SiteStats(object):
420 416 def __init__(self, site):
421 417 self.site = site
422 418 self.selfcount = 0
423 419 self.totalcount = 0
424 420
425 421 def addself(self):
426 422 self.selfcount += 1
427 423
428 424 def addtotal(self):
429 425 self.totalcount += 1
430 426
431 427 def selfpercent(self):
432 428 return self.selfcount / len(state.samples) * 100
433 429
434 430 def totalpercent(self):
435 431 return self.totalcount / len(state.samples) * 100
436 432
437 433 def selfseconds(self):
438 434 return self.selfcount * state.seconds_per_sample()
439 435
440 436 def totalseconds(self):
441 437 return self.totalcount * state.seconds_per_sample()
442 438
443 439 @classmethod
444 440 def buildstats(cls, samples):
445 441 stats = {}
446 442
447 443 for sample in samples:
448 444 for i, site in enumerate(sample.stack):
449 445 sitestat = stats.get(site)
450 446 if not sitestat:
451 447 sitestat = SiteStats(site)
452 448 stats[site] = sitestat
453 449
454 450 sitestat.addtotal()
455 451
456 452 if i == 0:
457 453 sitestat.addself()
458 454
459 455 return [s for s in stats.itervalues()]
460 456
461 457 class DisplayFormats:
462 458 ByLine = 0
463 459 ByMethod = 1
464 460 AboutMethod = 2
465 461 Hotpath = 3
466 462 FlameGraph = 4
467 463 Json = 5
468 464 Chrome = 6
469 465
470 466 def display(fp=None, format=3, data=None, **kwargs):
471 467 '''Print statistics, either to stdout or the given file object.'''
472 468 if data is None:
473 469 data = state
474 470
475 471 if fp is None:
476 472 import sys
477 473 fp = sys.stdout
478 474 if len(data.samples) == 0:
479 475 fp.write(b'No samples recorded.\n')
480 476 return
481 477
482 478 if format == DisplayFormats.ByLine:
483 479 display_by_line(data, fp)
484 480 elif format == DisplayFormats.ByMethod:
485 481 display_by_method(data, fp)
486 482 elif format == DisplayFormats.AboutMethod:
487 483 display_about_method(data, fp, **kwargs)
488 484 elif format == DisplayFormats.Hotpath:
489 485 display_hotpath(data, fp, **kwargs)
490 486 elif format == DisplayFormats.FlameGraph:
491 487 write_to_flame(data, fp, **kwargs)
492 488 elif format == DisplayFormats.Json:
493 489 write_to_json(data, fp)
494 490 elif format == DisplayFormats.Chrome:
495 491 write_to_chrome(data, fp, **kwargs)
496 492 else:
497 493 raise Exception("Invalid display format")
498 494
499 495 if format not in (DisplayFormats.Json, DisplayFormats.Chrome):
500 496 fp.write(b'---\n')
501 497 fp.write(b'Sample count: %d\n' % len(data.samples))
502 498 fp.write(b'Total time: %f seconds (%f wall)\n' % data.accumulated_time)
503 499
504 500 def display_by_line(data, fp):
505 501 '''Print the profiler data with each sample line represented
506 502 as one row in a table. Sorted by self-time per line.'''
507 503 stats = SiteStats.buildstats(data.samples)
508 504 stats.sort(reverse=True, key=lambda x: x.selfseconds())
509 505
510 506 fp.write(b'%5.5s %10.10s %7.7s %-8.8s\n' % (
511 507 b'% ', b'cumulative', b'self', b''))
512 508 fp.write(b'%5.5s %9.9s %8.8s %-8.8s\n' % (
513 509 b"time", b"seconds", b"seconds", b"name"))
514 510
515 511 for stat in stats:
516 512 site = stat.site
517 513 sitelabel = '%s:%d:%s' % (site.filename(),
518 514 site.lineno,
519 515 site.function)
520 516 fp.write(b'%6.2f %9.2f %9.2f %s\n' % (
521 517 stat.selfpercent(), stat.totalseconds(),
522 518 stat.selfseconds(), sitelabel))
523 519
524 520 def display_by_method(data, fp):
525 521 '''Print the profiler data with each sample function represented
526 522 as one row in a table. Important lines within that function are
527 523 output as nested rows. Sorted by self-time per line.'''
528 524 fp.write(b'%5.5s %10.10s %7.7s %-8.8s\n' %
529 525 ('% ', 'cumulative', 'self', ''))
530 526 fp.write(b'%5.5s %9.9s %8.8s %-8.8s\n' %
531 527 ("time", "seconds", "seconds", "name"))
532 528
533 529 stats = SiteStats.buildstats(data.samples)
534 530
535 531 grouped = defaultdict(list)
536 532 for stat in stats:
537 533 grouped[stat.site.filename() + b":" + stat.site.function].append(stat)
538 534
539 535 # compute sums for each function
540 536 functiondata = []
541 537 for fname, sitestats in grouped.iteritems():
542 538 total_cum_sec = 0
543 539 total_self_sec = 0
544 540 total_percent = 0
545 541 for stat in sitestats:
546 542 total_cum_sec += stat.totalseconds()
547 543 total_self_sec += stat.selfseconds()
548 544 total_percent += stat.selfpercent()
549 545
550 546 functiondata.append((fname,
551 547 total_cum_sec,
552 548 total_self_sec,
553 549 total_percent,
554 550 sitestats))
555 551
556 552 # sort by total self sec
557 553 functiondata.sort(reverse=True, key=lambda x: x[2])
558 554
559 555 for function in functiondata:
560 556 if function[3] < 0.05:
561 557 continue
562 558 fp.write(b'%6.2f %9.2f %9.2f %s\n' % (
563 559 function[3], # total percent
564 560 function[1], # total cum sec
565 561 function[2], # total self sec
566 562 function[0])) # file:function
567 563
568 564 function[4].sort(reverse=True, key=lambda i: i.selfseconds())
569 565 for stat in function[4]:
570 566 # only show line numbers for significant locations (>1% time spent)
571 567 if stat.selfpercent() > 1:
572 568 source = stat.site.getsource(25)
573 569 if sys.version_info.major >= 3 and not isinstance(source, bytes):
574 570 source = pycompat.bytestr(source)
575 571
576 572 stattuple = (stat.selfpercent(), stat.selfseconds(),
577 573 stat.site.lineno, source)
578 574
579 575 fp.write(b'%33.0f%% %6.2f line %d: %s\n' % stattuple)
580 576
581 577 def display_about_method(data, fp, function=None, **kwargs):
582 578 if function is None:
583 579 raise Exception("Invalid function")
584 580
585 581 filename = None
586 582 if ':' in function:
587 583 filename, function = function.split(':')
588 584
589 585 relevant_samples = 0
590 586 parents = {}
591 587 children = {}
592 588
593 589 for sample in data.samples:
594 590 for i, site in enumerate(sample.stack):
595 591 if site.function == function and (not filename
596 592 or site.filename() == filename):
597 593 relevant_samples += 1
598 594 if i != len(sample.stack) - 1:
599 595 parent = sample.stack[i + 1]
600 596 if parent in parents:
601 597 parents[parent] = parents[parent] + 1
602 598 else:
603 599 parents[parent] = 1
604 600
605 601 if site in children:
606 602 children[site] = children[site] + 1
607 603 else:
608 604 children[site] = 1
609 605
610 606 parents = [(parent, count) for parent, count in parents.iteritems()]
611 607 parents.sort(reverse=True, key=lambda x: x[1])
612 608 for parent, count in parents:
613 609 fp.write(b'%6.2f%% %s:%s line %s: %s\n' %
614 610 (count / relevant_samples * 100,
615 611 pycompat.fsencode(parent.filename()),
616 612 pycompat.sysbytes(parent.function),
617 613 parent.lineno,
618 614 pycompat.sysbytes(parent.getsource(50))))
619 615
620 616 stats = SiteStats.buildstats(data.samples)
621 617 stats = [s for s in stats
622 618 if s.site.function == function and
623 619 (not filename or s.site.filename() == filename)]
624 620
625 621 total_cum_sec = 0
626 622 total_self_sec = 0
627 623 total_self_percent = 0
628 624 total_cum_percent = 0
629 625 for stat in stats:
630 626 total_cum_sec += stat.totalseconds()
631 627 total_self_sec += stat.selfseconds()
632 628 total_self_percent += stat.selfpercent()
633 629 total_cum_percent += stat.totalpercent()
634 630
635 631 fp.write(
636 632 b'\n %s:%s Total: %0.2fs (%0.2f%%) Self: %0.2fs (%0.2f%%)\n\n'
637 633 % (
638 634 pycompat.sysbytes(filename or '___'),
639 635 pycompat.sysbytes(function),
640 636 total_cum_sec,
641 637 total_cum_percent,
642 638 total_self_sec,
643 639 total_self_percent
644 640 ))
645 641
646 642 children = [(child, count) for child, count in children.iteritems()]
647 643 children.sort(reverse=True, key=lambda x: x[1])
648 644 for child, count in children:
649 645 fp.write(b' %6.2f%% line %s: %s\n' %
650 646 (count / relevant_samples * 100, child.lineno,
651 647 pycompat.sysbytes(child.getsource(50))))
652 648
653 649 def display_hotpath(data, fp, limit=0.05, **kwargs):
654 650 class HotNode(object):
655 651 def __init__(self, site):
656 652 self.site = site
657 653 self.count = 0
658 654 self.children = {}
659 655
660 656 def add(self, stack, time):
661 657 self.count += time
662 658 site = stack[0]
663 659 child = self.children.get(site)
664 660 if not child:
665 661 child = HotNode(site)
666 662 self.children[site] = child
667 663
668 664 if len(stack) > 1:
669 665 i = 1
670 666 # Skip boilerplate parts of the stack
671 667 while i < len(stack) and stack[i].skipname() in skips:
672 668 i += 1
673 669 if i < len(stack):
674 670 child.add(stack[i:], time)
675 671
676 672 root = HotNode(None)
677 673 lasttime = data.samples[0].time
678 674 for sample in data.samples:
679 675 root.add(sample.stack[::-1], sample.time - lasttime)
680 676 lasttime = sample.time
681 677 showtime = kwargs.get(r'showtime', True)
682 678
683 679 def _write(node, depth, multiple_siblings):
684 680 site = node.site
685 681 visiblechildren = [c for c in node.children.itervalues()
686 682 if c.count >= (limit * root.count)]
687 683 if site:
688 684 indent = depth * 2 - 1
689 685 filename = ''
690 686 function = ''
691 687 if len(node.children) > 0:
692 688 childsite = list(node.children.itervalues())[0].site
693 689 filename = (childsite.filename() + ':').ljust(15)
694 690 function = childsite.function
695 691
696 692 # lots of string formatting
697 693 listpattern = ''.ljust(indent) +\
698 694 ('\\' if multiple_siblings else '|') +\
699 695 ' %4.1f%%' +\
700 696 (' %5.2fs' % node.count if showtime else '') +\
701 697 ' %s %s'
702 698 liststring = listpattern % (node.count / root.count * 100,
703 699 filename, function)
704 700 codepattern = '%' + ('%d' % (55 - len(liststring))) + 's %d: %s'
705 701 codestring = codepattern % ('line', site.lineno, site.getsource(30))
706 702
707 703 finalstring = liststring + codestring
708 704 childrensamples = sum([c.count for c in node.children.itervalues()])
709 705 # Make frames that performed more than 10% of the operation red
710 706 if node.count - childrensamples > (0.1 * root.count):
711 707 finalstring = '\033[91m' + finalstring + '\033[0m'
712 708 # Make frames that didn't actually perform work dark grey
713 709 elif node.count - childrensamples == 0:
714 710 finalstring = '\033[90m' + finalstring + '\033[0m'
715 711 fp.write(finalstring + b'\n')
716 712
717 713 newdepth = depth
718 714 if len(visiblechildren) > 1 or multiple_siblings:
719 715 newdepth += 1
720 716
721 717 visiblechildren.sort(reverse=True, key=lambda x: x.count)
722 718 for child in visiblechildren:
723 719 _write(child, newdepth, len(visiblechildren) > 1)
724 720
725 721 if root.count > 0:
726 722 _write(root, 0, False)
727 723
728 724 def write_to_flame(data, fp, scriptpath=None, outputfile=None, **kwargs):
729 725 if scriptpath is None:
730 726 scriptpath = encoding.environ['HOME'] + '/flamegraph.pl'
731 727 if not os.path.exists(scriptpath):
732 728 fp.write(b'error: missing %s\n' % scriptpath)
733 729 fp.write(b'get it here: https://github.com/brendangregg/FlameGraph\n')
734 730 return
735 731
736 732 fd, path = pycompat.mkstemp()
737 733
738 734 file = open(path, "w+")
739 735
740 736 lines = {}
741 737 for sample in data.samples:
742 738 sites = [s.function for s in sample.stack]
743 739 sites.reverse()
744 740 line = ';'.join(sites)
745 741 if line in lines:
746 742 lines[line] = lines[line] + 1
747 743 else:
748 744 lines[line] = 1
749 745
750 746 for line, count in lines.iteritems():
751 747 file.write("%s %d\n" % (line, count))
752 748
753 749 file.close()
754 750
755 751 if outputfile is None:
756 752 outputfile = '~/flamegraph.svg'
757 753
758 754 os.system("perl ~/flamegraph.pl %s > %s" % (path, outputfile))
759 755 fp.write(b'Written to %s\n' % outputfile)
760 756
761 757 _pathcache = {}
762 758 def simplifypath(path):
763 759 '''Attempt to make the path to a Python module easier to read by
764 760 removing whatever part of the Python search path it was found
765 761 on.'''
766 762
767 763 if path in _pathcache:
768 764 return _pathcache[path]
769 765 hgpath = pycompat.fsencode(encoding.__file__).rsplit(os.sep, 2)[0]
770 766 for p in [hgpath] + sys.path:
771 767 prefix = p + os.sep
772 768 if path.startswith(prefix):
773 769 path = path[len(prefix):]
774 770 break
775 771 _pathcache[path] = path
776 772 return path
777 773
778 774 def write_to_json(data, fp):
779 775 samples = []
780 776
781 777 for sample in data.samples:
782 778 stack = []
783 779
784 780 for frame in sample.stack:
785 781 stack.append(
786 782 (pycompat.sysstr(frame.path),
787 783 frame.lineno,
788 784 pycompat.sysstr(frame.function)))
789 785
790 786 samples.append((sample.time, stack))
791 787
792 788 data = json.dumps(samples)
793 789 if not isinstance(data, bytes):
794 790 data = data.encode('utf-8')
795 791
796 792 fp.write(data)
797 793
798 794 def write_to_chrome(data, fp, minthreshold=0.005, maxthreshold=0.999):
799 795 samples = []
800 796 laststack = collections.deque()
801 797 lastseen = collections.deque()
802 798
803 799 # The Chrome tracing format allows us to use a compact stack
804 800 # representation to save space. It's fiddly but worth it.
805 801 # We maintain a bijection between stack and ID.
806 802 stack2id = {}
807 803 id2stack = [] # will eventually be rendered
808 804
809 805 def stackid(stack):
810 806 if not stack:
811 807 return
812 808 if stack in stack2id:
813 809 return stack2id[stack]
814 810 parent = stackid(stack[1:])
815 811 myid = len(stack2id)
816 812 stack2id[stack] = myid
817 813 id2stack.append(dict(category=stack[0][0], name='%s %s' % stack[0]))
818 814 if parent is not None:
819 815 id2stack[-1].update(parent=parent)
820 816 return myid
821 817
822 818 # The sampling profiler can sample multiple times without
823 819 # advancing the clock, potentially causing the Chrome trace viewer
824 820 # to render single-pixel columns that we cannot zoom in on. We
825 821 # work around this by pretending that zero-duration samples are a
826 822 # millisecond in length.
827 823
828 824 clamp = 0.001
829 825
830 826 # We provide knobs that by default attempt to filter out stack
831 827 # frames that are too noisy:
832 828 #
833 829 # * A few take almost all execution time. These are usually boring
834 830 # setup functions, giving a stack that is deep but uninformative.
835 831 #
836 832 # * Numerous samples take almost no time, but introduce lots of
837 833 # noisy, oft-deep "spines" into a rendered profile.
838 834
839 835 blacklist = set()
840 836 totaltime = data.samples[-1].time - data.samples[0].time
841 837 minthreshold = totaltime * minthreshold
842 838 maxthreshold = max(totaltime * maxthreshold, clamp)
843 839
844 840 def poplast():
845 841 oldsid = stackid(tuple(laststack))
846 842 oldcat, oldfunc = laststack.popleft()
847 843 oldtime, oldidx = lastseen.popleft()
848 844 duration = sample.time - oldtime
849 845 if minthreshold <= duration <= maxthreshold:
850 846 # ensure no zero-duration events
851 847 sampletime = max(oldtime + clamp, sample.time)
852 848 samples.append(dict(ph='E', name=oldfunc, cat=oldcat, sf=oldsid,
853 849 ts=sampletime*1e6, pid=0))
854 850 else:
855 851 blacklist.add(oldidx)
856 852
857 853 # Much fiddling to synthesize correctly(ish) nested begin/end
858 854 # events given only stack snapshots.
859 855
860 856 for sample in data.samples:
861 857 stack = tuple((('%s:%d' % (simplifypath(frame.path), frame.lineno),
862 858 frame.function) for frame in sample.stack))
863 859 qstack = collections.deque(stack)
864 860 if laststack == qstack:
865 861 continue
866 862 while laststack and qstack and laststack[-1] == qstack[-1]:
867 863 laststack.pop()
868 864 qstack.pop()
869 865 while laststack:
870 866 poplast()
871 867 for f in reversed(qstack):
872 868 lastseen.appendleft((sample.time, len(samples)))
873 869 laststack.appendleft(f)
874 870 path, name = f
875 871 sid = stackid(tuple(laststack))
876 872 samples.append(dict(ph='B', name=name, cat=path, ts=sample.time*1e6,
877 873 sf=sid, pid=0))
878 874 laststack = collections.deque(stack)
879 875 while laststack:
880 876 poplast()
881 877 events = [sample for idx, sample in enumerate(samples)
882 878 if idx not in blacklist]
883 879 frames = collections.OrderedDict((str(k), v)
884 880 for (k,v) in enumerate(id2stack))
885 881 json.dump(dict(traceEvents=events, stackFrames=frames), fp, indent=1)
886 882 fp.write('\n')
887 883
888 884 def printusage():
889 885 print(r"""
890 886 The statprof command line allows you to inspect the last profile's results in
891 887 the following forms:
892 888
893 889 usage:
894 890 hotpath [-l --limit percent]
895 891 Shows a graph of calls with the percent of time each takes.
896 892 Red calls take over 10%% of the total time themselves.
897 893 lines
898 894 Shows the actual sampled lines.
899 895 functions
900 896 Shows the samples grouped by function.
901 897 function [filename:]functionname
902 898 Shows the callers and callees of a particular function.
903 899 flame [-s --script-path] [-o --output-file path]
904 900 Writes out a flamegraph to output-file (defaults to ~/flamegraph.svg)
905 901 Requires that ~/flamegraph.pl exist.
906 902 (Specify alternate script path with --script-path.)""")
907 903
908 904 def main(argv=None):
909 905 if argv is None:
910 906 argv = sys.argv
911 907
912 908 if len(argv) == 1:
913 909 printusage()
914 910 return 0
915 911
916 912 displayargs = {}
917 913
918 914 optstart = 2
919 915 displayargs['function'] = None
920 916 if argv[1] == r'hotpath':
921 917 displayargs['format'] = DisplayFormats.Hotpath
922 918 elif argv[1] == r'lines':
923 919 displayargs['format'] = DisplayFormats.ByLine
924 920 elif argv[1] == r'functions':
925 921 displayargs['format'] = DisplayFormats.ByMethod
926 922 elif argv[1] == r'function':
927 923 displayargs['format'] = DisplayFormats.AboutMethod
928 924 displayargs['function'] = argv[2]
929 925 optstart = 3
930 926 elif argv[1] == r'flame':
931 927 displayargs['format'] = DisplayFormats.FlameGraph
932 928 else:
933 929 printusage()
934 930 return 0
935 931
936 932 # process options
937 933 try:
938 934 opts, args = pycompat.getoptb(sys.argv[optstart:], "hl:f:o:p:",
939 935 ["help", "limit=", "file=", "output-file=", "script-path="])
940 936 except getopt.error as msg:
941 937 print(msg)
942 938 printusage()
943 939 return 2
944 940
945 941 displayargs['limit'] = 0.05
946 942 path = None
947 943 for o, value in opts:
948 944 if o in (r"-l", r"--limit"):
949 945 displayargs['limit'] = float(value)
950 946 elif o in (r"-f", r"--file"):
951 947 path = value
952 948 elif o in (r"-o", r"--output-file"):
953 949 displayargs['outputfile'] = value
954 950 elif o in (r"-p", r"--script-path"):
955 951 displayargs['scriptpath'] = value
956 952 elif o in (r"-h", r"help"):
957 953 printusage()
958 954 return 0
959 955 else:
960 956 assert False, "unhandled option %s" % o
961 957
962 958 if not path:
963 959 print(r'must specify --file to load')
964 960 return 1
965 961
966 962 load_data(path=path)
967 963
968 964 display(**pycompat.strkwargs(displayargs))
969 965
970 966 return 0
971 967
972 968 if __name__ == r"__main__":
973 969 sys.exit(main())
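For completeness, the module's docstring above covers the interactive start()/stop()/display() flow; the profile() context manager defined in this file bundles the same calls. A minimal usage sketch, assuming the module is importable as mercurial.statprof (the busywork function is a placeholder workload, not part of the module):

    from mercurial import statprof

    def busywork():
        # placeholder CPU-bound work so the sampler has something to record
        return sum(i * i for i in range(10 ** 6))

    # profile() calls start(), runs the block, then stop() and display(),
    # printing the default hotpath report to stdout.
    with statprof.profile():
        busywork()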