py3: use %d in a few places...
Gregory Szorc -
r40233:113adb1b default
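
The diff below swaps %s conversions for %d where the interpolated value is numeric. The likely relevance to Python 3 (an inference from the commit message, not stated in the diff itself) is %-formatting of bytes under PEP 461, where %d accepts integers but %s requires a bytes-like object. A standalone illustration of that behaviour, independent of this file and of Mercurial's own string handling:

# Pure-Python illustration (not part of the commit): bytes %-formatting on
# Python 3.5+ (PEP 461) accepts integers via %d, while %s raises for an int.
count = 42

print(b"%d\n" % count)        # b'42\n'
print("%d\n" % count)         # '42\n' -- str formatting is unchanged

try:
    print(b"%s\n" % count)    # TypeError: %b requires a bytes-like object ...
except TypeError as exc:
    print("bytes %%s rejects ints: %s" % exc)
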
@@ -1,961 +1,961 b''
1 1 #!/usr/bin/env python
2 2 ## statprof.py
3 3 ## Copyright (C) 2012 Bryan O'Sullivan <bos@serpentine.com>
4 4 ## Copyright (C) 2011 Alex Fraser <alex at phatcore dot com>
5 5 ## Copyright (C) 2004,2005 Andy Wingo <wingo at pobox dot com>
6 6 ## Copyright (C) 2001 Rob Browning <rlb at defaultvalue dot org>
7 7
8 8 ## This library is free software; you can redistribute it and/or
9 9 ## modify it under the terms of the GNU Lesser General Public
10 10 ## License as published by the Free Software Foundation; either
11 11 ## version 2.1 of the License, or (at your option) any later version.
12 12 ##
13 13 ## This library is distributed in the hope that it will be useful,
14 14 ## but WITHOUT ANY WARRANTY; without even the implied warranty of
15 15 ## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
16 16 ## Lesser General Public License for more details.
17 17 ##
18 18 ## You should have received a copy of the GNU Lesser General Public
19 19 ## License along with this program; if not, contact:
20 20 ##
21 21 ## Free Software Foundation Voice: +1-617-542-5942
22 22 ## 59 Temple Place - Suite 330 Fax: +1-617-542-2652
23 23 ## Boston, MA 02111-1307, USA gnu@gnu.org
24 24
25 25 """
26 26 statprof is intended to be a fairly simple statistical profiler for
27 27 python. It was ported directly from a statistical profiler for guile,
28 28 also named statprof, available from guile-lib [0].
29 29
30 30 [0] http://wingolog.org/software/guile-lib/statprof/
31 31
32 32 To start profiling, call statprof.start():
33 33 >>> start()
34 34
35 35 Then run whatever it is that you want to profile, for example:
36 36 >>> import test.pystone; test.pystone.pystones()
37 37
38 38 Then stop the profiling and print out the results:
39 39 >>> stop()
40 40 >>> display()
41 41 % cumulative self
42 42 time seconds seconds name
43 43 26.72 1.40 0.37 pystone.py:79:Proc0
44 44 13.79 0.56 0.19 pystone.py:133:Proc1
45 45 13.79 0.19 0.19 pystone.py:208:Proc8
46 46 10.34 0.16 0.14 pystone.py:229:Func2
47 47 6.90 0.10 0.10 pystone.py:45:__init__
48 48 4.31 0.16 0.06 pystone.py:53:copy
49 49 ...
50 50
51 51 All of the numerical data is statistically approximate. In the
52 52 following column descriptions, and in all of statprof, "time" refers
53 53 to execution time (both user and system), not wall clock time.
54 54
55 55 % time
56 56 The percent of the time spent inside the procedure itself (not
57 57 counting children).
58 58
59 59 cumulative seconds
60 60 The total number of seconds spent in the procedure, including
61 61 children.
62 62
63 63 self seconds
64 64 The total number of seconds spent in the procedure itself (not
65 65 counting children).
66 66
67 67 name
68 68 The name of the procedure.
69 69
70 70 By default statprof keeps the data collected from previous runs. If you
71 71 want to clear the collected data, call reset():
72 72 >>> reset()
73 73
74 74 reset() can also be used to change the sampling frequency from the
75 75 default of 1000 Hz. For example, to tell statprof to sample 50 times a
76 76 second:
77 77 >>> reset(50)
78 78
79 79 This means that statprof will sample the call stack after every 1/50 of
80 80 a second of user + system time spent running on behalf of the python
81 81 process. When your process is idle (for example, blocking in a read(),
82 82 as is the case at the listener), the clock does not advance. For this
83 83 reason statprof is not currently suitable for profiling io-bound
84 84 operations.
85 85
86 86 The profiler uses the hash of the code object itself to identify the
87 87 procedures, so it won't confuse different procedures with the same name.
88 88 They will show up as two different rows in the output.
89 89
90 90 Right now the profiler is quite simplistic. I cannot provide
91 91 call-graphs or other higher level information. What you see in the
92 92 table is pretty much all there is. Patches are welcome :-)
93 93
94 94
95 95 Threading
96 96 ---------
97 97
98 98 Because signals only get delivered to the main thread in Python,
99 99 statprof only profiles the main thread. However because the time
100 100 reporting function uses per-process timers, the results can be
101 101 significantly off if other threads' work patterns are not similar to the
102 102 main thread's work patterns.
103 103 """
104 104 # no-check-code
105 105 from __future__ import absolute_import, division, print_function
106 106
107 107 import collections
108 108 import contextlib
109 109 import getopt
110 110 import inspect
111 111 import json
112 112 import os
113 113 import signal
114 114 import sys
115 115 import threading
116 116 import time
117 117
118 118 from . import (
119 119 encoding,
120 120 pycompat,
121 121 )
122 122
123 123 defaultdict = collections.defaultdict
124 124 contextmanager = contextlib.contextmanager
125 125
126 126 __all__ = ['start', 'stop', 'reset', 'display', 'profile']
127 127
128 128 skips = {
129 129 r"util.py:check",
130 130 r"extensions.py:closure",
131 131 r"color.py:colorcmd",
132 132 r"dispatch.py:checkargs",
133 133 r"dispatch.py:<lambda>",
134 134 r"dispatch.py:_runcatch",
135 135 r"dispatch.py:_dispatch",
136 136 r"dispatch.py:_runcommand",
137 137 r"pager.py:pagecmd",
138 138 r"dispatch.py:run",
139 139 r"dispatch.py:dispatch",
140 140 r"dispatch.py:runcommand",
141 141 r"hg.py:<module>",
142 142 r"evolve.py:warnobserrors",
143 143 }
144 144
145 145 ###########################################################################
146 146 ## Utils
147 147
148 148 def clock():
149 149 times = os.times()
150 150 return (times[0] + times[1], times[4])
151 151
152 152
153 153 ###########################################################################
154 154 ## Collection data structures
155 155
156 156 class ProfileState(object):
157 157 def __init__(self, frequency=None):
158 158 self.reset(frequency)
159 159 self.track = 'cpu'
160 160
161 161 def reset(self, frequency=None):
162 162 # total so far
163 163 self.accumulated_time = (0.0, 0.0)
164 164 # start_time when timer is active
165 165 self.last_start_time = None
166 166 # a float
167 167 if frequency:
168 168 self.sample_interval = 1.0 / frequency
169 169 elif not hasattr(self, 'sample_interval'):
170 170 # default to 1000 Hz
171 171 self.sample_interval = 1.0 / 1000.0
172 172 else:
173 173 # leave the frequency as it was
174 174 pass
175 175 self.remaining_prof_time = None
176 176 # for user start/stop nesting
177 177 self.profile_level = 0
178 178
179 179 self.samples = []
180 180
181 181 def accumulate_time(self, stop_time):
182 182 increment = (
183 183 stop_time[0] - self.last_start_time[0],
184 184 stop_time[1] - self.last_start_time[1],
185 185 )
186 186 self.accumulated_time = (
187 187 self.accumulated_time[0] + increment[0],
188 188 self.accumulated_time[1] + increment[1],
189 189 )
190 190
191 191 def seconds_per_sample(self):
192 192 return self.accumulated_time[self.timeidx] / len(self.samples)
193 193
194 194 @property
195 195 def timeidx(self):
196 196 if self.track == 'real':
197 197 return 1
198 198 return 0
199 199
200 200 state = ProfileState()
201 201
202 202
203 203 class CodeSite(object):
204 204 cache = {}
205 205
206 206 __slots__ = (u'path', u'lineno', u'function', u'source')
207 207
208 208 def __init__(self, path, lineno, function):
209 209 self.path = path
210 210 self.lineno = lineno
211 211 self.function = function
212 212 self.source = None
213 213
214 214 def __eq__(self, other):
215 215 try:
216 216 return (self.lineno == other.lineno and
217 217 self.path == other.path)
218 218 except:
219 219 return False
220 220
221 221 def __hash__(self):
222 222 return hash((self.lineno, self.path))
223 223
224 224 @classmethod
225 225 def get(cls, path, lineno, function):
226 226 k = (path, lineno)
227 227 try:
228 228 return cls.cache[k]
229 229 except KeyError:
230 230 v = cls(path, lineno, function)
231 231 cls.cache[k] = v
232 232 return v
233 233
234 234 def getsource(self, length):
235 235 if self.source is None:
236 236 lineno = self.lineno - 1
237 237 fp = None
238 238 try:
239 239 fp = open(self.path)
240 240 for i, line in enumerate(fp):
241 241 if i == lineno:
242 242 self.source = line.strip()
243 243 break
244 244 except:
245 245 pass
246 246 finally:
247 247 if fp:
248 248 fp.close()
249 249 if self.source is None:
250 250 self.source = ''
251 251
252 252 source = self.source
253 253 if len(source) > length:
254 254 source = source[:(length - 3)] + "..."
255 255 return source
256 256
257 257 def filename(self):
258 258 return os.path.basename(self.path)
259 259
260 260 class Sample(object):
261 261 __slots__ = (u'stack', u'time')
262 262
263 263 def __init__(self, stack, time):
264 264 self.stack = stack
265 265 self.time = time
266 266
267 267 @classmethod
268 268 def from_frame(cls, frame, time):
269 269 stack = []
270 270
271 271 while frame:
272 272 stack.append(CodeSite.get(frame.f_code.co_filename, frame.f_lineno,
273 273 frame.f_code.co_name))
274 274 frame = frame.f_back
275 275
276 276 return Sample(stack, time)
277 277
278 278 ###########################################################################
279 279 ## SIGPROF handler
280 280
281 281 def profile_signal_handler(signum, frame):
282 282 if state.profile_level > 0:
283 283 now = clock()
284 284 state.accumulate_time(now)
285 285
286 286 timestamp = state.accumulated_time[state.timeidx]
287 287 state.samples.append(Sample.from_frame(frame, timestamp))
288 288
289 289 signal.setitimer(signal.ITIMER_PROF,
290 290 state.sample_interval, 0.0)
291 291 state.last_start_time = now
292 292
293 293 stopthread = threading.Event()
294 294 def samplerthread(tid):
295 295 while not stopthread.is_set():
296 296 now = clock()
297 297 state.accumulate_time(now)
298 298
299 299 frame = sys._current_frames()[tid]
300 300
301 301 timestamp = state.accumulated_time[state.timeidx]
302 302 state.samples.append(Sample.from_frame(frame, timestamp))
303 303
304 304 state.last_start_time = now
305 305 time.sleep(state.sample_interval)
306 306
307 307 stopthread.clear()
308 308
309 309 ###########################################################################
310 310 ## Profiling API
311 311
312 312 def is_active():
313 313 return state.profile_level > 0
314 314
315 315 lastmechanism = None
316 316 def start(mechanism='thread', track='cpu'):
317 317 '''Install the profiling signal handler, and start profiling.'''
318 318 state.track = track # note: nesting different mode won't work
319 319 state.profile_level += 1
320 320 if state.profile_level == 1:
321 321 state.last_start_time = clock()
322 322 rpt = state.remaining_prof_time
323 323 state.remaining_prof_time = None
324 324
325 325 global lastmechanism
326 326 lastmechanism = mechanism
327 327
328 328 if mechanism == 'signal':
329 329 signal.signal(signal.SIGPROF, profile_signal_handler)
330 330 signal.setitimer(signal.ITIMER_PROF,
331 331 rpt or state.sample_interval, 0.0)
332 332 elif mechanism == 'thread':
333 333 frame = inspect.currentframe()
334 334 tid = [k for k, f in sys._current_frames().items() if f == frame][0]
335 335 state.thread = threading.Thread(target=samplerthread,
336 336 args=(tid,), name="samplerthread")
337 337 state.thread.start()
338 338
339 339 def stop():
340 340 '''Stop profiling, and uninstall the profiling signal handler.'''
341 341 state.profile_level -= 1
342 342 if state.profile_level == 0:
343 343 if lastmechanism == 'signal':
344 344 rpt = signal.setitimer(signal.ITIMER_PROF, 0.0, 0.0)
345 345 signal.signal(signal.SIGPROF, signal.SIG_IGN)
346 346 state.remaining_prof_time = rpt[0]
347 347 elif lastmechanism == 'thread':
348 348 stopthread.set()
349 349 state.thread.join()
350 350
351 351 state.accumulate_time(clock())
352 352 state.last_start_time = None
353 353 statprofpath = encoding.environ.get('STATPROF_DEST')
354 354 if statprofpath:
355 355 save_data(statprofpath)
356 356
357 357 return state
358 358
359 359 def save_data(path):
360 360 with open(path, 'w+') as file:
361 361 file.write("%f %f\n" % state.accumulated_time)
362 362 for sample in state.samples:
363 - time = str(sample.time)
363 + time = sample.time
364 364 stack = sample.stack
365 365 sites = ['\1'.join([s.path, str(s.lineno), s.function])
366 366 for s in stack]
367 - file.write("%s\0%s\n" % (time, '\0'.join(sites)))
367 + file.write("%d\0%s\n" % (time, '\0'.join(sites)))
368 368
369 369 def load_data(path):
370 370 lines = open(path, 'r').read().splitlines()
371 371
372 372 state.accumulated_time = [float(value) for value in lines[0].split()]
373 373 state.samples = []
374 374 for line in lines[1:]:
375 375 parts = line.split('\0')
376 376 time = float(parts[0])
377 377 rawsites = parts[1:]
378 378 sites = []
379 379 for rawsite in rawsites:
380 380 siteparts = rawsite.split('\1')
381 381 sites.append(CodeSite.get(siteparts[0], int(siteparts[1]),
382 382 siteparts[2]))
383 383
384 384 state.samples.append(Sample(sites, time))
385 385
386 386
387 387
388 388 def reset(frequency=None):
389 389 '''Clear out the state of the profiler. Do not call while the
390 390 profiler is running.
391 391
392 392 The optional frequency argument specifies the number of samples to
393 393 collect per second.'''
394 394 assert state.profile_level == 0, "Can't reset() while statprof is running"
395 395 CodeSite.cache.clear()
396 396 state.reset(frequency)
397 397
398 398
399 399 @contextmanager
400 400 def profile():
401 401 start()
402 402 try:
403 403 yield
404 404 finally:
405 405 stop()
406 406 display()
407 407
408 408
409 409 ###########################################################################
410 410 ## Reporting API
411 411
412 412 class SiteStats(object):
413 413 def __init__(self, site):
414 414 self.site = site
415 415 self.selfcount = 0
416 416 self.totalcount = 0
417 417
418 418 def addself(self):
419 419 self.selfcount += 1
420 420
421 421 def addtotal(self):
422 422 self.totalcount += 1
423 423
424 424 def selfpercent(self):
425 425 return self.selfcount / len(state.samples) * 100
426 426
427 427 def totalpercent(self):
428 428 return self.totalcount / len(state.samples) * 100
429 429
430 430 def selfseconds(self):
431 431 return self.selfcount * state.seconds_per_sample()
432 432
433 433 def totalseconds(self):
434 434 return self.totalcount * state.seconds_per_sample()
435 435
436 436 @classmethod
437 437 def buildstats(cls, samples):
438 438 stats = {}
439 439
440 440 for sample in samples:
441 441 for i, site in enumerate(sample.stack):
442 442 sitestat = stats.get(site)
443 443 if not sitestat:
444 444 sitestat = SiteStats(site)
445 445 stats[site] = sitestat
446 446
447 447 sitestat.addtotal()
448 448
449 449 if i == 0:
450 450 sitestat.addself()
451 451
452 452 return [s for s in stats.itervalues()]
453 453
454 454 class DisplayFormats:
455 455 ByLine = 0
456 456 ByMethod = 1
457 457 AboutMethod = 2
458 458 Hotpath = 3
459 459 FlameGraph = 4
460 460 Json = 5
461 461 Chrome = 6
462 462
463 463 def display(fp=None, format=3, data=None, **kwargs):
464 464 '''Print statistics, either to stdout or the given file object.'''
465 465 if data is None:
466 466 data = state
467 467
468 468 if fp is None:
469 469 import sys
470 470 fp = sys.stdout
471 471 if len(data.samples) == 0:
472 472 print('No samples recorded.', file=fp)
473 473 return
474 474
475 475 if format == DisplayFormats.ByLine:
476 476 display_by_line(data, fp)
477 477 elif format == DisplayFormats.ByMethod:
478 478 display_by_method(data, fp)
479 479 elif format == DisplayFormats.AboutMethod:
480 480 display_about_method(data, fp, **kwargs)
481 481 elif format == DisplayFormats.Hotpath:
482 482 display_hotpath(data, fp, **kwargs)
483 483 elif format == DisplayFormats.FlameGraph:
484 484 write_to_flame(data, fp, **kwargs)
485 485 elif format == DisplayFormats.Json:
486 486 write_to_json(data, fp)
487 487 elif format == DisplayFormats.Chrome:
488 488 write_to_chrome(data, fp, **kwargs)
489 489 else:
490 490 raise Exception("Invalid display format")
491 491
492 492 if format not in (DisplayFormats.Json, DisplayFormats.Chrome):
493 493 print('---', file=fp)
494 494 print('Sample count: %d' % len(data.samples), file=fp)
495 495 print('Total time: %f seconds (%f wall)' % data.accumulated_time,
496 496 file=fp)
497 497
498 498 def display_by_line(data, fp):
499 499 '''Print the profiler data with each sample line represented
500 500 as one row in a table. Sorted by self-time per line.'''
501 501 stats = SiteStats.buildstats(data.samples)
502 502 stats.sort(reverse=True, key=lambda x: x.selfseconds())
503 503
504 504 print('%5.5s %10.10s %7.7s %-8.8s' %
505 505 ('% ', 'cumulative', 'self', ''), file=fp)
506 506 print('%5.5s %9.9s %8.8s %-8.8s' %
507 507 ("time", "seconds", "seconds", "name"), file=fp)
508 508
509 509 for stat in stats:
510 510 site = stat.site
511 511 sitelabel = '%s:%d:%s' % (site.filename(), site.lineno, site.function)
512 512 print('%6.2f %9.2f %9.2f %s' % (stat.selfpercent(),
513 513 stat.totalseconds(),
514 514 stat.selfseconds(),
515 515 sitelabel),
516 516 file=fp)
517 517
518 518 def display_by_method(data, fp):
519 519 '''Print the profiler data with each sample function represented
520 520 as one row in a table. Important lines within that function are
521 521 output as nested rows. Sorted by self-time per line.'''
522 522 print('%5.5s %10.10s %7.7s %-8.8s' %
523 523 ('% ', 'cumulative', 'self', ''), file=fp)
524 524 print('%5.5s %9.9s %8.8s %-8.8s' %
525 525 ("time", "seconds", "seconds", "name"), file=fp)
526 526
527 527 stats = SiteStats.buildstats(data.samples)
528 528
529 529 grouped = defaultdict(list)
530 530 for stat in stats:
531 531 grouped[stat.site.filename() + ":" + stat.site.function].append(stat)
532 532
533 533 # compute sums for each function
534 534 functiondata = []
535 535 for fname, sitestats in grouped.iteritems():
536 536 total_cum_sec = 0
537 537 total_self_sec = 0
538 538 total_percent = 0
539 539 for stat in sitestats:
540 540 total_cum_sec += stat.totalseconds()
541 541 total_self_sec += stat.selfseconds()
542 542 total_percent += stat.selfpercent()
543 543
544 544 functiondata.append((fname,
545 545 total_cum_sec,
546 546 total_self_sec,
547 547 total_percent,
548 548 sitestats))
549 549
550 550 # sort by total self sec
551 551 functiondata.sort(reverse=True, key=lambda x: x[2])
552 552
553 553 for function in functiondata:
554 554 if function[3] < 0.05:
555 555 continue
556 556 print('%6.2f %9.2f %9.2f %s' % (function[3], # total percent
557 557 function[1], # total cum sec
558 558 function[2], # total self sec
559 559 function[0]), # file:function
560 560 file=fp)
561 561 function[4].sort(reverse=True, key=lambda i: i.selfseconds())
562 562 for stat in function[4]:
563 563 # only show line numbers for significant locations (>1% time spent)
564 564 if stat.selfpercent() > 1:
565 565 source = stat.site.getsource(25)
566 566 stattuple = (stat.selfpercent(), stat.selfseconds(),
567 567 stat.site.lineno, source)
568 568
569 - print('%33.0f%% %6.2f line %s: %s' % (stattuple), file=fp)
569 + print('%33.0f%% %6.2f line %d: %s' % (stattuple), file=fp)
570 570
571 571 def display_about_method(data, fp, function=None, **kwargs):
572 572 if function is None:
573 573 raise Exception("Invalid function")
574 574
575 575 filename = None
576 576 if ':' in function:
577 577 filename, function = function.split(':')
578 578
579 579 relevant_samples = 0
580 580 parents = {}
581 581 children = {}
582 582
583 583 for sample in data.samples:
584 584 for i, site in enumerate(sample.stack):
585 585 if site.function == function and (not filename
586 586 or site.filename() == filename):
587 587 relevant_samples += 1
588 588 if i != len(sample.stack) - 1:
589 589 parent = sample.stack[i + 1]
590 590 if parent in parents:
591 591 parents[parent] = parents[parent] + 1
592 592 else:
593 593 parents[parent] = 1
594 594
595 595 if site in children:
596 596 children[site] = children[site] + 1
597 597 else:
598 598 children[site] = 1
599 599
600 600 parents = [(parent, count) for parent, count in parents.iteritems()]
601 601 parents.sort(reverse=True, key=lambda x: x[1])
602 602 for parent, count in parents:
603 603 print('%6.2f%% %s:%s line %s: %s' %
604 604 (count / relevant_samples * 100, parent.filename(),
605 605 parent.function, parent.lineno, parent.getsource(50)), file=fp)
606 606
607 607 stats = SiteStats.buildstats(data.samples)
608 608 stats = [s for s in stats
609 609 if s.site.function == function and
610 610 (not filename or s.site.filename() == filename)]
611 611
612 612 total_cum_sec = 0
613 613 total_self_sec = 0
614 614 total_self_percent = 0
615 615 total_cum_percent = 0
616 616 for stat in stats:
617 617 total_cum_sec += stat.totalseconds()
618 618 total_self_sec += stat.selfseconds()
619 619 total_self_percent += stat.selfpercent()
620 620 total_cum_percent += stat.totalpercent()
621 621
622 622 print(
623 623 '\n %s:%s Total: %0.2fs (%0.2f%%) Self: %0.2fs (%0.2f%%)\n' %
624 624 (
625 625 filename or '___',
626 626 function,
627 627 total_cum_sec,
628 628 total_cum_percent,
629 629 total_self_sec,
630 630 total_self_percent
631 631 ), file=fp)
632 632
633 633 children = [(child, count) for child, count in children.iteritems()]
634 634 children.sort(reverse=True, key=lambda x: x[1])
635 635 for child, count in children:
636 636 print(' %6.2f%% line %s: %s' %
637 637 (count / relevant_samples * 100, child.lineno,
638 638 child.getsource(50)), file=fp)
639 639
640 640 def display_hotpath(data, fp, limit=0.05, **kwargs):
641 641 class HotNode(object):
642 642 def __init__(self, site):
643 643 self.site = site
644 644 self.count = 0
645 645 self.children = {}
646 646
647 647 def add(self, stack, time):
648 648 self.count += time
649 649 site = stack[0]
650 650 child = self.children.get(site)
651 651 if not child:
652 652 child = HotNode(site)
653 653 self.children[site] = child
654 654
655 655 if len(stack) > 1:
656 656 i = 1
657 657 # Skip boiler plate parts of the stack
658 658 name = r'%s:%s' % (stack[i].filename(), stack[i].function)
659 659 while i < len(stack) and name in skips:
660 660 i += 1
661 661 if i < len(stack):
662 662 child.add(stack[i:], time)
663 663
664 664 root = HotNode(None)
665 665 lasttime = data.samples[0].time
666 666 for sample in data.samples:
667 667 root.add(sample.stack[::-1], sample.time - lasttime)
668 668 lasttime = sample.time
669 669
670 670 def _write(node, depth, multiple_siblings):
671 671 site = node.site
672 672 visiblechildren = [c for c in node.children.itervalues()
673 673 if c.count >= (limit * root.count)]
674 674 if site:
675 675 indent = depth * 2 - 1
676 676 filename = ''
677 677 function = ''
678 678 if len(node.children) > 0:
679 679 childsite = list(node.children.itervalues())[0].site
680 680 filename = (childsite.filename() + ':').ljust(15)
681 681 function = childsite.function
682 682
683 683 # lots of string formatting
684 684 listpattern = ''.ljust(indent) +\
685 685 ('\\' if multiple_siblings else '|') +\
686 686 ' %4.1f%% %s %s'
687 687 liststring = listpattern % (node.count / root.count * 100,
688 688 filename, function)
689 689 codepattern = '%' + str(55 - len(liststring)) + 's %s: %s'
690 690 codestring = codepattern % ('line', site.lineno, site.getsource(30))
691 691
692 692 finalstring = liststring + codestring
693 693 childrensamples = sum([c.count for c in node.children.itervalues()])
694 694 # Make frames that performed more than 10% of the operation red
695 695 if node.count - childrensamples > (0.1 * root.count):
696 696 finalstring = '\033[91m' + finalstring + '\033[0m'
697 697 # Make frames that didn't actually perform work dark grey
698 698 elif node.count - childrensamples == 0:
699 699 finalstring = '\033[90m' + finalstring + '\033[0m'
700 700 print(finalstring, file=fp)
701 701
702 702 newdepth = depth
703 703 if len(visiblechildren) > 1 or multiple_siblings:
704 704 newdepth += 1
705 705
706 706 visiblechildren.sort(reverse=True, key=lambda x: x.count)
707 707 for child in visiblechildren:
708 708 _write(child, newdepth, len(visiblechildren) > 1)
709 709
710 710 if root.count > 0:
711 711 _write(root, 0, False)
712 712
713 713 def write_to_flame(data, fp, scriptpath=None, outputfile=None, **kwargs):
714 714 if scriptpath is None:
715 715 scriptpath = encoding.environ['HOME'] + '/flamegraph.pl'
716 716 if not os.path.exists(scriptpath):
717 717 print("error: missing %s" % scriptpath, file=fp)
718 718 print("get it here: https://github.com/brendangregg/FlameGraph",
719 719 file=fp)
720 720 return
721 721
722 722 fd, path = pycompat.mkstemp()
723 723
724 724 file = open(path, "w+")
725 725
726 726 lines = {}
727 727 for sample in data.samples:
728 728 sites = [s.function for s in sample.stack]
729 729 sites.reverse()
730 730 line = ';'.join(sites)
731 731 if line in lines:
732 732 lines[line] = lines[line] + 1
733 733 else:
734 734 lines[line] = 1
735 735
736 736 for line, count in lines.iteritems():
737 - file.write("%s %s\n" % (line, count))
737 + file.write("%s %d\n" % (line, count))
738 738
739 739 file.close()
740 740
741 741 if outputfile is None:
742 742 outputfile = '~/flamegraph.svg'
743 743
744 744 os.system("perl ~/flamegraph.pl %s > %s" % (path, outputfile))
745 745 print("Written to %s" % outputfile, file=fp)
746 746
747 747 _pathcache = {}
748 748 def simplifypath(path):
749 749 '''Attempt to make the path to a Python module easier to read by
750 750 removing whatever part of the Python search path it was found
751 751 on.'''
752 752
753 753 if path in _pathcache:
754 754 return _pathcache[path]
755 755 hgpath = pycompat.fsencode(encoding.__file__).rsplit(os.sep, 2)[0]
756 756 for p in [hgpath] + sys.path:
757 757 prefix = p + os.sep
758 758 if path.startswith(prefix):
759 759 path = path[len(prefix):]
760 760 break
761 761 _pathcache[path] = path
762 762 return path
763 763
764 764 def write_to_json(data, fp):
765 765 samples = []
766 766
767 767 for sample in data.samples:
768 768 stack = []
769 769
770 770 for frame in sample.stack:
771 771 stack.append((frame.path, frame.lineno, frame.function))
772 772
773 773 samples.append((sample.time, stack))
774 774
775 775 data = json.dumps(samples)
776 776 if not isinstance(data, bytes):
777 777 data = data.encode('utf-8')
778 778
779 779 fp.write(data)
780 780
781 781 def write_to_chrome(data, fp, minthreshold=0.005, maxthreshold=0.999):
782 782 samples = []
783 783 laststack = collections.deque()
784 784 lastseen = collections.deque()
785 785
786 786 # The Chrome tracing format allows us to use a compact stack
787 787 # representation to save space. It's fiddly but worth it.
788 788 # We maintain a bijection between stack and ID.
789 789 stack2id = {}
790 790 id2stack = [] # will eventually be rendered
791 791
792 792 def stackid(stack):
793 793 if not stack:
794 794 return
795 795 if stack in stack2id:
796 796 return stack2id[stack]
797 797 parent = stackid(stack[1:])
798 798 myid = len(stack2id)
799 799 stack2id[stack] = myid
800 800 id2stack.append(dict(category=stack[0][0], name='%s %s' % stack[0]))
801 801 if parent is not None:
802 802 id2stack[-1].update(parent=parent)
803 803 return myid
804 804
805 805 def endswith(a, b):
806 806 return list(a)[-len(b):] == list(b)
807 807
808 808 # The sampling profiler can sample multiple times without
809 809 # advancing the clock, potentially causing the Chrome trace viewer
810 810 # to render single-pixel columns that we cannot zoom in on. We
811 811 # work around this by pretending that zero-duration samples are a
812 812 # millisecond in length.
813 813
814 814 clamp = 0.001
815 815
816 816 # We provide knobs that by default attempt to filter out stack
817 817 # frames that are too noisy:
818 818 #
819 819 # * A few take almost all execution time. These are usually boring
820 820 # setup functions, giving a stack that is deep but uninformative.
821 821 #
822 822 # * Numerous samples take almost no time, but introduce lots of
823 823 # noisy, oft-deep "spines" into a rendered profile.
824 824
825 825 blacklist = set()
826 826 totaltime = data.samples[-1].time - data.samples[0].time
827 827 minthreshold = totaltime * minthreshold
828 828 maxthreshold = max(totaltime * maxthreshold, clamp)
829 829
830 830 def poplast():
831 831 oldsid = stackid(tuple(laststack))
832 832 oldcat, oldfunc = laststack.popleft()
833 833 oldtime, oldidx = lastseen.popleft()
834 834 duration = sample.time - oldtime
835 835 if minthreshold <= duration <= maxthreshold:
836 836 # ensure no zero-duration events
837 837 sampletime = max(oldtime + clamp, sample.time)
838 838 samples.append(dict(ph='E', name=oldfunc, cat=oldcat, sf=oldsid,
839 839 ts=sampletime*1e6, pid=0))
840 840 else:
841 841 blacklist.add(oldidx)
842 842
843 843 # Much fiddling to synthesize correctly(ish) nested begin/end
844 844 # events given only stack snapshots.
845 845
846 846 for sample in data.samples:
847 847 tos = sample.stack[0]
848 848 name = tos.function
849 849 path = simplifypath(tos.path)
850 850 stack = tuple((('%s:%d' % (simplifypath(frame.path), frame.lineno),
851 851 frame.function) for frame in sample.stack))
852 852 qstack = collections.deque(stack)
853 853 if laststack == qstack:
854 854 continue
855 855 while laststack and qstack and laststack[-1] == qstack[-1]:
856 856 laststack.pop()
857 857 qstack.pop()
858 858 while laststack:
859 859 poplast()
860 860 for f in reversed(qstack):
861 861 lastseen.appendleft((sample.time, len(samples)))
862 862 laststack.appendleft(f)
863 863 path, name = f
864 864 sid = stackid(tuple(laststack))
865 865 samples.append(dict(ph='B', name=name, cat=path, ts=sample.time*1e6,
866 866 sf=sid, pid=0))
867 867 laststack = collections.deque(stack)
868 868 while laststack:
869 869 poplast()
870 870 events = [s[1] for s in enumerate(samples) if s[0] not in blacklist]
871 871 frames = collections.OrderedDict((str(k), v)
872 872 for (k,v) in enumerate(id2stack))
873 873 json.dump(dict(traceEvents=events, stackFrames=frames), fp, indent=1)
874 874 fp.write('\n')
875 875
876 876 def printusage():
877 877 print("""
878 878 The statprof command line allows you to inspect the last profile's results in
879 879 the following forms:
880 880
881 881 usage:
882 882 hotpath [-l --limit percent]
883 883 Shows a graph of calls with the percent of time each takes.
884 884 Red calls take over 10%% of the total time themselves.
885 885 lines
886 886 Shows the actual sampled lines.
887 887 functions
888 888 Shows the samples grouped by function.
889 889 function [filename:]functionname
890 890 Shows the callers and callees of a particular function.
891 891 flame [-s --script-path] [-o --output-file path]
892 892 Writes out a flamegraph to output-file (defaults to ~/flamegraph.svg)
893 893 Requires that ~/flamegraph.pl exist.
894 894 (Specify alternate script path with --script-path.)""")
895 895
896 896 def main(argv=None):
897 897 if argv is None:
898 898 argv = sys.argv
899 899
900 900 if len(argv) == 1:
901 901 printusage()
902 902 return 0
903 903
904 904 displayargs = {}
905 905
906 906 optstart = 2
907 907 displayargs['function'] = None
908 908 if argv[1] == 'hotpath':
909 909 displayargs['format'] = DisplayFormats.Hotpath
910 910 elif argv[1] == 'lines':
911 911 displayargs['format'] = DisplayFormats.ByLine
912 912 elif argv[1] == 'functions':
913 913 displayargs['format'] = DisplayFormats.ByMethod
914 914 elif argv[1] == 'function':
915 915 displayargs['format'] = DisplayFormats.AboutMethod
916 916 displayargs['function'] = argv[2]
917 917 optstart = 3
918 918 elif argv[1] == 'flame':
919 919 displayargs['format'] = DisplayFormats.FlameGraph
920 920 else:
921 921 printusage()
922 922 return 0
923 923
924 924 # process options
925 925 try:
926 926 opts, args = pycompat.getoptb(sys.argv[optstart:], "hl:f:o:p:",
927 927 ["help", "limit=", "file=", "output-file=", "script-path="])
928 928 except getopt.error as msg:
929 929 print(msg)
930 930 printusage()
931 931 return 2
932 932
933 933 displayargs['limit'] = 0.05
934 934 path = None
935 935 for o, value in opts:
936 936 if o in ("-l", "--limit"):
937 937 displayargs['limit'] = float(value)
938 938 elif o in ("-f", "--file"):
939 939 path = value
940 940 elif o in ("-o", "--output-file"):
941 941 displayargs['outputfile'] = value
942 942 elif o in ("-p", "--script-path"):
943 943 displayargs['scriptpath'] = value
944 944 elif o in ("-h", "help"):
945 945 printusage()
946 946 return 0
947 947 else:
948 948 assert False, "unhandled option %s" % o
949 949
950 950 if not path:
951 951 print('must specify --file to load')
952 952 return 1
953 953
954 954 load_data(path=path)
955 955
956 956 display(**pycompat.strkwargs(displayargs))
957 957
958 958 return 0
959 959
960 960 if __name__ == "__main__":
961 961 sys.exit(main())
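
For reference, the profiler above is typically driven as sketched below. This is a hedged usage sketch based on the module docstring and main() in this file; the workload and output path are hypothetical placeholders.

# Hedged usage sketch (assumes Mercurial's package layout; run_workload() and
# the output path are hypothetical).
from mercurial import statprof

statprof.start(mechanism='thread', track='cpu')   # begin sampling the main thread
try:
    run_workload()                                # code under test (hypothetical)
finally:
    statprof.stop()                               # also honors $STATPROF_DEST if set
    statprof.save_data('/tmp/hg-profile.data')    # same format load_data() reads back

# Render a hotpath report for frames taking at least 5% of the samples.
statprof.display(format=statprof.DisplayFormats.Hotpath, limit=0.05)

The saved data can also be inspected later through the command-line front end defined by main(), e.g. python -m mercurial.statprof hotpath --file /tmp/hg-profile.data --limit 0.05 (the invocation path is an assumption; any equivalent way of running this module as a script works).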