py3: replace os.environ with encoding.environ (part 4 of 5)
Pulkit Goyal
r30637:344e6888 default
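This revision is one step in a mechanical series: call sites that read the
process environment through os.environ are switched to Mercurial's
encoding.environ, which keeps environment lookups byte-oriented on Python 3.
The sketch below is only a rough approximation of that idea, assuming
encoding.environ behaves like a bytes-keyed mapping of the environment; the
real definition lives in mercurial/encoding.py and differs in detail.

    # Rough, hypothetical approximation (Python 3) of what encoding.environ
    # provides: a bytes-keyed, bytes-valued view of the process environment,
    # so code that handles environment data as bytes keeps working after the
    # swap away from os.environ.
    import os

    environ = getattr(os, 'environb', None)   # POSIX Python 3 has os.environb
    if environ is None:
        # e.g. Windows: fall back to encoding the str-keyed mapping
        environ = {os.fsencode(k): os.fsencode(v)
                   for k, v in os.environ.items()}

    # A call site touched by this patch, rewritten against the sketch:
    print(environ.get(b'STATPROF_DEST'))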
@@ -1,58 +1,59 b''
1 1 from __future__ import absolute_import
2 2
3 3 import os
4 4
5 5 from . import (
6 encoding,
6 7 osutil,
7 8 pycompat,
8 9 util,
9 10 win32,
10 11 )
11 12
12 13 try:
13 14 import _winreg as winreg
14 15 winreg.CloseKey
15 16 except ImportError:
16 17 import winreg
17 18
18 19 def systemrcpath():
19 20 '''return default os-specific hgrc search path'''
20 21 rcpath = []
21 22 filename = util.executablepath()
22 23 # Use mercurial.ini found in directory with hg.exe
23 24 progrc = os.path.join(os.path.dirname(filename), 'mercurial.ini')
24 25 rcpath.append(progrc)
25 26 # Use hgrc.d found in directory with hg.exe
26 27 progrcd = os.path.join(os.path.dirname(filename), 'hgrc.d')
27 28 if os.path.isdir(progrcd):
28 29 for f, kind in osutil.listdir(progrcd):
29 30 if f.endswith('.rc'):
30 31 rcpath.append(os.path.join(progrcd, f))
31 32 # else look for a system rcpath in the registry
32 33 value = util.lookupreg('SOFTWARE\\Mercurial', None,
33 34 winreg.HKEY_LOCAL_MACHINE)
34 35 if not isinstance(value, str) or not value:
35 36 return rcpath
36 37 value = util.localpath(value)
37 38 for p in value.split(pycompat.ospathsep):
38 39 if p.lower().endswith('mercurial.ini'):
39 40 rcpath.append(p)
40 41 elif os.path.isdir(p):
41 42 for f, kind in osutil.listdir(p):
42 43 if f.endswith('.rc'):
43 44 rcpath.append(os.path.join(p, f))
44 45 return rcpath
45 46
46 47 def userrcpath():
47 48 '''return os-specific hgrc search path to the user dir'''
48 49 home = os.path.expanduser('~')
49 50 path = [os.path.join(home, 'mercurial.ini'),
50 51 os.path.join(home, '.hgrc')]
51 userprofile = os.environ.get('USERPROFILE')
52 userprofile = encoding.environ.get('USERPROFILE')
52 53 if userprofile and userprofile != home:
53 54 path.append(os.path.join(userprofile, 'mercurial.ini'))
54 55 path.append(os.path.join(userprofile, '.hgrc'))
55 56 return path
56 57
57 58 def termsize(ui):
58 59 return win32.termsize()
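For illustration, the user search order built above resolves to the per-user
mercurial.ini and .hgrc in the home directory, plus the same pair under
%USERPROFILE% when that differs from the home directory. A minimal way to
exercise it on Windows follows; the module name and import path are
assumptions inferred from the hunk above.

    # Hypothetical illustration; only meaningful on Windows, where the win32
    # and winreg imports above are available.
    from mercurial import scmwindows   # assumed home of the functions above

    for p in scmwindows.userrcpath():
        print(p)
    # Typical shape of the output (example values, not prescribed by the code):
    #   C:\Users\jdoe\mercurial.ini
    #   C:\Users\jdoe\.hgrc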
@@ -1,809 +1,810 b''
1 1 #!/usr/bin/env python
2 2 ## statprof.py
3 3 ## Copyright (C) 2012 Bryan O'Sullivan <bos@serpentine.com>
4 4 ## Copyright (C) 2011 Alex Fraser <alex at phatcore dot com>
5 5 ## Copyright (C) 2004,2005 Andy Wingo <wingo at pobox dot com>
6 6 ## Copyright (C) 2001 Rob Browning <rlb at defaultvalue dot org>
7 7
8 8 ## This library is free software; you can redistribute it and/or
9 9 ## modify it under the terms of the GNU Lesser General Public
10 10 ## License as published by the Free Software Foundation; either
11 11 ## version 2.1 of the License, or (at your option) any later version.
12 12 ##
13 13 ## This library is distributed in the hope that it will be useful,
14 14 ## but WITHOUT ANY WARRANTY; without even the implied warranty of
15 15 ## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
16 16 ## Lesser General Public License for more details.
17 17 ##
18 18 ## You should have received a copy of the GNU Lesser General Public
19 19 ## License along with this program; if not, contact:
20 20 ##
21 21 ## Free Software Foundation Voice: +1-617-542-5942
22 22 ## 59 Temple Place - Suite 330 Fax: +1-617-542-2652
23 23 ## Boston, MA 02111-1307, USA gnu@gnu.org
24 24
25 25 """
26 26 statprof is intended to be a fairly simple statistical profiler for
27 27 python. It was ported directly from a statistical profiler for guile,
28 28 also named statprof, available from guile-lib [0].
29 29
30 30 [0] http://wingolog.org/software/guile-lib/statprof/
31 31
32 32 To start profiling, call statprof.start():
33 33 >>> start()
34 34
35 35 Then run whatever it is that you want to profile, for example:
36 36 >>> import test.pystone; test.pystone.pystones()
37 37
38 38 Then stop the profiling and print out the results:
39 39 >>> stop()
40 40 >>> display()
41 41 % cumulative self
42 42 time seconds seconds name
43 43 26.72 1.40 0.37 pystone.py:79:Proc0
44 44 13.79 0.56 0.19 pystone.py:133:Proc1
45 45 13.79 0.19 0.19 pystone.py:208:Proc8
46 46 10.34 0.16 0.14 pystone.py:229:Func2
47 47 6.90 0.10 0.10 pystone.py:45:__init__
48 48 4.31 0.16 0.06 pystone.py:53:copy
49 49 ...
50 50
51 51 All of the numerical data is statistically approximate. In the
52 52 following column descriptions, and in all of statprof, "time" refers
53 53 to execution time (both user and system), not wall clock time.
54 54
55 55 % time
56 56 The percent of the time spent inside the procedure itself (not
57 57 counting children).
58 58
59 59 cumulative seconds
60 60 The total number of seconds spent in the procedure, including
61 61 children.
62 62
63 63 self seconds
64 64 The total number of seconds spent in the procedure itself (not
65 65 counting children).
66 66
67 67 name
68 68 The name of the procedure.
69 69
70 70 By default statprof keeps the data collected from previous runs. If you
71 71 want to clear the collected data, call reset():
72 72 >>> reset()
73 73
74 74 reset() can also be used to change the sampling frequency from the
75 75 default of 1000 Hz. For example, to tell statprof to sample 50 times a
76 76 second:
77 77 >>> reset(50)
78 78
79 79 This means that statprof will sample the call stack after every 1/50 of
80 80 a second of user + system time spent running on behalf of the python
81 81 process. When your process is idle (for example, blocking in a read(),
82 82 as is the case at the listener), the clock does not advance. For this
83 83 reason statprof is currently not suitable for profiling io-bound
84 84 operations.
85 85
86 86 The profiler uses the hash of the code object itself to identify the
87 87 procedures, so it won't confuse different procedures with the same name.
88 88 They will show up as two different rows in the output.
89 89
90 90 Right now the profiler is quite simplistic. It cannot provide
91 91 call-graphs or other higher level information. What you see in the
92 92 table is pretty much all there is. Patches are welcome :-)
93 93
94 94
95 95 Threading
96 96 ---------
97 97
98 98 Because signals only get delivered to the main thread in Python,
99 99 statprof only profiles the main thread. However, because the time
100 100 reporting function uses per-process timers, the results can be
101 101 significantly off if other threads' work patterns are not similar to the
102 102 main thread's work patterns.
103 103 """
104 104 # no-check-code
105 105 from __future__ import absolute_import, division, print_function
106 106
107 107 import collections
108 108 import contextlib
109 109 import getopt
110 110 import inspect
111 111 import json
112 112 import os
113 113 import signal
114 114 import sys
115 115 import tempfile
116 116 import threading
117 117 import time
118 118
119 119 from . import (
120 encoding,
120 121 pycompat,
121 122 )
122 123
123 124 defaultdict = collections.defaultdict
124 125 contextmanager = contextlib.contextmanager
125 126
126 127 __all__ = ['start', 'stop', 'reset', 'display', 'profile']
127 128
128 129 skips = set(["util.py:check", "extensions.py:closure",
129 130 "color.py:colorcmd", "dispatch.py:checkargs",
130 131 "dispatch.py:<lambda>", "dispatch.py:_runcatch",
131 132 "dispatch.py:_dispatch", "dispatch.py:_runcommand",
132 133 "pager.py:pagecmd", "dispatch.py:run",
133 134 "dispatch.py:dispatch", "dispatch.py:runcommand",
134 135 "hg.py:<module>", "evolve.py:warnobserrors",
135 136 ])
136 137
137 138 ###########################################################################
138 139 ## Utils
139 140
140 141 def clock():
141 142 times = os.times()
142 143 return times[0] + times[1]
143 144
144 145
145 146 ###########################################################################
146 147 ## Collection data structures
147 148
148 149 class ProfileState(object):
149 150 def __init__(self, frequency=None):
150 151 self.reset(frequency)
151 152
152 153 def reset(self, frequency=None):
153 154 # total so far
154 155 self.accumulated_time = 0.0
155 156 # start_time when timer is active
156 157 self.last_start_time = None
157 158 # a float
158 159 if frequency:
159 160 self.sample_interval = 1.0 / frequency
160 161 elif not hasattr(self, 'sample_interval'):
161 162 # default to 1000 Hz
162 163 self.sample_interval = 1.0 / 1000.0
163 164 else:
164 165 # leave the frequency as it was
165 166 pass
166 167 self.remaining_prof_time = None
167 168 # for user start/stop nesting
168 169 self.profile_level = 0
169 170
170 171 self.samples = []
171 172
172 173 def accumulate_time(self, stop_time):
173 174 self.accumulated_time += stop_time - self.last_start_time
174 175
175 176 def seconds_per_sample(self):
176 177 return self.accumulated_time / len(self.samples)
177 178
178 179 state = ProfileState()
179 180
180 181
181 182 class CodeSite(object):
182 183 cache = {}
183 184
184 185 __slots__ = (u'path', u'lineno', u'function', u'source')
185 186
186 187 def __init__(self, path, lineno, function):
187 188 self.path = path
188 189 self.lineno = lineno
189 190 self.function = function
190 191 self.source = None
191 192
192 193 def __eq__(self, other):
193 194 try:
194 195 return (self.lineno == other.lineno and
195 196 self.path == other.path)
196 197 except:
197 198 return False
198 199
199 200 def __hash__(self):
200 201 return hash((self.lineno, self.path))
201 202
202 203 @classmethod
203 204 def get(cls, path, lineno, function):
204 205 k = (path, lineno)
205 206 try:
206 207 return cls.cache[k]
207 208 except KeyError:
208 209 v = cls(path, lineno, function)
209 210 cls.cache[k] = v
210 211 return v
211 212
212 213 def getsource(self, length):
213 214 if self.source is None:
214 215 lineno = self.lineno - 1
215 216 fp = None
216 217 try:
217 218 fp = open(self.path)
218 219 for i, line in enumerate(fp):
219 220 if i == lineno:
220 221 self.source = line.strip()
221 222 break
222 223 except:
223 224 pass
224 225 finally:
225 226 if fp:
226 227 fp.close()
227 228 if self.source is None:
228 229 self.source = ''
229 230
230 231 source = self.source
231 232 if len(source) > length:
232 233 source = source[:(length - 3)] + "..."
233 234 return source
234 235
235 236 def filename(self):
236 237 return os.path.basename(self.path)
237 238
238 239 class Sample(object):
239 240 __slots__ = (u'stack', u'time')
240 241
241 242 def __init__(self, stack, time):
242 243 self.stack = stack
243 244 self.time = time
244 245
245 246 @classmethod
246 247 def from_frame(cls, frame, time):
247 248 stack = []
248 249
249 250 while frame:
250 251 stack.append(CodeSite.get(frame.f_code.co_filename, frame.f_lineno,
251 252 frame.f_code.co_name))
252 253 frame = frame.f_back
253 254
254 255 return Sample(stack, time)
255 256
256 257 ###########################################################################
257 258 ## SIGPROF handler
258 259
259 260 def profile_signal_handler(signum, frame):
260 261 if state.profile_level > 0:
261 262 now = clock()
262 263 state.accumulate_time(now)
263 264
264 265 state.samples.append(Sample.from_frame(frame, state.accumulated_time))
265 266
266 267 signal.setitimer(signal.ITIMER_PROF,
267 268 state.sample_interval, 0.0)
268 269 state.last_start_time = now
269 270
270 271 stopthread = threading.Event()
271 272 def samplerthread(tid):
272 273 while not stopthread.is_set():
273 274 now = clock()
274 275 state.accumulate_time(now)
275 276
276 277 frame = sys._current_frames()[tid]
277 278 state.samples.append(Sample.from_frame(frame, state.accumulated_time))
278 279
279 280 state.last_start_time = now
280 281 time.sleep(state.sample_interval)
281 282
282 283 stopthread.clear()
283 284
284 285 ###########################################################################
285 286 ## Profiling API
286 287
287 288 def is_active():
288 289 return state.profile_level > 0
289 290
290 291 lastmechanism = None
291 292 def start(mechanism='thread'):
292 293 '''Install the profiling signal handler, and start profiling.'''
293 294 state.profile_level += 1
294 295 if state.profile_level == 1:
295 296 state.last_start_time = clock()
296 297 rpt = state.remaining_prof_time
297 298 state.remaining_prof_time = None
298 299
299 300 global lastmechanism
300 301 lastmechanism = mechanism
301 302
302 303 if mechanism == 'signal':
303 304 signal.signal(signal.SIGPROF, profile_signal_handler)
304 305 signal.setitimer(signal.ITIMER_PROF,
305 306 rpt or state.sample_interval, 0.0)
306 307 elif mechanism == 'thread':
307 308 frame = inspect.currentframe()
308 309 tid = [k for k, f in sys._current_frames().items() if f == frame][0]
309 310 state.thread = threading.Thread(target=samplerthread,
310 311 args=(tid,), name="samplerthread")
311 312 state.thread.start()
312 313
313 314 def stop():
314 315 '''Stop profiling, and uninstall the profiling signal handler.'''
315 316 state.profile_level -= 1
316 317 if state.profile_level == 0:
317 318 if lastmechanism == 'signal':
318 319 rpt = signal.setitimer(signal.ITIMER_PROF, 0.0, 0.0)
319 320 signal.signal(signal.SIGPROF, signal.SIG_IGN)
320 321 state.remaining_prof_time = rpt[0]
321 322 elif lastmechanism == 'thread':
322 323 stopthread.set()
323 324 state.thread.join()
324 325
325 326 state.accumulate_time(clock())
326 327 state.last_start_time = None
327 statprofpath = os.environ.get('STATPROF_DEST')
328 statprofpath = encoding.environ.get('STATPROF_DEST')
328 329 if statprofpath:
329 330 save_data(statprofpath)
330 331
331 332 return state
332 333
333 334 def save_data(path):
334 335 with open(path, 'w+') as file:
335 336 file.write(str(state.accumulated_time) + '\n')
336 337 for sample in state.samples:
337 338 time = str(sample.time)
338 339 stack = sample.stack
339 340 sites = ['\1'.join([s.path, str(s.lineno), s.function])
340 341 for s in stack]
341 342 file.write(time + '\0' + '\0'.join(sites) + '\n')
342 343
343 344 def load_data(path):
344 345 lines = open(path, 'r').read().splitlines()
345 346
346 347 state.accumulated_time = float(lines[0])
347 348 state.samples = []
348 349 for line in lines[1:]:
349 350 parts = line.split('\0')
350 351 time = float(parts[0])
351 352 rawsites = parts[1:]
352 353 sites = []
353 354 for rawsite in rawsites:
354 355 siteparts = rawsite.split('\1')
355 356 sites.append(CodeSite.get(siteparts[0], int(siteparts[1]),
356 357 siteparts[2]))
357 358
358 359 state.samples.append(Sample(sites, time))
359 360
360 361
361 362
362 363 def reset(frequency=None):
363 364 '''Clear out the state of the profiler. Do not call while the
364 365 profiler is running.
365 366
366 367 The optional frequency argument specifies the number of samples to
367 368 collect per second.'''
368 369 assert state.profile_level == 0, "Can't reset() while statprof is running"
369 370 CodeSite.cache.clear()
370 371 state.reset(frequency)
371 372
372 373
373 374 @contextmanager
374 375 def profile():
375 376 start()
376 377 try:
377 378 yield
378 379 finally:
379 380 stop()
380 381 display()
381 382
382 383
383 384 ###########################################################################
384 385 ## Reporting API
385 386
386 387 class SiteStats(object):
387 388 def __init__(self, site):
388 389 self.site = site
389 390 self.selfcount = 0
390 391 self.totalcount = 0
391 392
392 393 def addself(self):
393 394 self.selfcount += 1
394 395
395 396 def addtotal(self):
396 397 self.totalcount += 1
397 398
398 399 def selfpercent(self):
399 400 return self.selfcount / len(state.samples) * 100
400 401
401 402 def totalpercent(self):
402 403 return self.totalcount / len(state.samples) * 100
403 404
404 405 def selfseconds(self):
405 406 return self.selfcount * state.seconds_per_sample()
406 407
407 408 def totalseconds(self):
408 409 return self.totalcount * state.seconds_per_sample()
409 410
410 411 @classmethod
411 412 def buildstats(cls, samples):
412 413 stats = {}
413 414
414 415 for sample in samples:
415 416 for i, site in enumerate(sample.stack):
416 417 sitestat = stats.get(site)
417 418 if not sitestat:
418 419 sitestat = SiteStats(site)
419 420 stats[site] = sitestat
420 421
421 422 sitestat.addtotal()
422 423
423 424 if i == 0:
424 425 sitestat.addself()
425 426
426 427 return [s for s in stats.itervalues()]
427 428
428 429 class DisplayFormats:
429 430 ByLine = 0
430 431 ByMethod = 1
431 432 AboutMethod = 2
432 433 Hotpath = 3
433 434 FlameGraph = 4
434 435 Json = 5
435 436
436 437 def display(fp=None, format=3, data=None, **kwargs):
437 438 '''Print statistics, either to stdout or the given file object.'''
438 439 data = data or state
439 440
440 441 if fp is None:
441 442 import sys
442 443 fp = sys.stdout
443 444 if len(data.samples) == 0:
444 445 print('No samples recorded.', file=fp)
445 446 return
446 447
447 448 if format == DisplayFormats.ByLine:
448 449 display_by_line(data, fp)
449 450 elif format == DisplayFormats.ByMethod:
450 451 display_by_method(data, fp)
451 452 elif format == DisplayFormats.AboutMethod:
452 453 display_about_method(data, fp, **kwargs)
453 454 elif format == DisplayFormats.Hotpath:
454 455 display_hotpath(data, fp, **kwargs)
455 456 elif format == DisplayFormats.FlameGraph:
456 457 write_to_flame(data, fp, **kwargs)
457 458 elif format == DisplayFormats.Json:
458 459 write_to_json(data, fp)
459 460 else:
460 461 raise Exception("Invalid display format")
461 462
462 463 if format != DisplayFormats.Json:
463 464 print('---', file=fp)
464 465 print('Sample count: %d' % len(data.samples), file=fp)
465 466 print('Total time: %f seconds' % data.accumulated_time, file=fp)
466 467
467 468 def display_by_line(data, fp):
468 469 '''Print the profiler data with each sample line represented
469 470 as one row in a table. Sorted by self-time per line.'''
470 471 stats = SiteStats.buildstats(data.samples)
471 472 stats.sort(reverse=True, key=lambda x: x.selfseconds())
472 473
473 474 print('%5.5s %10.10s %7.7s %-8.8s' %
474 475 ('% ', 'cumulative', 'self', ''), file=fp)
475 476 print('%5.5s %9.9s %8.8s %-8.8s' %
476 477 ("time", "seconds", "seconds", "name"), file=fp)
477 478
478 479 for stat in stats:
479 480 site = stat.site
480 481 sitelabel = '%s:%d:%s' % (site.filename(), site.lineno, site.function)
481 482 print('%6.2f %9.2f %9.2f %s' % (stat.selfpercent(),
482 483 stat.totalseconds(),
483 484 stat.selfseconds(),
484 485 sitelabel),
485 486 file=fp)
486 487
487 488 def display_by_method(data, fp):
488 489 '''Print the profiler data with each sample function represented
489 490 as one row in a table. Important lines within that function are
490 491 output as nested rows. Sorted by self-time per line.'''
491 492 print('%5.5s %10.10s %7.7s %-8.8s' %
492 493 ('% ', 'cumulative', 'self', ''), file=fp)
493 494 print('%5.5s %9.9s %8.8s %-8.8s' %
494 495 ("time", "seconds", "seconds", "name"), file=fp)
495 496
496 497 stats = SiteStats.buildstats(data.samples)
497 498
498 499 grouped = defaultdict(list)
499 500 for stat in stats:
500 501 grouped[stat.site.filename() + ":" + stat.site.function].append(stat)
501 502
502 503 # compute sums for each function
503 504 functiondata = []
504 505 for fname, sitestats in grouped.iteritems():
505 506 total_cum_sec = 0
506 507 total_self_sec = 0
507 508 total_percent = 0
508 509 for stat in sitestats:
509 510 total_cum_sec += stat.totalseconds()
510 511 total_self_sec += stat.selfseconds()
511 512 total_percent += stat.selfpercent()
512 513
513 514 functiondata.append((fname,
514 515 total_cum_sec,
515 516 total_self_sec,
516 517 total_percent,
517 518 sitestats))
518 519
519 520 # sort by total self sec
520 521 functiondata.sort(reverse=True, key=lambda x: x[2])
521 522
522 523 for function in functiondata:
523 524 if function[3] < 0.05:
524 525 continue
525 526 print('%6.2f %9.2f %9.2f %s' % (function[3], # total percent
526 527 function[1], # total cum sec
527 528 function[2], # total self sec
528 529 function[0]), # file:function
529 530 file=fp)
530 531 function[4].sort(reverse=True, key=lambda i: i.selfseconds())
531 532 for stat in function[4]:
532 533 # only show line numbers for significant locations (>1% time spent)
533 534 if stat.selfpercent() > 1:
534 535 source = stat.site.getsource(25)
535 536 stattuple = (stat.selfpercent(), stat.selfseconds(),
536 537 stat.site.lineno, source)
537 538
538 539 print('%33.0f%% %6.2f line %s: %s' % (stattuple), file=fp)
539 540
540 541 def display_about_method(data, fp, function=None, **kwargs):
541 542 if function is None:
542 543 raise Exception("Invalid function")
543 544
544 545 filename = None
545 546 if ':' in function:
546 547 filename, function = function.split(':')
547 548
548 549 relevant_samples = 0
549 550 parents = {}
550 551 children = {}
551 552
552 553 for sample in data.samples:
553 554 for i, site in enumerate(sample.stack):
554 555 if site.function == function and (not filename
555 556 or site.filename() == filename):
556 557 relevant_samples += 1
557 558 if i != len(sample.stack) - 1:
558 559 parent = sample.stack[i + 1]
559 560 if parent in parents:
560 561 parents[parent] = parents[parent] + 1
561 562 else:
562 563 parents[parent] = 1
563 564
564 565 if site in children:
565 566 children[site] = children[site] + 1
566 567 else:
567 568 children[site] = 1
568 569
569 570 parents = [(parent, count) for parent, count in parents.iteritems()]
570 571 parents.sort(reverse=True, key=lambda x: x[1])
571 572 for parent, count in parents:
572 573 print('%6.2f%% %s:%s line %s: %s' %
573 574 (count / relevant_samples * 100, parent.filename(),
574 575 parent.function, parent.lineno, parent.getsource(50)), file=fp)
575 576
576 577 stats = SiteStats.buildstats(data.samples)
577 578 stats = [s for s in stats
578 579 if s.site.function == function and
579 580 (not filename or s.site.filename() == filename)]
580 581
581 582 total_cum_sec = 0
582 583 total_self_sec = 0
583 584 total_self_percent = 0
584 585 total_cum_percent = 0
585 586 for stat in stats:
586 587 total_cum_sec += stat.totalseconds()
587 588 total_self_sec += stat.selfseconds()
588 589 total_self_percent += stat.selfpercent()
589 590 total_cum_percent += stat.totalpercent()
590 591
591 592 print(
592 593 '\n %s:%s Total: %0.2fs (%0.2f%%) Self: %0.2fs (%0.2f%%)\n' %
593 594 (
594 595 filename or '___',
595 596 function,
596 597 total_cum_sec,
597 598 total_cum_percent,
598 599 total_self_sec,
599 600 total_self_percent
600 601 ), file=fp)
601 602
602 603 children = [(child, count) for child, count in children.iteritems()]
603 604 children.sort(reverse=True, key=lambda x: x[1])
604 605 for child, count in children:
605 606 print(' %6.2f%% line %s: %s' %
606 607 (count / relevant_samples * 100, child.lineno,
607 608 child.getsource(50)), file=fp)
608 609
609 610 def display_hotpath(data, fp, limit=0.05, **kwargs):
610 611 class HotNode(object):
611 612 def __init__(self, site):
612 613 self.site = site
613 614 self.count = 0
614 615 self.children = {}
615 616
616 617 def add(self, stack, time):
617 618 self.count += time
618 619 site = stack[0]
619 620 child = self.children.get(site)
620 621 if not child:
621 622 child = HotNode(site)
622 623 self.children[site] = child
623 624
624 625 if len(stack) > 1:
625 626 i = 1
626 627 # Skip boiler plate parts of the stack
627 628 while i < len(stack) and '%s:%s' % (stack[i].filename(), stack[i].function) in skips:
628 629 i += 1
629 630 if i < len(stack):
630 631 child.add(stack[i:], time)
631 632
632 633 root = HotNode(None)
633 634 lasttime = data.samples[0].time
634 635 for sample in data.samples:
635 636 root.add(sample.stack[::-1], sample.time - lasttime)
636 637 lasttime = sample.time
637 638
638 639 def _write(node, depth, multiple_siblings):
639 640 site = node.site
640 641 visiblechildren = [c for c in node.children.itervalues()
641 642 if c.count >= (limit * root.count)]
642 643 if site:
643 644 indent = depth * 2 - 1
644 645 filename = ''
645 646 function = ''
646 647 if len(node.children) > 0:
647 648 childsite = list(node.children.itervalues())[0].site
648 649 filename = (childsite.filename() + ':').ljust(15)
649 650 function = childsite.function
650 651
651 652 # lots of string formatting
652 653 listpattern = ''.ljust(indent) +\
653 654 ('\\' if multiple_siblings else '|') +\
654 655 ' %4.1f%% %s %s'
655 656 liststring = listpattern % (node.count / root.count * 100,
656 657 filename, function)
657 658 codepattern = '%' + str(55 - len(liststring)) + 's %s: %s'
658 659 codestring = codepattern % ('line', site.lineno, site.getsource(30))
659 660
660 661 finalstring = liststring + codestring
661 662 childrensamples = sum([c.count for c in node.children.itervalues()])
662 663 # Make frames that performed more than 10% of the operation red
663 664 if node.count - childrensamples > (0.1 * root.count):
664 665 finalstring = '\033[91m' + finalstring + '\033[0m'
665 666 # Make frames that didn't actually perform work dark grey
666 667 elif node.count - childrensamples == 0:
667 668 finalstring = '\033[90m' + finalstring + '\033[0m'
668 669 print(finalstring, file=fp)
669 670
670 671 newdepth = depth
671 672 if len(visiblechildren) > 1 or multiple_siblings:
672 673 newdepth += 1
673 674
674 675 visiblechildren.sort(reverse=True, key=lambda x: x.count)
675 676 for child in visiblechildren:
676 677 _write(child, newdepth, len(visiblechildren) > 1)
677 678
678 679 if root.count > 0:
679 680 _write(root, 0, False)
680 681
681 682 def write_to_flame(data, fp, scriptpath=None, outputfile=None, **kwargs):
682 683 if scriptpath is None:
683 scriptpath = os.environ['HOME'] + '/flamegraph.pl'
684 scriptpath = encoding.environ['HOME'] + '/flamegraph.pl'
684 685 if not os.path.exists(scriptpath):
685 686 print("error: missing %s" % scriptpath, file=fp)
686 687 print("get it here: https://github.com/brendangregg/FlameGraph",
687 688 file=fp)
688 689 return
689 690
690 691 fd, path = tempfile.mkstemp()
691 692
692 693 file = open(path, "w+")
693 694
694 695 lines = {}
695 696 for sample in data.samples:
696 697 sites = [s.function for s in sample.stack]
697 698 sites.reverse()
698 699 line = ';'.join(sites)
699 700 if line in lines:
700 701 lines[line] = lines[line] + 1
701 702 else:
702 703 lines[line] = 1
703 704
704 705 for line, count in lines.iteritems():
705 706 file.write("%s %s\n" % (line, count))
706 707
707 708 file.close()
708 709
709 710 if outputfile is None:
710 711 outputfile = '~/flamegraph.svg'
711 712
712 713 os.system("perl ~/flamegraph.pl %s > %s" % (path, outputfile))
713 714 print("Written to %s" % outputfile, file=fp)
714 715
715 716 def write_to_json(data, fp):
716 717 samples = []
717 718
718 719 for sample in data.samples:
719 720 stack = []
720 721
721 722 for frame in sample.stack:
722 723 stack.append((frame.path, frame.lineno, frame.function))
723 724
724 725 samples.append((sample.time, stack))
725 726
726 727 print(json.dumps(samples), file=fp)
727 728
728 729 def printusage():
729 730 print("""
730 731 The statprof command line allows you to inspect the last profile's results in
731 732 the following forms:
732 733
733 734 usage:
734 735 hotpath [-l --limit percent]
735 736 Shows a graph of calls with the percent of time each takes.
736 737 Red calls take over 10%% of the total time themselves.
737 738 lines
738 739 Shows the actual sampled lines.
739 740 functions
740 741 Shows the samples grouped by function.
741 742 function [filename:]functionname
742 743 Shows the callers and callees of a particular function.
743 744 flame [-s --script-path] [-o --output-file path]
744 745 Writes out a flamegraph to output-file (defaults to ~/flamegraph.svg)
745 746 Requires that ~/flamegraph.pl exist.
746 747 (Specify alternate script path with --script-path.)""")
747 748
748 749 def main(argv=None):
749 750 if argv is None:
750 751 argv = sys.argv
751 752
752 753 if len(argv) == 1:
753 754 printusage()
754 755 return 0
755 756
756 757 displayargs = {}
757 758
758 759 optstart = 2
759 760 displayargs['function'] = None
760 761 if argv[1] == 'hotpath':
761 762 displayargs['format'] = DisplayFormats.Hotpath
762 763 elif argv[1] == 'lines':
763 764 displayargs['format'] = DisplayFormats.ByLine
764 765 elif argv[1] == 'functions':
765 766 displayargs['format'] = DisplayFormats.ByMethod
766 767 elif argv[1] == 'function':
767 768 displayargs['format'] = DisplayFormats.AboutMethod
768 769 displayargs['function'] = argv[2]
769 770 optstart = 3
770 771 elif argv[1] == 'flame':
771 772 displayargs['format'] = DisplayFormats.FlameGraph
772 773 else:
773 774 printusage()
774 775 return 0
775 776
776 777 # process options
777 778 try:
778 779 opts, args = pycompat.getoptb(sys.argv[optstart:], "hl:f:o:p:",
779 780 ["help", "limit=", "file=", "output-file=", "script-path="])
780 781 except getopt.error as msg:
781 782 print(msg)
782 783 printusage()
783 784 return 2
784 785
785 786 displayargs['limit'] = 0.05
786 787 path = None
787 788 for o, value in opts:
788 789 if o in ("-l", "--limit"):
789 790 displayargs['limit'] = float(value)
790 791 elif o in ("-f", "--file"):
791 792 path = value
792 793 elif o in ("-o", "--output-file"):
793 794 displayargs['outputfile'] = value
794 795 elif o in ("-p", "--script-path"):
795 796 displayargs['scriptpath'] = value
796 797 elif o in ("-h", "help"):
797 798 printusage()
798 799 return 0
799 800 else:
800 801 assert False, "unhandled option %s" % o
801 802
802 803 load_data(path=path)
803 804
804 805 display(**displayargs)
805 806
806 807 return 0
807 808
808 809 if __name__ == "__main__":
809 810 sys.exit(main())
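Taken together with the module docstring above, the profiling API is small:
reset() picks the sampling frequency, start() and stop() bracket the code
under measurement, and display() renders one of the DisplayFormats. A minimal
usage sketch follows; the mercurial.statprof import path is an assumption
about where this copy of statprof.py lives, and the 'thread' mechanism is the
one that avoids installing a SIGPROF handler.

    # Minimal usage sketch of the API shown above; import path is assumed.
    from mercurial import statprof

    statprof.reset(frequency=100)        # sample ~100 times per CPU-second
    statprof.start(mechanism='thread')   # 'signal' would use SIGPROF instead
    try:
        total = sum(i * i for i in range(10 ** 6))   # stand-in for real work
    finally:
        statprof.stop()   # with this patch, also saves to $STATPROF_DEST if set

    statprof.display(format=statprof.DisplayFormats.ByMethod)

The same module doubles as a small command line tool (hotpath, lines,
functions, function, flame), as described in printusage() above, reading a
previously saved profile via -f/--file.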
@@ -1,1407 +1,1407 b''
1 1 # ui.py - user interface bits for mercurial
2 2 #
3 3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 from __future__ import absolute_import
9 9
10 10 import contextlib
11 11 import errno
12 12 import getpass
13 13 import inspect
14 14 import os
15 15 import re
16 16 import socket
17 17 import sys
18 18 import tempfile
19 19 import traceback
20 20
21 21 from .i18n import _
22 22 from .node import hex
23 23
24 24 from . import (
25 25 config,
26 26 encoding,
27 27 error,
28 28 formatter,
29 29 progress,
30 30 pycompat,
31 31 scmutil,
32 32 util,
33 33 )
34 34
35 35 urlreq = util.urlreq
36 36
37 37 samplehgrcs = {
38 38 'user':
39 39 """# example user config (see 'hg help config' for more info)
40 40 [ui]
41 41 # name and email, e.g.
42 42 # username = Jane Doe <jdoe@example.com>
43 43 username =
44 44
45 45 [extensions]
46 46 # uncomment these lines to enable some popular extensions
47 47 # (see 'hg help extensions' for more info)
48 48 #
49 49 # pager =
50 50 # color =""",
51 51
52 52 'cloned':
53 53 """# example repository config (see 'hg help config' for more info)
54 54 [paths]
55 55 default = %s
56 56
57 57 # path aliases to other clones of this repo in URLs or filesystem paths
58 58 # (see 'hg help config.paths' for more info)
59 59 #
60 60 # default-push = ssh://jdoe@example.net/hg/jdoes-fork
61 61 # my-fork = ssh://jdoe@example.net/hg/jdoes-fork
62 62 # my-clone = /home/jdoe/jdoes-clone
63 63
64 64 [ui]
65 65 # name and email (local to this repository, optional), e.g.
66 66 # username = Jane Doe <jdoe@example.com>
67 67 """,
68 68
69 69 'local':
70 70 """# example repository config (see 'hg help config' for more info)
71 71 [paths]
72 72 # path aliases to other clones of this repo in URLs or filesystem paths
73 73 # (see 'hg help config.paths' for more info)
74 74 #
75 75 # default = http://example.com/hg/example-repo
76 76 # default-push = ssh://jdoe@example.net/hg/jdoes-fork
77 77 # my-fork = ssh://jdoe@example.net/hg/jdoes-fork
78 78 # my-clone = /home/jdoe/jdoes-clone
79 79
80 80 [ui]
81 81 # name and email (local to this repository, optional), e.g.
82 82 # username = Jane Doe <jdoe@example.com>
83 83 """,
84 84
85 85 'global':
86 86 """# example system-wide hg config (see 'hg help config' for more info)
87 87
88 88 [extensions]
89 89 # uncomment these lines to enable some popular extensions
90 90 # (see 'hg help extensions' for more info)
91 91 #
92 92 # blackbox =
93 93 # color =
94 94 # pager =""",
95 95 }
96 96
97 97 class ui(object):
98 98 def __init__(self, src=None):
99 99 """Create a fresh new ui object if no src given
100 100
101 101 Use uimod.ui.load() to create a ui which knows global and user configs.
102 102 In most cases, you should use ui.copy() to create a copy of an existing
103 103 ui object.
104 104 """
105 105 # _buffers: used for temporary capture of output
106 106 self._buffers = []
107 107 # 3-tuple describing how each buffer in the stack behaves.
108 108 # Values are (capture stderr, capture subprocesses, apply labels).
109 109 self._bufferstates = []
110 110 # When a buffer is active, defines whether we are expanding labels.
111 111 # This exists to prevent an extra list lookup.
112 112 self._bufferapplylabels = None
113 113 self.quiet = self.verbose = self.debugflag = self.tracebackflag = False
114 114 self._reportuntrusted = True
115 115 self._ocfg = config.config() # overlay
116 116 self._tcfg = config.config() # trusted
117 117 self._ucfg = config.config() # untrusted
118 118 self._trustusers = set()
119 119 self._trustgroups = set()
120 120 self.callhooks = True
121 121 # Insecure server connections requested.
122 122 self.insecureconnections = False
123 123
124 124 if src:
125 125 self.fout = src.fout
126 126 self.ferr = src.ferr
127 127 self.fin = src.fin
128 128
129 129 self._tcfg = src._tcfg.copy()
130 130 self._ucfg = src._ucfg.copy()
131 131 self._ocfg = src._ocfg.copy()
132 132 self._trustusers = src._trustusers.copy()
133 133 self._trustgroups = src._trustgroups.copy()
134 134 self.environ = src.environ
135 135 self.callhooks = src.callhooks
136 136 self.insecureconnections = src.insecureconnections
137 137 self.fixconfig()
138 138
139 139 self.httppasswordmgrdb = src.httppasswordmgrdb
140 140 else:
141 141 self.fout = util.stdout
142 142 self.ferr = util.stderr
143 143 self.fin = util.stdin
144 144
145 145 # shared read-only environment
146 self.environ = os.environ
146 self.environ = encoding.environ
147 147
148 148 self.httppasswordmgrdb = urlreq.httppasswordmgrwithdefaultrealm()
149 149
150 150 @classmethod
151 151 def load(cls):
152 152 """Create a ui and load global and user configs"""
153 153 u = cls()
154 154 # we always trust global config files
155 155 for f in scmutil.rcpath():
156 156 u.readconfig(f, trust=True)
157 157 return u
158 158
159 159 def copy(self):
160 160 return self.__class__(self)
161 161
162 162 def resetstate(self):
163 163 """Clear internal state that shouldn't persist across commands"""
164 164 if self._progbar:
165 165 self._progbar.resetstate() # reset last-print time of progress bar
166 166 self.httppasswordmgrdb = urlreq.httppasswordmgrwithdefaultrealm()
167 167
168 168 def formatter(self, topic, opts):
169 169 return formatter.formatter(self, topic, opts)
170 170
171 171 def _trusted(self, fp, f):
172 172 st = util.fstat(fp)
173 173 if util.isowner(st):
174 174 return True
175 175
176 176 tusers, tgroups = self._trustusers, self._trustgroups
177 177 if '*' in tusers or '*' in tgroups:
178 178 return True
179 179
180 180 user = util.username(st.st_uid)
181 181 group = util.groupname(st.st_gid)
182 182 if user in tusers or group in tgroups or user == util.username():
183 183 return True
184 184
185 185 if self._reportuntrusted:
186 186 self.warn(_('not trusting file %s from untrusted '
187 187 'user %s, group %s\n') % (f, user, group))
188 188 return False
189 189
190 190 def readconfig(self, filename, root=None, trust=False,
191 191 sections=None, remap=None):
192 192 try:
193 193 fp = open(filename, u'rb')
194 194 except IOError:
195 195 if not sections: # ignore unless we were looking for something
196 196 return
197 197 raise
198 198
199 199 cfg = config.config()
200 200 trusted = sections or trust or self._trusted(fp, filename)
201 201
202 202 try:
203 203 cfg.read(filename, fp, sections=sections, remap=remap)
204 204 fp.close()
205 205 except error.ConfigError as inst:
206 206 if trusted:
207 207 raise
208 208 self.warn(_("ignored: %s\n") % str(inst))
209 209
210 210 if self.plain():
211 211 for k in ('debug', 'fallbackencoding', 'quiet', 'slash',
212 212 'logtemplate', 'statuscopies', 'style',
213 213 'traceback', 'verbose'):
214 214 if k in cfg['ui']:
215 215 del cfg['ui'][k]
216 216 for k, v in cfg.items('defaults'):
217 217 del cfg['defaults'][k]
218 218 # Don't remove aliases from the configuration if in the exceptionlist
219 219 if self.plain('alias'):
220 220 for k, v in cfg.items('alias'):
221 221 del cfg['alias'][k]
222 222 if self.plain('revsetalias'):
223 223 for k, v in cfg.items('revsetalias'):
224 224 del cfg['revsetalias'][k]
225 225 if self.plain('templatealias'):
226 226 for k, v in cfg.items('templatealias'):
227 227 del cfg['templatealias'][k]
228 228
229 229 if trusted:
230 230 self._tcfg.update(cfg)
231 231 self._tcfg.update(self._ocfg)
232 232 self._ucfg.update(cfg)
233 233 self._ucfg.update(self._ocfg)
234 234
235 235 if root is None:
236 236 root = os.path.expanduser('~')
237 237 self.fixconfig(root=root)
238 238
239 239 def fixconfig(self, root=None, section=None):
240 240 if section in (None, 'paths'):
241 241 # expand vars and ~
242 242 # translate paths relative to root (or home) into absolute paths
243 243 root = root or pycompat.getcwd()
244 244 for c in self._tcfg, self._ucfg, self._ocfg:
245 245 for n, p in c.items('paths'):
246 246 # Ignore sub-options.
247 247 if ':' in n:
248 248 continue
249 249 if not p:
250 250 continue
251 251 if '%%' in p:
252 252 s = self.configsource('paths', n) or 'none'
253 253 self.warn(_("(deprecated '%%' in path %s=%s from %s)\n")
254 254 % (n, p, s))
255 255 p = p.replace('%%', '%')
256 256 p = util.expandpath(p)
257 257 if not util.hasscheme(p) and not os.path.isabs(p):
258 258 p = os.path.normpath(os.path.join(root, p))
259 259 c.set("paths", n, p)
260 260
261 261 if section in (None, 'ui'):
262 262 # update ui options
263 263 self.debugflag = self.configbool('ui', 'debug')
264 264 self.verbose = self.debugflag or self.configbool('ui', 'verbose')
265 265 self.quiet = not self.debugflag and self.configbool('ui', 'quiet')
266 266 if self.verbose and self.quiet:
267 267 self.quiet = self.verbose = False
268 268 self._reportuntrusted = self.debugflag or self.configbool("ui",
269 269 "report_untrusted", True)
270 270 self.tracebackflag = self.configbool('ui', 'traceback', False)
271 271
272 272 if section in (None, 'trusted'):
273 273 # update trust information
274 274 self._trustusers.update(self.configlist('trusted', 'users'))
275 275 self._trustgroups.update(self.configlist('trusted', 'groups'))
276 276
277 277 def backupconfig(self, section, item):
278 278 return (self._ocfg.backup(section, item),
279 279 self._tcfg.backup(section, item),
280 280 self._ucfg.backup(section, item),)
281 281 def restoreconfig(self, data):
282 282 self._ocfg.restore(data[0])
283 283 self._tcfg.restore(data[1])
284 284 self._ucfg.restore(data[2])
285 285
286 286 def setconfig(self, section, name, value, source=''):
287 287 for cfg in (self._ocfg, self._tcfg, self._ucfg):
288 288 cfg.set(section, name, value, source)
289 289 self.fixconfig(section=section)
290 290
291 291 def _data(self, untrusted):
292 292 return untrusted and self._ucfg or self._tcfg
293 293
294 294 def configsource(self, section, name, untrusted=False):
295 295 return self._data(untrusted).source(section, name)
296 296
297 297 def config(self, section, name, default=None, untrusted=False):
298 298 if isinstance(name, list):
299 299 alternates = name
300 300 else:
301 301 alternates = [name]
302 302
303 303 for n in alternates:
304 304 value = self._data(untrusted).get(section, n, None)
305 305 if value is not None:
306 306 name = n
307 307 break
308 308 else:
309 309 value = default
310 310
311 311 if self.debugflag and not untrusted and self._reportuntrusted:
312 312 for n in alternates:
313 313 uvalue = self._ucfg.get(section, n)
314 314 if uvalue is not None and uvalue != value:
315 315 self.debug("ignoring untrusted configuration option "
316 316 "%s.%s = %s\n" % (section, n, uvalue))
317 317 return value
318 318
319 319 def configsuboptions(self, section, name, default=None, untrusted=False):
320 320 """Get a config option and all sub-options.
321 321
322 322 Some config options have sub-options that are declared with the
323 323 format "key:opt = value". This method is used to return the main
324 324 option and all its declared sub-options.
325 325
326 326 Returns a 2-tuple of ``(option, sub-options)``, where `sub-options``
327 327 is a dict of defined sub-options where keys and values are strings.
328 328 """
329 329 data = self._data(untrusted)
330 330 main = data.get(section, name, default)
331 331 if self.debugflag and not untrusted and self._reportuntrusted:
332 332 uvalue = self._ucfg.get(section, name)
333 333 if uvalue is not None and uvalue != main:
334 334 self.debug('ignoring untrusted configuration option '
335 335 '%s.%s = %s\n' % (section, name, uvalue))
336 336
337 337 sub = {}
338 338 prefix = '%s:' % name
339 339 for k, v in data.items(section):
340 340 if k.startswith(prefix):
341 341 sub[k[len(prefix):]] = v
342 342
343 343 if self.debugflag and not untrusted and self._reportuntrusted:
344 344 for k, v in sub.items():
345 345 uvalue = self._ucfg.get(section, '%s:%s' % (name, k))
346 346 if uvalue is not None and uvalue != v:
347 347 self.debug('ignoring untrusted configuration option '
348 348 '%s:%s.%s = %s\n' % (section, name, k, uvalue))
349 349
350 350 return main, sub
351 351
352 352 def configpath(self, section, name, default=None, untrusted=False):
353 353 'get a path config item, expanded relative to repo root or config file'
354 354 v = self.config(section, name, default, untrusted)
355 355 if v is None:
356 356 return None
357 357 if not os.path.isabs(v) or "://" not in v:
358 358 src = self.configsource(section, name, untrusted)
359 359 if ':' in src:
360 360 base = os.path.dirname(src.rsplit(':')[0])
361 361 v = os.path.join(base, os.path.expanduser(v))
362 362 return v
363 363
364 364 def configbool(self, section, name, default=False, untrusted=False):
365 365 """parse a configuration element as a boolean
366 366
367 367 >>> u = ui(); s = 'foo'
368 368 >>> u.setconfig(s, 'true', 'yes')
369 369 >>> u.configbool(s, 'true')
370 370 True
371 371 >>> u.setconfig(s, 'false', 'no')
372 372 >>> u.configbool(s, 'false')
373 373 False
374 374 >>> u.configbool(s, 'unknown')
375 375 False
376 376 >>> u.configbool(s, 'unknown', True)
377 377 True
378 378 >>> u.setconfig(s, 'invalid', 'somevalue')
379 379 >>> u.configbool(s, 'invalid')
380 380 Traceback (most recent call last):
381 381 ...
382 382 ConfigError: foo.invalid is not a boolean ('somevalue')
383 383 """
384 384
385 385 v = self.config(section, name, None, untrusted)
386 386 if v is None:
387 387 return default
388 388 if isinstance(v, bool):
389 389 return v
390 390 b = util.parsebool(v)
391 391 if b is None:
392 392 raise error.ConfigError(_("%s.%s is not a boolean ('%s')")
393 393 % (section, name, v))
394 394 return b
395 395
396 396 def configint(self, section, name, default=None, untrusted=False):
397 397 """parse a configuration element as an integer
398 398
399 399 >>> u = ui(); s = 'foo'
400 400 >>> u.setconfig(s, 'int1', '42')
401 401 >>> u.configint(s, 'int1')
402 402 42
403 403 >>> u.setconfig(s, 'int2', '-42')
404 404 >>> u.configint(s, 'int2')
405 405 -42
406 406 >>> u.configint(s, 'unknown', 7)
407 407 7
408 408 >>> u.setconfig(s, 'invalid', 'somevalue')
409 409 >>> u.configint(s, 'invalid')
410 410 Traceback (most recent call last):
411 411 ...
412 412 ConfigError: foo.invalid is not an integer ('somevalue')
413 413 """
414 414
415 415 v = self.config(section, name, None, untrusted)
416 416 if v is None:
417 417 return default
418 418 try:
419 419 return int(v)
420 420 except ValueError:
421 421 raise error.ConfigError(_("%s.%s is not an integer ('%s')")
422 422 % (section, name, v))
423 423
424 424 def configbytes(self, section, name, default=0, untrusted=False):
425 425 """parse a configuration element as a quantity in bytes
426 426
427 427 Units can be specified as b (bytes), k or kb (kilobytes), m or
428 428 mb (megabytes), g or gb (gigabytes).
429 429
430 430 >>> u = ui(); s = 'foo'
431 431 >>> u.setconfig(s, 'val1', '42')
432 432 >>> u.configbytes(s, 'val1')
433 433 42
434 434 >>> u.setconfig(s, 'val2', '42.5 kb')
435 435 >>> u.configbytes(s, 'val2')
436 436 43520
437 437 >>> u.configbytes(s, 'unknown', '7 MB')
438 438 7340032
439 439 >>> u.setconfig(s, 'invalid', 'somevalue')
440 440 >>> u.configbytes(s, 'invalid')
441 441 Traceback (most recent call last):
442 442 ...
443 443 ConfigError: foo.invalid is not a byte quantity ('somevalue')
444 444 """
445 445
446 446 value = self.config(section, name)
447 447 if value is None:
448 448 if not isinstance(default, str):
449 449 return default
450 450 value = default
451 451 try:
452 452 return util.sizetoint(value)
453 453 except error.ParseError:
454 454 raise error.ConfigError(_("%s.%s is not a byte quantity ('%s')")
455 455 % (section, name, value))
456 456
457 457 def configlist(self, section, name, default=None, untrusted=False):
458 458 """parse a configuration element as a list of comma/space separated
459 459 strings
460 460
461 461 >>> u = ui(); s = 'foo'
462 462 >>> u.setconfig(s, 'list1', 'this,is "a small" ,test')
463 463 >>> u.configlist(s, 'list1')
464 464 ['this', 'is', 'a small', 'test']
465 465 """
466 466
467 467 def _parse_plain(parts, s, offset):
468 468 whitespace = False
469 469 while offset < len(s) and (s[offset].isspace() or s[offset] == ','):
470 470 whitespace = True
471 471 offset += 1
472 472 if offset >= len(s):
473 473 return None, parts, offset
474 474 if whitespace:
475 475 parts.append('')
476 476 if s[offset] == '"' and not parts[-1]:
477 477 return _parse_quote, parts, offset + 1
478 478 elif s[offset] == '"' and parts[-1][-1] == '\\':
479 479 parts[-1] = parts[-1][:-1] + s[offset]
480 480 return _parse_plain, parts, offset + 1
481 481 parts[-1] += s[offset]
482 482 return _parse_plain, parts, offset + 1
483 483
484 484 def _parse_quote(parts, s, offset):
485 485 if offset < len(s) and s[offset] == '"': # ""
486 486 parts.append('')
487 487 offset += 1
488 488 while offset < len(s) and (s[offset].isspace() or
489 489 s[offset] == ','):
490 490 offset += 1
491 491 return _parse_plain, parts, offset
492 492
493 493 while offset < len(s) and s[offset] != '"':
494 494 if (s[offset] == '\\' and offset + 1 < len(s)
495 495 and s[offset + 1] == '"'):
496 496 offset += 1
497 497 parts[-1] += '"'
498 498 else:
499 499 parts[-1] += s[offset]
500 500 offset += 1
501 501
502 502 if offset >= len(s):
503 503 real_parts = _configlist(parts[-1])
504 504 if not real_parts:
505 505 parts[-1] = '"'
506 506 else:
507 507 real_parts[0] = '"' + real_parts[0]
508 508 parts = parts[:-1]
509 509 parts.extend(real_parts)
510 510 return None, parts, offset
511 511
512 512 offset += 1
513 513 while offset < len(s) and s[offset] in [' ', ',']:
514 514 offset += 1
515 515
516 516 if offset < len(s):
517 517 if offset + 1 == len(s) and s[offset] == '"':
518 518 parts[-1] += '"'
519 519 offset += 1
520 520 else:
521 521 parts.append('')
522 522 else:
523 523 return None, parts, offset
524 524
525 525 return _parse_plain, parts, offset
526 526
527 527 def _configlist(s):
528 528 s = s.rstrip(' ,')
529 529 if not s:
530 530 return []
531 531 parser, parts, offset = _parse_plain, [''], 0
532 532 while parser:
533 533 parser, parts, offset = parser(parts, s, offset)
534 534 return parts
535 535
536 536 result = self.config(section, name, untrusted=untrusted)
537 537 if result is None:
538 538 result = default or []
539 539 if isinstance(result, bytes):
540 540 result = _configlist(result.lstrip(' ,\n'))
541 541 if result is None:
542 542 result = default or []
543 543 return result
544 544
545 545 def hasconfig(self, section, name, untrusted=False):
546 546 return self._data(untrusted).hasitem(section, name)
547 547
548 548 def has_section(self, section, untrusted=False):
549 549 '''tell whether section exists in config.'''
550 550 return section in self._data(untrusted)
551 551
552 552 def configitems(self, section, untrusted=False, ignoresub=False):
553 553 items = self._data(untrusted).items(section)
554 554 if ignoresub:
555 555 newitems = {}
556 556 for k, v in items:
557 557 if ':' not in k:
558 558 newitems[k] = v
559 559 items = newitems.items()
560 560 if self.debugflag and not untrusted and self._reportuntrusted:
561 561 for k, v in self._ucfg.items(section):
562 562 if self._tcfg.get(section, k) != v:
563 563 self.debug("ignoring untrusted configuration option "
564 564 "%s.%s = %s\n" % (section, k, v))
565 565 return items
566 566
567 567 def walkconfig(self, untrusted=False):
568 568 cfg = self._data(untrusted)
569 569 for section in cfg.sections():
570 570 for name, value in self.configitems(section, untrusted):
571 571 yield section, name, value
572 572
573 573 def plain(self, feature=None):
574 574 '''is plain mode active?
575 575
576 576 Plain mode means that all configuration variables which affect
577 577 the behavior and output of Mercurial should be
578 578 ignored. Additionally, the output should be stable,
579 579 reproducible and suitable for use in scripts or applications.
580 580
581 581 The only way to trigger plain mode is by setting either the
582 582 `HGPLAIN' or `HGPLAINEXCEPT' environment variables.
583 583
584 584 The return value can either be
585 585 - False if HGPLAIN is not set, or feature is in HGPLAINEXCEPT
586 586 - True otherwise
587 587 '''
588 588 if ('HGPLAIN' not in encoding.environ and
589 589 'HGPLAINEXCEPT' not in encoding.environ):
590 590 return False
591 591 exceptions = encoding.environ.get('HGPLAINEXCEPT',
592 592 '').strip().split(',')
593 593 if feature and exceptions:
594 594 return feature not in exceptions
595 595 return True
596 596
597 597 def username(self):
598 598 """Return default username to be used in commits.
599 599
600 600 Searched for in this order: $HGUSER, [ui] section of hgrcs, $EMAIL;
601 601 searching stops at the first one that is set.
602 602 If not found and ui.askusername is True, ask the user, else use
603 603 ($LOGNAME or $USER or $LNAME or $USERNAME) + "@full.hostname".
604 604 """
605 605 user = encoding.environ.get("HGUSER")
606 606 if user is None:
607 607 user = self.config("ui", ["username", "user"])
608 608 if user is not None:
609 609 user = os.path.expandvars(user)
610 610 if user is None:
611 611 user = encoding.environ.get("EMAIL")
612 612 if user is None and self.configbool("ui", "askusername"):
613 613 user = self.prompt(_("enter a commit username:"), default=None)
614 614 if user is None and not self.interactive():
615 615 try:
616 616 user = '%s@%s' % (util.getuser(), socket.getfqdn())
617 617 self.warn(_("no username found, using '%s' instead\n") % user)
618 618 except KeyError:
619 619 pass
620 620 if not user:
621 621 raise error.Abort(_('no username supplied'),
622 622 hint=_("use 'hg config --edit' "
623 623 'to set your username'))
624 624 if "\n" in user:
625 625 raise error.Abort(_("username %s contains a newline\n")
626 626 % repr(user))
627 627 return user
628 628
629 629 def shortuser(self, user):
630 630 """Return a short representation of a user name or email address."""
631 631 if not self.verbose:
632 632 user = util.shortuser(user)
633 633 return user
634 634
635 635 def expandpath(self, loc, default=None):
636 636 """Return repository location relative to cwd or from [paths]"""
637 637 try:
638 638 p = self.paths.getpath(loc)
639 639 if p:
640 640 return p.rawloc
641 641 except error.RepoError:
642 642 pass
643 643
644 644 if default:
645 645 try:
646 646 p = self.paths.getpath(default)
647 647 if p:
648 648 return p.rawloc
649 649 except error.RepoError:
650 650 pass
651 651
652 652 return loc
653 653
654 654 @util.propertycache
655 655 def paths(self):
656 656 return paths(self)
657 657
658 658 def pushbuffer(self, error=False, subproc=False, labeled=False):
659 659 """install a buffer to capture standard output of the ui object
660 660
661 661 If error is True, the error output will be captured too.
662 662
663 663 If subproc is True, output from subprocesses (typically hooks) will be
664 664 captured too.
665 665
666 666 If labeled is True, any labels associated with buffered
667 667 output will be handled. By default, this has no effect
668 668 on the output returned, but extensions and GUI tools may
669 669 handle this argument and returned styled output. If output
670 670 is being buffered so it can be captured and parsed or
671 671 processed, labeled should not be set to True.
672 672 """
673 673 self._buffers.append([])
674 674 self._bufferstates.append((error, subproc, labeled))
675 675 self._bufferapplylabels = labeled
676 676
677 677 def popbuffer(self):
678 678 '''pop the last buffer and return the buffered output'''
679 679 self._bufferstates.pop()
680 680 if self._bufferstates:
681 681 self._bufferapplylabels = self._bufferstates[-1][2]
682 682 else:
683 683 self._bufferapplylabels = None
684 684
685 685 return "".join(self._buffers.pop())
686 686
687 687 def write(self, *args, **opts):
688 688 '''write args to output
689 689
690 690 By default, this method simply writes to the buffer or stdout,
691 691 but extensions or GUI tools may override this method,
692 692 write_err(), popbuffer(), and label() to style output from
693 693 various parts of hg.
694 694
695 695 An optional keyword argument, "label", can be passed in.
696 696 This should be a string containing label names separated by
697 697 space. Label names take the form of "topic.type". For example,
698 698 ui.debug() issues a label of "ui.debug".
699 699
700 700 When labeling output for a specific command, a label of
701 701 "cmdname.type" is recommended. For example, status issues
702 702 a label of "status.modified" for modified files.
703 703 '''
704 704 if self._buffers and not opts.get('prompt', False):
705 705 self._buffers[-1].extend(a for a in args)
706 706 else:
707 707 self._progclear()
708 708 for a in args:
709 709 self.fout.write(a)
710 710
711 711 def write_err(self, *args, **opts):
712 712 self._progclear()
713 713 try:
714 714 if self._bufferstates and self._bufferstates[-1][0]:
715 715 return self.write(*args, **opts)
716 716 if not getattr(self.fout, 'closed', False):
717 717 self.fout.flush()
718 718 for a in args:
719 719 self.ferr.write(a)
720 720 # stderr may be buffered under win32 when redirected to files,
721 721 # including stdout.
722 722 if not getattr(self.ferr, 'closed', False):
723 723 self.ferr.flush()
724 724 except IOError as inst:
725 725 if inst.errno not in (errno.EPIPE, errno.EIO, errno.EBADF):
726 726 raise
727 727
728 728 def flush(self):
729 729 try: self.fout.flush()
730 730 except (IOError, ValueError): pass
731 731 try: self.ferr.flush()
732 732 except (IOError, ValueError): pass
733 733
734 734 def _isatty(self, fh):
735 735 if self.configbool('ui', 'nontty', False):
736 736 return False
737 737 return util.isatty(fh)
738 738
739 739 def interface(self, feature):
740 740 """what interface to use for interactive console features?
741 741
742 742 The interface is controlled by the value of `ui.interface` but also by
743 743 the value of feature-specific configuration. For example:
744 744
745 745 ui.interface.histedit = text
746 746 ui.interface.chunkselector = curses
747 747
748 748 Here the features are "histedit" and "chunkselector".
749 749
750 750 The configuration above means that the default interface for commands
751 751 is curses, the interface for histedit is text, and the interface for
752 752 selecting chunks is crecord (the best curses interface available).
753 753
754 754 Consider the following example:
755 755 ui.interface = curses
756 756 ui.interface.histedit = text
757 757
758 758 Then histedit will use the text interface and chunkselector will use
759 759 the default curses interface (crecord at the moment).
760 760 """
761 761 alldefaults = frozenset(["text", "curses"])
762 762
763 763 featureinterfaces = {
764 764 "chunkselector": [
765 765 "text",
766 766 "curses",
767 767 ]
768 768 }
769 769
770 770 # Feature-specific interface
771 771 if feature not in featureinterfaces.keys():
772 772 # Programming error, not user error
773 773 raise ValueError("Unknown feature requested %s" % feature)
774 774
775 775 availableinterfaces = frozenset(featureinterfaces[feature])
776 776 if alldefaults > availableinterfaces:
777 777 # Programming error, not user error. We need a use case to
778 778 # define the right thing to do here.
779 779 raise ValueError(
780 780 "Feature %s does not handle all default interfaces" %
781 781 feature)
782 782
783 783 if self.plain():
784 784 return "text"
785 785
786 786 # Default interface for all the features
787 787 defaultinterface = "text"
788 788 i = self.config("ui", "interface", None)
789 789 if i in alldefaults:
790 790 defaultinterface = i
791 791
792 792 choseninterface = defaultinterface
793 793 f = self.config("ui", "interface.%s" % feature, None)
794 794 if f in availableinterfaces:
795 795 choseninterface = f
796 796
797 797 if i is not None and defaultinterface != i:
798 798 if f is not None:
799 799 self.warn(_("invalid value for ui.interface: %s\n") %
800 800 (i,))
801 801 else:
802 802 self.warn(_("invalid value for ui.interface: %s (using %s)\n") %
803 803 (i, choseninterface))
804 804 if f is not None and choseninterface != f:
805 805 self.warn(_("invalid value for ui.interface.%s: %s (using %s)\n") %
806 806 (feature, f, choseninterface))
807 807
808 808 return choseninterface
809 809
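The resolution order implemented above (built-in default, then `ui.interface`, then `ui.interface.<feature>`) can be sketched standalone. The `resolve_interface` helper and the plain dict standing in for the real config below are illustrative only, not part of Mercurial:

    def resolve_interface(cfg, feature, alldefaults=("text", "curses")):
        # every feature defaults to the plain text interface
        chosen = "text"
        # a global ui.interface overrides the default when it names a known interface
        i = cfg.get("ui.interface")
        if i in alldefaults:
            chosen = i
        # a feature-specific setting overrides the global one when valid
        f = cfg.get("ui.interface.%s" % feature)
        if f in alldefaults:
            chosen = f
        return chosen

    cfg = {"ui.interface.histedit": "text", "ui.interface.chunkselector": "curses"}
    print(resolve_interface(cfg, "chunkselector"))  # -> curses
    print(resolve_interface(cfg, "histedit"))       # -> text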
810 810 def interactive(self):
811 811 '''is interactive input allowed?
812 812
813 813 An interactive session is a session where input can be reasonably read
814 814 from `sys.stdin'. If this function returns false, any attempt to read
815 815 from stdin should fail with an error, unless a sensible default has been
816 816 specified.
817 817
818 818 Interactiveness is triggered by the value of the `ui.interactive'
819 819 configuration variable or - if it is unset - when `sys.stdin' points
820 820 to a terminal device.
821 821
822 822 This function refers to input only; for output, see `ui.formatted()'.
823 823 '''
824 824 i = self.configbool("ui", "interactive", None)
825 825 if i is None:
826 826 # some environments replace stdin without implementing isatty
827 827 # usually those are non-interactive
828 828 return self._isatty(self.fin)
829 829
830 830 return i
831 831
832 832 def termwidth(self):
833 833 '''how wide is the terminal in columns?
834 834 '''
835 835 if 'COLUMNS' in encoding.environ:
836 836 try:
837 837 return int(encoding.environ['COLUMNS'])
838 838 except ValueError:
839 839 pass
840 840 return scmutil.termsize(self)[0]
841 841
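A standalone sketch of the same lookup order: honor an explicit COLUMNS override, otherwise probe the terminal. It uses os.environ and the Python 3 stdlib fallback purely for illustration, whereas the method above goes through encoding.environ and scmutil.termsize():

    import os
    import shutil

    def termwidth():
        # an explicit COLUMNS setting wins, as in the method above
        if 'COLUMNS' in os.environ:
            try:
                return int(os.environ['COLUMNS'])
            except ValueError:
                pass
        # otherwise probe the terminal (80x24 is the conventional fallback)
        return shutil.get_terminal_size((80, 24)).columns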
842 842 def formatted(self):
843 843 '''should formatted output be used?
844 844
845 845 It is often desirable to format the output to suit the output medium.
846 846 Examples of this are truncating long lines or colorizing messages.
847 847 However, this is often not desirable when piping output into other
848 848 utilities, e.g. `grep'.
849 849
850 850 Formatted output is triggered by the value of the `ui.formatted'
851 851 configuration variable or - if it is unset - when `sys.stdout' points
852 852 to a terminal device. Please note that `ui.formatted' should be
853 853 considered an implementation detail; it is not intended for use outside
854 854 Mercurial or its extensions.
855 855
856 856 This function refers to output only; for input, see `ui.interactive()'.
857 857 This function always returns false when in plain mode, see `ui.plain()'.
858 858 '''
859 859 if self.plain():
860 860 return False
861 861
862 862 i = self.configbool("ui", "formatted", None)
863 863 if i is None:
864 864 # some environments replace stdout without implementing isatty
865 865 # usually those are non-interactive
866 866 return self._isatty(self.fout)
867 867
868 868 return i
869 869
870 870 def _readline(self, prompt=''):
871 871 if self._isatty(self.fin):
872 872 try:
873 873 # magically add command line editing support, where
874 874 # available
875 875 import readline
876 876 # force demandimport to really load the module
877 877 readline.read_history_file
878 878 # windows sometimes raises something other than ImportError
879 879 except Exception:
880 880 pass
881 881
882 882 # call write() so output goes through subclassed implementation
883 883 # e.g. color extension on Windows
884 884 self.write(prompt, prompt=True)
885 885
886 886 # instead of trying to emulate raw_input, swap (self.fin,
887 887 # self.fout) with (sys.stdin, sys.stdout)
888 888 oldin = sys.stdin
889 889 oldout = sys.stdout
890 890 sys.stdin = self.fin
891 891 sys.stdout = self.fout
892 892 # prompt ' ' must exist; otherwise readline may delete entire line
893 893 # - http://bugs.python.org/issue12833
894 894 line = raw_input(' ')
895 895 sys.stdin = oldin
896 896 sys.stdout = oldout
897 897
898 898 # When stdin is in binary mode on Windows, it can cause
899 899 # raw_input() to emit an extra trailing carriage return
900 900 if os.linesep == '\r\n' and line and line[-1] == '\r':
901 901 line = line[:-1]
902 902 return line
903 903
904 904 def prompt(self, msg, default="y"):
905 905 """Prompt user with msg, read response.
906 906 If ui is not interactive, the default is returned.
907 907 """
908 908 if not self.interactive():
909 909 self.write(msg, ' ', default or '', "\n")
910 910 return default
911 911 try:
912 912 r = self._readline(self.label(msg, 'ui.prompt'))
913 913 if not r:
914 914 r = default
915 915 if self.configbool('ui', 'promptecho'):
916 916 self.write(r, "\n")
917 917 return r
918 918 except EOFError:
919 919 raise error.ResponseExpected()
920 920
921 921 @staticmethod
922 922 def extractchoices(prompt):
923 923 """Extract prompt message and list of choices from specified prompt.
924 924
925 925 This returns the tuple "(message, choices)", where "choices" is the
926 926 list of tuples "(response character, text without &)".
927 927
928 928 >>> ui.extractchoices("awake? $$ &Yes $$ &No")
929 929 ('awake? ', [('y', 'Yes'), ('n', 'No')])
930 930 >>> ui.extractchoices("line\\nbreak? $$ &Yes $$ &No")
931 931 ('line\\nbreak? ', [('y', 'Yes'), ('n', 'No')])
932 932 >>> ui.extractchoices("want lots of $$money$$?$$Ye&s$$N&o")
933 933 ('want lots of $$money$$?', [('s', 'Yes'), ('o', 'No')])
934 934 """
935 935
936 936 # Sadly, the prompt string may have been built with a filename
937 937 # containing "$$" so let's try to find the first valid-looking
938 938 # prompt to start parsing. Sadly, we also can't rely on
939 939 # choices containing spaces, ASCII, or basically anything
940 940 # except an ampersand followed by a character.
941 941 m = re.match(r'(?s)(.+?)\$\$([^\$]*&[^ \$].*)', prompt)
942 942 msg = m.group(1)
943 943 choices = [p.strip(' ') for p in m.group(2).split('$$')]
944 944 return (msg,
945 945 [(s[s.index('&') + 1].lower(), s.replace('&', '', 1))
946 946 for s in choices])
947 947
948 948 def promptchoice(self, prompt, default=0):
949 949 """Prompt user with a message, read response, and ensure it matches
950 950 one of the provided choices. The prompt is formatted as follows:
951 951
952 952 "would you like fries with that (Yn)? $$ &Yes $$ &No"
953 953
954 954 The index of the choice is returned. Responses are case
955 955 insensitive. If ui is not interactive, the default is
956 956 returned.
957 957 """
958 958
959 959 msg, choices = self.extractchoices(prompt)
960 960 resps = [r for r, t in choices]
961 961 while True:
962 962 r = self.prompt(msg, resps[default])
963 963 if r.lower() in resps:
964 964 return resps.index(r.lower())
965 965 self.write(_("unrecognized response\n"))
966 966
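A standalone sketch of the same prompt format, with the '$$'/'&' parsing from extractchoices() inlined so it runs without Mercurial; choiceindex is an illustrative name only:

    import re

    def choiceindex(prompt, response):
        # split "msg $$ &Yes $$ &No" into the message and its labelled choices
        m = re.match(r'(?s)(.+?)\$\$([^\$]*&[^ \$].*)', prompt)
        choices = [p.strip(' ') for p in m.group(2).split('$$')]
        # the response character is the one following the ampersand
        resps = [s[s.index('&') + 1].lower() for s in choices]
        return resps.index(response.lower())

    print(choiceindex("apply change (yn)? $$ &Yes $$ &No", "N"))  # -> 1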
967 967 def getpass(self, prompt=None, default=None):
968 968 if not self.interactive():
969 969 return default
970 970 try:
971 971 self.write_err(self.label(prompt or _('password: '), 'ui.prompt'))
972 972 # disable getpass() only if explicitly specified. it's still valid
973 973 # to interact with tty even if fin is not a tty.
974 974 if self.configbool('ui', 'nontty'):
975 975 return self.fin.readline().rstrip('\n')
976 976 else:
977 977 return getpass.getpass('')
978 978 except EOFError:
979 979 raise error.ResponseExpected()
980 980 def status(self, *msg, **opts):
981 981 '''write status message to output (if ui.quiet is False)
982 982
983 983 This adds an output label of "ui.status".
984 984 '''
985 985 if not self.quiet:
986 986 opts['label'] = opts.get('label', '') + ' ui.status'
987 987 self.write(*msg, **opts)
988 988 def warn(self, *msg, **opts):
989 989 '''write warning message to output (stderr)
990 990
991 991 This adds an output label of "ui.warning".
992 992 '''
993 993 opts['label'] = opts.get('label', '') + ' ui.warning'
994 994 self.write_err(*msg, **opts)
995 995 def note(self, *msg, **opts):
996 996 '''write note to output (if ui.verbose is True)
997 997
998 998 This adds an output label of "ui.note".
999 999 '''
1000 1000 if self.verbose:
1001 1001 opts['label'] = opts.get('label', '') + ' ui.note'
1002 1002 self.write(*msg, **opts)
1003 1003 def debug(self, *msg, **opts):
1004 1004 '''write debug message to output (if ui.debugflag is True)
1005 1005
1006 1006 This adds an output label of "ui.debug".
1007 1007 '''
1008 1008 if self.debugflag:
1009 1009 opts['label'] = opts.get('label', '') + ' ui.debug'
1010 1010 self.write(*msg, **opts)
1011 1011
1012 1012 def edit(self, text, user, extra=None, editform=None, pending=None):
1013 1013 extra_defaults = {
1014 1014 'prefix': 'editor',
1015 1015 'suffix': '.txt',
1016 1016 }
1017 1017 if extra is not None:
1018 1018 extra_defaults.update(extra)
1019 1019 extra = extra_defaults
1020 1020 (fd, name) = tempfile.mkstemp(prefix='hg-' + extra['prefix'] + '-',
1021 1021 suffix=extra['suffix'], text=True)
1022 1022 try:
1023 1023 f = os.fdopen(fd, "w")
1024 1024 f.write(text)
1025 1025 f.close()
1026 1026
1027 1027 environ = {'HGUSER': user}
1028 1028 if 'transplant_source' in extra:
1029 1029 environ.update({'HGREVISION': hex(extra['transplant_source'])})
1030 1030 for label in ('intermediate-source', 'source', 'rebase_source'):
1031 1031 if label in extra:
1032 1032 environ.update({'HGREVISION': extra[label]})
1033 1033 break
1034 1034 if editform:
1035 1035 environ.update({'HGEDITFORM': editform})
1036 1036 if pending:
1037 1037 environ.update({'HG_PENDING': pending})
1038 1038
1039 1039 editor = self.geteditor()
1040 1040
1041 1041 self.system("%s \"%s\"" % (editor, name),
1042 1042 environ=environ,
1043 1043 onerr=error.Abort, errprefix=_("edit failed"))
1044 1044
1045 1045 f = open(name)
1046 1046 t = f.read()
1047 1047 f.close()
1048 1048 finally:
1049 1049 os.unlink(name)
1050 1050
1051 1051 return t
1052 1052
1053 1053 def system(self, cmd, environ=None, cwd=None, onerr=None, errprefix=None):
1054 1054 '''execute shell command with appropriate output stream. command
1055 1055 output will be redirected if fout is not stdout.
1056 1056 '''
1057 1057 out = self.fout
1058 1058 if any(s[1] for s in self._bufferstates):
1059 1059 out = self
1060 1060 return util.system(cmd, environ=environ, cwd=cwd, onerr=onerr,
1061 1061 errprefix=errprefix, out=out)
1062 1062
1063 1063 def traceback(self, exc=None, force=False):
1064 1064 '''print exception traceback if traceback printing is enabled or forced.
1065 1065 only to be called from an exception handler. returns true if a
1066 1066 traceback was printed.'''
1067 1067 if self.tracebackflag or force:
1068 1068 if exc is None:
1069 1069 exc = sys.exc_info()
1070 1070 cause = getattr(exc[1], 'cause', None)
1071 1071
1072 1072 if cause is not None:
1073 1073 causetb = traceback.format_tb(cause[2])
1074 1074 exctb = traceback.format_tb(exc[2])
1075 1075 exconly = traceback.format_exception_only(cause[0], cause[1])
1076 1076
1077 1077 # exclude frame where 'exc' was chained and rethrown from exctb
1078 1078 self.write_err('Traceback (most recent call last):\n',
1079 1079 ''.join(exctb[:-1]),
1080 1080 ''.join(causetb),
1081 1081 ''.join(exconly))
1082 1082 else:
1083 1083 output = traceback.format_exception(exc[0], exc[1], exc[2])
1084 1084 self.write_err(''.join(output))
1085 1085 return self.tracebackflag or force
1086 1086
1087 1087 def geteditor(self):
1088 1088 '''return editor to use'''
1089 1089 if sys.platform == 'plan9':
1090 1090 # vi is the MIPS instruction simulator on Plan 9. We
1091 1091 # instead default to E to plumb commit messages to
1092 1092 # avoid confusion.
1093 1093 editor = 'E'
1094 1094 else:
1095 1095 editor = 'vi'
1096 1096 return (encoding.environ.get("HGEDITOR") or
1097 1097 self.config("ui", "editor") or
1098 1098 encoding.environ.get("VISUAL") or
1099 1099 encoding.environ.get("EDITOR", editor))
1100 1100
1101 1101 @util.propertycache
1102 1102 def _progbar(self):
1103 1103 """setup the progbar singleton to the ui object"""
1104 1104 if (self.quiet or self.debugflag
1105 1105 or self.configbool('progress', 'disable', False)
1106 1106 or not progress.shouldprint(self)):
1107 1107 return None
1108 1108 return getprogbar(self)
1109 1109
1110 1110 def _progclear(self):
1111 1111 """clear progress bar output if any. use it before any output"""
1112 1112 if '_progbar' not in vars(self): # nothing loaded yet
1113 1113 return
1114 1114 if self._progbar is not None and self._progbar.printed:
1115 1115 self._progbar.clear()
1116 1116
1117 1117 def progress(self, topic, pos, item="", unit="", total=None):
1118 1118 '''show a progress message
1119 1119
1120 1120 By default a textual progress bar will be displayed if an operation
1121 1121 takes too long. 'topic' is the current operation, 'item' is a
1122 1122 non-numeric marker of the current position (i.e. the currently
1123 1123 in-process file), 'pos' is the current numeric position (i.e.
1124 1124 revision, bytes, etc.), unit is a corresponding unit label,
1125 1125 and total is the highest expected pos.
1126 1126
1127 1127 Multiple nested topics may be active at a time.
1128 1128
1129 1129 All topics should be marked closed by setting pos to None at
1130 1130 termination.
1131 1131 '''
1132 1132 if self._progbar is not None:
1133 1133 self._progbar.progress(topic, pos, item=item, unit=unit,
1134 1134 total=total)
1135 1135 if pos is None or not self.configbool('progress', 'debug'):
1136 1136 return
1137 1137
1138 1138 if unit:
1139 1139 unit = ' ' + unit
1140 1140 if item:
1141 1141 item = ' ' + item
1142 1142
1143 1143 if total:
1144 1144 pct = 100.0 * pos / total
1145 1145 self.debug('%s:%s %s/%s%s (%4.2f%%)\n'
1146 1146 % (topic, item, pos, total, unit, pct))
1147 1147 else:
1148 1148 self.debug('%s:%s %s%s\n' % (topic, item, pos, unit))
1149 1149
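A usage sketch, assuming the mercurial package is importable; the bare ui() instance constructed here is illustrative (real code receives one), and without a loaded configuration the bar may stay silent, but the call pattern matches the docstring above: advance with numeric positions, then close the topic with None.

    from mercurial import ui as uimod

    someui = uimod.ui()   # illustrative instance; real code receives one
    total = 250
    for i in range(total):
        someui.progress('scanning', i, item='file%d' % i, unit='files', total=total)
    someui.progress('scanning', None)   # mark the topic closed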
1150 1150 def log(self, service, *msg, **opts):
1151 1151 '''hook for logging facility extensions
1152 1152
1153 1153 service should be a readily-identifiable subsystem, which will
1154 1154 allow filtering.
1155 1155
1156 1156 *msg should be a newline-terminated format string to log, and
1157 1157 then any values to %-format into that format string.
1158 1158
1159 1159 **opts currently has no defined meanings.
1160 1160 '''
1161 1161
1162 1162 def label(self, msg, label):
1163 1163 '''style msg based on supplied label
1164 1164
1165 1165 Like ui.write(), this just returns msg unchanged, but extensions
1166 1166 and GUI tools can override it to allow styling output without
1167 1167 writing it.
1168 1168
1169 1169 ui.write(s, label='label') is equivalent to
1170 1170 ui.write(ui.label(s, 'label')).
1171 1171 '''
1172 1172 return msg
1173 1173
1174 1174 def develwarn(self, msg, stacklevel=1, config=None):
1175 1175 """issue a developer warning message
1176 1176
1177 1177 Use 'stacklevel' to report the offender some layers further up in the
1178 1178 stack.
1179 1179 """
1180 1180 if not self.configbool('devel', 'all-warnings'):
1181 1181 if config is not None and not self.configbool('devel', config):
1182 1182 return
1183 1183 msg = 'devel-warn: ' + msg
1184 1184 stacklevel += 1 # get in develwarn
1185 1185 if self.tracebackflag:
1186 1186 util.debugstacktrace(msg, stacklevel, self.ferr, self.fout)
1187 1187 self.log('develwarn', '%s at:\n%s' %
1188 1188 (msg, ''.join(util.getstackframes(stacklevel))))
1189 1189 else:
1190 1190 curframe = inspect.currentframe()
1191 1191 calframe = inspect.getouterframes(curframe, 2)
1192 1192 self.write_err('%s at: %s:%s (%s)\n'
1193 1193 % ((msg,) + calframe[stacklevel][1:4]))
1194 1194 self.log('develwarn', '%s at: %s:%s (%s)\n',
1195 1195 msg, *calframe[stacklevel][1:4])
1196 1196 curframe = calframe = None # avoid cycles
1197 1197
1198 1198 def deprecwarn(self, msg, version):
1199 1199 """issue a deprecation warning
1200 1200
1201 1201 - msg: message explaining what is deprecated and how to upgrade,
1202 1202 - version: last version where the API will be supported,
1203 1203 """
1204 1204 if not (self.configbool('devel', 'all-warnings')
1205 1205 or self.configbool('devel', 'deprec-warn')):
1206 1206 return
1207 1207 msg += ("\n(compatibility will be dropped after Mercurial-%s,"
1208 1208 " update your code.)") % version
1209 1209 self.develwarn(msg, stacklevel=2, config='deprec-warn')
1210 1210
1211 1211 @contextlib.contextmanager
1212 1212 def configoverride(self, overrides, source=""):
1213 1213 """Context manager for temporary config overrides
1214 1214 `overrides` must be a dict of the following structure:
1215 1215 {(section, name) : value}"""
1216 1216 backups = {}
1217 1217 try:
1218 1218 for (section, name), value in overrides.items():
1219 1219 backups[(section, name)] = self.backupconfig(section, name)
1220 1220 self.setconfig(section, name, value, source)
1221 1221 yield
1222 1222 finally:
1223 1223 for __, backup in backups.items():
1224 1224 self.restoreconfig(backup)
1225 1225 # just restoring ui.quiet config to the previous value is not enough
1226 1226 # as it does not update ui.quiet class member
1227 1227 if ('ui', 'quiet') in overrides:
1228 1228 self.fixconfig(section='ui')
1229 1229
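A standalone sketch of the same backup/apply/restore pattern over a plain dict, so it runs without a ui instance; the configoverride below is a local illustration, not the method above (which goes through backupconfig/restoreconfig and fixconfig):

    import contextlib

    @contextlib.contextmanager
    def configoverride(config, overrides):
        # back up the current values, apply the overrides, restore on exit
        backups = {key: config.get(key) for key in overrides}
        config.update(overrides)
        try:
            yield
        finally:
            config.update(backups)

    cfg = {('ui', 'quiet'): False}
    with configoverride(cfg, {('ui', 'quiet'): True}):
        print(cfg[('ui', 'quiet')])   # True inside the block
    print(cfg[('ui', 'quiet')])       # False again afterwards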
1230 1230 class paths(dict):
1231 1231 """Represents a collection of paths and their configs.
1232 1232
1233 1233 Data is initially derived from ui instances and the config files they have
1234 1234 loaded.
1235 1235 """
1236 1236 def __init__(self, ui):
1237 1237 dict.__init__(self)
1238 1238
1239 1239 for name, loc in ui.configitems('paths', ignoresub=True):
1240 1240 # No location is the same as not existing.
1241 1241 if not loc:
1242 1242 continue
1243 1243 loc, sub = ui.configsuboptions('paths', name)
1244 1244 self[name] = path(ui, name, rawloc=loc, suboptions=sub)
1245 1245
1246 1246 def getpath(self, name, default=None):
1247 1247 """Return a ``path`` from a string, falling back to default.
1248 1248
1249 1249 ``name`` can be a named path or a location. Locations are filesystem
1250 1250 paths or URIs.
1251 1251
1252 1252 Returns None if ``name`` is not a registered path, a URI, or a local
1253 1253 path to a repo.
1254 1254 """
1255 1255 # Only fall back to default if no path was requested.
1256 1256 if name is None:
1257 1257 if not default:
1258 1258 default = ()
1259 1259 elif not isinstance(default, (tuple, list)):
1260 1260 default = (default,)
1261 1261 for k in default:
1262 1262 try:
1263 1263 return self[k]
1264 1264 except KeyError:
1265 1265 continue
1266 1266 return None
1267 1267
1268 1268 # Most likely empty string.
1269 1269 # This may need to raise in the future.
1270 1270 if not name:
1271 1271 return None
1272 1272
1273 1273 try:
1274 1274 return self[name]
1275 1275 except KeyError:
1276 1276 # Try to resolve as a local path or URI.
1277 1277 try:
1278 1278 # We don't pass sub-options in, so no need to pass ui instance.
1279 1279 return path(None, None, rawloc=name)
1280 1280 except ValueError:
1281 1281 raise error.RepoError(_('repository %s does not exist') %
1282 1282 name)
1283 1283
1284 1284 _pathsuboptions = {}
1285 1285
1286 1286 def pathsuboption(option, attr):
1287 1287 """Decorator used to declare a path sub-option.
1288 1288
1289 1289 Arguments are the sub-option name and the attribute it should set on
1290 1290 ``path`` instances.
1291 1291
1292 1292 The decorated function will receive as arguments a ``ui`` instance,
1293 1293 ``path`` instance, and the string value of this option from the config.
1294 1294 The function should return the value that will be set on the ``path``
1295 1295 instance.
1296 1296
1297 1297 This decorator can be used to perform additional verification of
1298 1298 sub-options and to change the type of sub-options.
1299 1299 """
1300 1300 def register(func):
1301 1301 _pathsuboptions[option] = (attr, func)
1302 1302 return func
1303 1303 return register
1304 1304
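A hypothetical registration, mirroring the pushurl/pushrev examples that follow; the 'readonly' sub-option and attribute are invented for illustration and assume this module is importable as mercurial.ui:

    from mercurial.ui import pathsuboption

    @pathsuboption('readonly', 'readonly')
    def readonlypathoption(ui, path, value):
        # config values arrive as strings; normalize to a boolean here
        return value.lower() in ('1', 'true', 'yes', 'on')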
1305 1305 @pathsuboption('pushurl', 'pushloc')
1306 1306 def pushurlpathoption(ui, path, value):
1307 1307 u = util.url(value)
1308 1308 # Actually require a URL.
1309 1309 if not u.scheme:
1310 1310 ui.warn(_('(paths.%s:pushurl not a URL; ignoring)\n') % path.name)
1311 1311 return None
1312 1312
1313 1313 # Don't support the #foo syntax in the push URL to declare branch to
1314 1314 # push.
1315 1315 if u.fragment:
1316 1316 ui.warn(_('("#fragment" in paths.%s:pushurl not supported; '
1317 1317 'ignoring)\n') % path.name)
1318 1318 u.fragment = None
1319 1319
1320 1320 return str(u)
1321 1321
1322 1322 @pathsuboption('pushrev', 'pushrev')
1323 1323 def pushrevpathoption(ui, path, value):
1324 1324 return value
1325 1325
1326 1326 class path(object):
1327 1327 """Represents an individual path and its configuration."""
1328 1328
1329 1329 def __init__(self, ui, name, rawloc=None, suboptions=None):
1330 1330 """Construct a path from its config options.
1331 1331
1332 1332 ``ui`` is the ``ui`` instance the path is coming from.
1333 1333 ``name`` is the symbolic name of the path.
1334 1334 ``rawloc`` is the raw location, as defined in the config.
1335 1335 ``pushloc`` is the raw location pushes should be made to.
1336 1336
1337 1337 If ``name`` is not defined, we require that the location be a) a local
1338 1338 filesystem path with a .hg directory or b) a URL. If not,
1339 1339 ``ValueError`` is raised.
1340 1340 """
1341 1341 if not rawloc:
1342 1342 raise ValueError('rawloc must be defined')
1343 1343
1344 1344 # Locations may define branches via syntax <base>#<branch>.
1345 1345 u = util.url(rawloc)
1346 1346 branch = None
1347 1347 if u.fragment:
1348 1348 branch = u.fragment
1349 1349 u.fragment = None
1350 1350
1351 1351 self.url = u
1352 1352 self.branch = branch
1353 1353
1354 1354 self.name = name
1355 1355 self.rawloc = rawloc
1356 1356 self.loc = str(u)
1357 1357
1358 1358 # When given a raw location but not a symbolic name, validate that
1359 1359 # the location is valid.
1360 1360 if not name and not u.scheme and not self._isvalidlocalpath(self.loc):
1361 1361 raise ValueError('location is not a URL or path to a local '
1362 1362 'repo: %s' % rawloc)
1363 1363
1364 1364 suboptions = suboptions or {}
1365 1365
1366 1366 # Now process the sub-options. If a sub-option is registered, its
1367 1367 # attribute will always be present. The value will be None if there
1368 1368 # was no valid sub-option.
1369 1369 for suboption, (attr, func) in _pathsuboptions.iteritems():
1370 1370 if suboption not in suboptions:
1371 1371 setattr(self, attr, None)
1372 1372 continue
1373 1373
1374 1374 value = func(ui, self, suboptions[suboption])
1375 1375 setattr(self, attr, value)
1376 1376
1377 1377 def _isvalidlocalpath(self, path):
1378 1378 """Returns True if the given path is a potentially valid repository.
1379 1379 This is its own function so that extensions can change the definition of
1380 1380 'valid' in this case (like when pulling from a git repo into a hg
1381 1381 one)."""
1382 1382 return os.path.isdir(os.path.join(path, '.hg'))
1383 1383
1384 1384 @property
1385 1385 def suboptions(self):
1386 1386 """Return sub-options and their values for this path.
1387 1387
1388 1388 This is intended to be used for presentation purposes.
1389 1389 """
1390 1390 d = {}
1391 1391 for subopt, (attr, _func) in _pathsuboptions.iteritems():
1392 1392 value = getattr(self, attr)
1393 1393 if value is not None:
1394 1394 d[subopt] = value
1395 1395 return d
1396 1396
1397 1397 # we instantiate one globally shared progress bar to avoid
1398 1398 # competing progress bars when multiple UI objects get created
1399 1399 _progresssingleton = None
1400 1400
1401 1401 def getprogbar(ui):
1402 1402 global _progresssingleton
1403 1403 if _progresssingleton is None:
1404 1404 # passing 'ui' object to the singleton is fishy,
1405 1405 # this is how the extension used to work but feel free to rework it.
1406 1406 _progresssingleton = progress.progbar(ui)
1407 1407 return _progresssingleton
@@ -1,3246 +1,3246 b''
1 1 # util.py - Mercurial utility functions and platform specific implementations
2 2 #
3 3 # Copyright 2005 K. Thananchayan <thananck@yahoo.com>
4 4 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
5 5 # Copyright 2006 Vadim Gelfer <vadim.gelfer@gmail.com>
6 6 #
7 7 # This software may be used and distributed according to the terms of the
8 8 # GNU General Public License version 2 or any later version.
9 9
10 10 """Mercurial utility functions and platform specific implementations.
11 11
12 12 This contains helper routines that are independent of the SCM core and
13 13 hide platform-specific details from the core.
14 14 """
15 15
16 16 from __future__ import absolute_import
17 17
18 18 import bz2
19 19 import calendar
20 20 import collections
21 21 import datetime
22 22 import errno
23 23 import gc
24 24 import hashlib
25 25 import imp
26 26 import os
27 27 import platform as pyplatform
28 28 import re as remod
29 29 import shutil
30 30 import signal
31 31 import socket
32 32 import stat
33 33 import string
34 34 import subprocess
35 35 import sys
36 36 import tempfile
37 37 import textwrap
38 38 import time
39 39 import traceback
40 40 import zlib
41 41
42 42 from . import (
43 43 encoding,
44 44 error,
45 45 i18n,
46 46 osutil,
47 47 parsers,
48 48 pycompat,
49 49 )
50 50
51 51 empty = pycompat.empty
52 52 httplib = pycompat.httplib
53 53 httpserver = pycompat.httpserver
54 54 pickle = pycompat.pickle
55 55 queue = pycompat.queue
56 56 socketserver = pycompat.socketserver
57 57 stderr = pycompat.stderr
58 58 stdin = pycompat.stdin
59 59 stdout = pycompat.stdout
60 60 stringio = pycompat.stringio
61 61 urlerr = pycompat.urlerr
62 62 urlparse = pycompat.urlparse
63 63 urlreq = pycompat.urlreq
64 64 xmlrpclib = pycompat.xmlrpclib
65 65
66 66 if os.name == 'nt':
67 67 from . import windows as platform
68 68 stdout = platform.winstdout(pycompat.stdout)
69 69 else:
70 70 from . import posix as platform
71 71
72 72 _ = i18n._
73 73
74 74 bindunixsocket = platform.bindunixsocket
75 75 cachestat = platform.cachestat
76 76 checkexec = platform.checkexec
77 77 checklink = platform.checklink
78 78 copymode = platform.copymode
79 79 executablepath = platform.executablepath
80 80 expandglobs = platform.expandglobs
81 81 explainexit = platform.explainexit
82 82 findexe = platform.findexe
83 83 gethgcmd = platform.gethgcmd
84 84 getuser = platform.getuser
85 85 getpid = os.getpid
86 86 groupmembers = platform.groupmembers
87 87 groupname = platform.groupname
88 88 hidewindow = platform.hidewindow
89 89 isexec = platform.isexec
90 90 isowner = platform.isowner
91 91 localpath = platform.localpath
92 92 lookupreg = platform.lookupreg
93 93 makedir = platform.makedir
94 94 nlinks = platform.nlinks
95 95 normpath = platform.normpath
96 96 normcase = platform.normcase
97 97 normcasespec = platform.normcasespec
98 98 normcasefallback = platform.normcasefallback
99 99 openhardlinks = platform.openhardlinks
100 100 oslink = platform.oslink
101 101 parsepatchoutput = platform.parsepatchoutput
102 102 pconvert = platform.pconvert
103 103 poll = platform.poll
104 104 popen = platform.popen
105 105 posixfile = platform.posixfile
106 106 quotecommand = platform.quotecommand
107 107 readpipe = platform.readpipe
108 108 rename = platform.rename
109 109 removedirs = platform.removedirs
110 110 samedevice = platform.samedevice
111 111 samefile = platform.samefile
112 112 samestat = platform.samestat
113 113 setbinary = platform.setbinary
114 114 setflags = platform.setflags
115 115 setsignalhandler = platform.setsignalhandler
116 116 shellquote = platform.shellquote
117 117 spawndetached = platform.spawndetached
118 118 split = platform.split
119 119 sshargs = platform.sshargs
120 120 statfiles = getattr(osutil, 'statfiles', platform.statfiles)
121 121 statisexec = platform.statisexec
122 122 statislink = platform.statislink
123 123 testpid = platform.testpid
124 124 umask = platform.umask
125 125 unlink = platform.unlink
126 126 unlinkpath = platform.unlinkpath
127 127 username = platform.username
128 128
129 129 # Python compatibility
130 130
131 131 _notset = object()
132 132
133 133 # disable Python's problematic floating point timestamps (issue4836)
134 134 # (Python hypocritically says you shouldn't change this behavior in
135 135 # libraries, and sure enough Mercurial is not a library.)
136 136 os.stat_float_times(False)
137 137
138 138 def safehasattr(thing, attr):
139 139 return getattr(thing, attr, _notset) is not _notset
140 140
141 141 DIGESTS = {
142 142 'md5': hashlib.md5,
143 143 'sha1': hashlib.sha1,
144 144 'sha512': hashlib.sha512,
145 145 }
146 146 # List of digest types from strongest to weakest
147 147 DIGESTS_BY_STRENGTH = ['sha512', 'sha1', 'md5']
148 148
149 149 for k in DIGESTS_BY_STRENGTH:
150 150 assert k in DIGESTS
151 151
152 152 class digester(object):
153 153 """helper to compute digests.
154 154
155 155 This helper can be used to compute one or more digests given their name.
156 156
157 157 >>> d = digester(['md5', 'sha1'])
158 158 >>> d.update('foo')
159 159 >>> [k for k in sorted(d)]
160 160 ['md5', 'sha1']
161 161 >>> d['md5']
162 162 'acbd18db4cc2f85cedef654fccc4a4d8'
163 163 >>> d['sha1']
164 164 '0beec7b5ea3f0fdbc95d0dd47f3c5bc275da8a33'
165 165 >>> digester.preferred(['md5', 'sha1'])
166 166 'sha1'
167 167 """
168 168
169 169 def __init__(self, digests, s=''):
170 170 self._hashes = {}
171 171 for k in digests:
172 172 if k not in DIGESTS:
173 173 raise Abort(_('unknown digest type: %s') % k)
174 174 self._hashes[k] = DIGESTS[k]()
175 175 if s:
176 176 self.update(s)
177 177
178 178 def update(self, data):
179 179 for h in self._hashes.values():
180 180 h.update(data)
181 181
182 182 def __getitem__(self, key):
183 183 if key not in DIGESTS:
184 184 raise Abort(_('unknown digest type: %s') % key)
185 185 return self._hashes[key].hexdigest()
186 186
187 187 def __iter__(self):
188 188 return iter(self._hashes)
189 189
190 190 @staticmethod
191 191 def preferred(supported):
192 192 """returns the strongest digest type in both supported and DIGESTS."""
193 193
194 194 for k in DIGESTS_BY_STRENGTH:
195 195 if k in supported:
196 196 return k
197 197 return None
198 198
199 199 class digestchecker(object):
200 200 """file handle wrapper that additionally checks content against a given
201 201 size and digests.
202 202
203 203 d = digestchecker(fh, size, {'md5': '...'})
204 204
205 205 When multiple digests are given, all of them are validated.
206 206 """
207 207
208 208 def __init__(self, fh, size, digests):
209 209 self._fh = fh
210 210 self._size = size
211 211 self._got = 0
212 212 self._digests = dict(digests)
213 213 self._digester = digester(self._digests.keys())
214 214
215 215 def read(self, length=-1):
216 216 content = self._fh.read(length)
217 217 self._digester.update(content)
218 218 self._got += len(content)
219 219 return content
220 220
221 221 def validate(self):
222 222 if self._size != self._got:
223 223 raise Abort(_('size mismatch: expected %d, got %d') %
224 224 (self._size, self._got))
225 225 for k, v in self._digests.items():
226 226 if v != self._digester[k]:
227 227 # i18n: first parameter is a digest name
228 228 raise Abort(_('%s mismatch: expected %s, got %s') %
229 229 (k, v, self._digester[k]))
230 230
231 231 try:
232 232 buffer = buffer
233 233 except NameError:
234 234 if not pycompat.ispy3:
235 235 def buffer(sliceable, offset=0):
236 236 return sliceable[offset:]
237 237 else:
238 238 def buffer(sliceable, offset=0):
239 239 return memoryview(sliceable)[offset:]
240 240
241 241 closefds = os.name == 'posix'
242 242
243 243 _chunksize = 4096
244 244
245 245 class bufferedinputpipe(object):
246 246 """a manually buffered input pipe
247 247
248 248 Python will not let us use buffered IO and lazy reading with 'polling' at
249 249 the same time. We cannot probe the buffer state and select will not detect
250 250 that data are ready to read if they are already buffered.
251 251
252 252 This class lets us work around that by implementing its own buffering
253 253 (allowing efficient readline) while offering a way to know if the buffer is
254 254 empty from the output (allowing collaboration of the buffer with polling).
255 255
256 256 This class lives in the 'util' module because it makes use of the 'os'
257 257 module from the python stdlib.
258 258 """
259 259
260 260 def __init__(self, input):
261 261 self._input = input
262 262 self._buffer = []
263 263 self._eof = False
264 264 self._lenbuf = 0
265 265
266 266 @property
267 267 def hasbuffer(self):
268 268 """True is any data is currently buffered
269 269
270 270 This will be used externally a pre-step for polling IO. If there is
271 271 already data then no polling should be set in place."""
272 272 return bool(self._buffer)
273 273
274 274 @property
275 275 def closed(self):
276 276 return self._input.closed
277 277
278 278 def fileno(self):
279 279 return self._input.fileno()
280 280
281 281 def close(self):
282 282 return self._input.close()
283 283
284 284 def read(self, size):
285 285 while (not self._eof) and (self._lenbuf < size):
286 286 self._fillbuffer()
287 287 return self._frombuffer(size)
288 288
289 289 def readline(self, *args, **kwargs):
290 290 if 1 < len(self._buffer):
291 291 # this should not happen because both read and readline end with a
292 292 # _frombuffer call that collapse it.
293 293 self._buffer = [''.join(self._buffer)]
294 294 self._lenbuf = len(self._buffer[0])
295 295 lfi = -1
296 296 if self._buffer:
297 297 lfi = self._buffer[-1].find('\n')
298 298 while (not self._eof) and lfi < 0:
299 299 self._fillbuffer()
300 300 if self._buffer:
301 301 lfi = self._buffer[-1].find('\n')
302 302 size = lfi + 1
303 303 if lfi < 0: # end of file
304 304 size = self._lenbuf
305 305 elif 1 < len(self._buffer):
306 306 # we need to take previous chunks into account
307 307 size += self._lenbuf - len(self._buffer[-1])
308 308 return self._frombuffer(size)
309 309
310 310 def _frombuffer(self, size):
311 311 """return at most 'size' data from the buffer
312 312
313 313 The data are removed from the buffer."""
314 314 if size == 0 or not self._buffer:
315 315 return ''
316 316 buf = self._buffer[0]
317 317 if 1 < len(self._buffer):
318 318 buf = ''.join(self._buffer)
319 319
320 320 data = buf[:size]
321 321 buf = buf[len(data):]
322 322 if buf:
323 323 self._buffer = [buf]
324 324 self._lenbuf = len(buf)
325 325 else:
326 326 self._buffer = []
327 327 self._lenbuf = 0
328 328 return data
329 329
330 330 def _fillbuffer(self):
331 331 """read data to the buffer"""
332 332 data = os.read(self._input.fileno(), _chunksize)
333 333 if not data:
334 334 self._eof = True
335 335 else:
336 336 self._lenbuf += len(data)
337 337 self._buffer.append(data)
338 338
339 339 def popen2(cmd, env=None, newlines=False):
340 340 # Setting bufsize to -1 lets the system decide the buffer size.
341 341 # The default for bufsize is 0, meaning unbuffered. This leads to
342 342 # poor performance on Mac OS X: http://bugs.python.org/issue4194
343 343 p = subprocess.Popen(cmd, shell=True, bufsize=-1,
344 344 close_fds=closefds,
345 345 stdin=subprocess.PIPE, stdout=subprocess.PIPE,
346 346 universal_newlines=newlines,
347 347 env=env)
348 348 return p.stdin, p.stdout
349 349
350 350 def popen3(cmd, env=None, newlines=False):
351 351 stdin, stdout, stderr, p = popen4(cmd, env, newlines)
352 352 return stdin, stdout, stderr
353 353
354 354 def popen4(cmd, env=None, newlines=False, bufsize=-1):
355 355 p = subprocess.Popen(cmd, shell=True, bufsize=bufsize,
356 356 close_fds=closefds,
357 357 stdin=subprocess.PIPE, stdout=subprocess.PIPE,
358 358 stderr=subprocess.PIPE,
359 359 universal_newlines=newlines,
360 360 env=env)
361 361 return p.stdin, p.stdout, p.stderr, p
362 362
363 363 def version():
364 364 """Return version information if available."""
365 365 try:
366 366 from . import __version__
367 367 return __version__.version
368 368 except ImportError:
369 369 return 'unknown'
370 370
371 371 def versiontuple(v=None, n=4):
372 372 """Parses a Mercurial version string into an N-tuple.
373 373
374 374 The version string to be parsed is specified with the ``v`` argument.
375 375 If it isn't defined, the current Mercurial version string will be parsed.
376 376
377 377 ``n`` can be 2, 3, or 4. Here is how some version strings map to
378 378 returned values:
379 379
380 380 >>> v = '3.6.1+190-df9b73d2d444'
381 381 >>> versiontuple(v, 2)
382 382 (3, 6)
383 383 >>> versiontuple(v, 3)
384 384 (3, 6, 1)
385 385 >>> versiontuple(v, 4)
386 386 (3, 6, 1, '190-df9b73d2d444')
387 387
388 388 >>> versiontuple('3.6.1+190-df9b73d2d444+20151118')
389 389 (3, 6, 1, '190-df9b73d2d444+20151118')
390 390
391 391 >>> v = '3.6'
392 392 >>> versiontuple(v, 2)
393 393 (3, 6)
394 394 >>> versiontuple(v, 3)
395 395 (3, 6, None)
396 396 >>> versiontuple(v, 4)
397 397 (3, 6, None, None)
398 398
399 399 >>> v = '3.9-rc'
400 400 >>> versiontuple(v, 2)
401 401 (3, 9)
402 402 >>> versiontuple(v, 3)
403 403 (3, 9, None)
404 404 >>> versiontuple(v, 4)
405 405 (3, 9, None, 'rc')
406 406
407 407 >>> v = '3.9-rc+2-02a8fea4289b'
408 408 >>> versiontuple(v, 2)
409 409 (3, 9)
410 410 >>> versiontuple(v, 3)
411 411 (3, 9, None)
412 412 >>> versiontuple(v, 4)
413 413 (3, 9, None, 'rc+2-02a8fea4289b')
414 414 """
415 415 if not v:
416 416 v = version()
417 417 parts = remod.split('[\+-]', v, 1)
418 418 if len(parts) == 1:
419 419 vparts, extra = parts[0], None
420 420 else:
421 421 vparts, extra = parts
422 422
423 423 vints = []
424 424 for i in vparts.split('.'):
425 425 try:
426 426 vints.append(int(i))
427 427 except ValueError:
428 428 break
429 429 # (3, 6) -> (3, 6, None)
430 430 while len(vints) < 3:
431 431 vints.append(None)
432 432
433 433 if n == 2:
434 434 return (vints[0], vints[1])
435 435 if n == 3:
436 436 return (vints[0], vints[1], vints[2])
437 437 if n == 4:
438 438 return (vints[0], vints[1], vints[2], extra)
439 439
440 440 # used by parsedate
441 441 defaultdateformats = (
442 442 '%Y-%m-%dT%H:%M:%S', # the 'real' ISO8601
443 443 '%Y-%m-%dT%H:%M', # without seconds
444 444 '%Y-%m-%dT%H%M%S', # another awful but legal variant without :
445 445 '%Y-%m-%dT%H%M', # without seconds
446 446 '%Y-%m-%d %H:%M:%S', # our common legal variant
447 447 '%Y-%m-%d %H:%M', # without seconds
448 448 '%Y-%m-%d %H%M%S', # without :
449 449 '%Y-%m-%d %H%M', # without seconds
450 450 '%Y-%m-%d %I:%M:%S%p',
451 451 '%Y-%m-%d %H:%M',
452 452 '%Y-%m-%d %I:%M%p',
453 453 '%Y-%m-%d',
454 454 '%m-%d',
455 455 '%m/%d',
456 456 '%m/%d/%y',
457 457 '%m/%d/%Y',
458 458 '%a %b %d %H:%M:%S %Y',
459 459 '%a %b %d %I:%M:%S%p %Y',
460 460 '%a, %d %b %Y %H:%M:%S', # GNU coreutils "/bin/date --rfc-2822"
461 461 '%b %d %H:%M:%S %Y',
462 462 '%b %d %I:%M:%S%p %Y',
463 463 '%b %d %H:%M:%S',
464 464 '%b %d %I:%M:%S%p',
465 465 '%b %d %H:%M',
466 466 '%b %d %I:%M%p',
467 467 '%b %d %Y',
468 468 '%b %d',
469 469 '%H:%M:%S',
470 470 '%I:%M:%S%p',
471 471 '%H:%M',
472 472 '%I:%M%p',
473 473 )
474 474
475 475 extendeddateformats = defaultdateformats + (
476 476 "%Y",
477 477 "%Y-%m",
478 478 "%b",
479 479 "%b %Y",
480 480 )
481 481
482 482 def cachefunc(func):
483 483 '''cache the result of function calls'''
484 484 # XXX doesn't handle keywords args
485 485 if func.__code__.co_argcount == 0:
486 486 cache = []
487 487 def f():
488 488 if len(cache) == 0:
489 489 cache.append(func())
490 490 return cache[0]
491 491 return f
492 492 cache = {}
493 493 if func.__code__.co_argcount == 1:
494 494 # we gain a small amount of time because
495 495 # we don't need to pack/unpack the list
496 496 def f(arg):
497 497 if arg not in cache:
498 498 cache[arg] = func(arg)
499 499 return cache[arg]
500 500 else:
501 501 def f(*args):
502 502 if args not in cache:
503 503 cache[args] = func(*args)
504 504 return cache[args]
505 505
506 506 return f
507 507
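A usage sketch, assuming mercurial.util is importable; the calls list is just instrumentation to show that the second call is served from the cache:

    from mercurial.util import cachefunc

    calls = []

    @cachefunc
    def square(x):
        calls.append(x)            # record the real invocations
        return x * x

    print(square(4), square(4))    # 16 16
    print(calls)                   # [4] -- only the first call ran the function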
508 508 class sortdict(dict):
509 509 '''a simple sorted dictionary'''
510 510 def __init__(self, data=None):
511 511 self._list = []
512 512 if data:
513 513 self.update(data)
514 514 def copy(self):
515 515 return sortdict(self)
516 516 def __setitem__(self, key, val):
517 517 if key in self:
518 518 self._list.remove(key)
519 519 self._list.append(key)
520 520 dict.__setitem__(self, key, val)
521 521 def __iter__(self):
522 522 return self._list.__iter__()
523 523 def update(self, src):
524 524 if isinstance(src, dict):
525 525 src = src.iteritems()
526 526 for k, v in src:
527 527 self[k] = v
528 528 def clear(self):
529 529 dict.clear(self)
530 530 self._list = []
531 531 def items(self):
532 532 return [(k, self[k]) for k in self._list]
533 533 def __delitem__(self, key):
534 534 dict.__delitem__(self, key)
535 535 self._list.remove(key)
536 536 def pop(self, key, *args, **kwargs):
537 537 dict.pop(self, key, *args, **kwargs)
538 538 try:
539 539 self._list.remove(key)
540 540 except ValueError:
541 541 pass
542 542 def keys(self):
543 543 return self._list
544 544 def iterkeys(self):
545 545 return self._list.__iter__()
546 546 def iteritems(self):
547 547 for k in self._list:
548 548 yield k, self[k]
549 549 def insert(self, index, key, val):
550 550 self._list.insert(index, key)
551 551 dict.__setitem__(self, key, val)
552 552 def __repr__(self):
553 553 if not self:
554 554 return '%s()' % self.__class__.__name__
555 555 return '%s(%r)' % (self.__class__.__name__, self.items())
556 556
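A usage sketch, assuming mercurial.util is importable; it shows that iteration follows insertion order and that re-setting a key moves it to the end:

    from mercurial.util import sortdict

    d = sortdict()
    d['b'] = 1
    d['a'] = 2
    d['b'] = 3                # re-inserting 'b' moves it to the end
    print(d.keys())           # ['a', 'b']
    print(d.items())          # [('a', 2), ('b', 3)]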
557 557 class _lrucachenode(object):
558 558 """A node in a doubly linked list.
559 559
560 560 Holds a reference to nodes on either side as well as a key-value
561 561 pair for the dictionary entry.
562 562 """
563 563 __slots__ = (u'next', u'prev', u'key', u'value')
564 564
565 565 def __init__(self):
566 566 self.next = None
567 567 self.prev = None
568 568
569 569 self.key = _notset
570 570 self.value = None
571 571
572 572 def markempty(self):
573 573 """Mark the node as emptied."""
574 574 self.key = _notset
575 575
576 576 class lrucachedict(object):
577 577 """Dict that caches most recent accesses and sets.
578 578
579 579 The dict consists of an actual backing dict - indexed by original
580 580 key - and a doubly linked circular list defining the order of entries in
581 581 the cache.
582 582
583 583 The head node is the newest entry in the cache. If the cache is full,
584 584 we recycle head.prev and make it the new head. Cache accesses result in
585 585 the node being moved to before the existing head and being marked as the
586 586 new head node.
587 587 """
588 588 def __init__(self, max):
589 589 self._cache = {}
590 590
591 591 self._head = head = _lrucachenode()
592 592 head.prev = head
593 593 head.next = head
594 594 self._size = 1
595 595 self._capacity = max
596 596
597 597 def __len__(self):
598 598 return len(self._cache)
599 599
600 600 def __contains__(self, k):
601 601 return k in self._cache
602 602
603 603 def __iter__(self):
604 604 # We don't have to iterate in cache order, but why not.
605 605 n = self._head
606 606 for i in range(len(self._cache)):
607 607 yield n.key
608 608 n = n.next
609 609
610 610 def __getitem__(self, k):
611 611 node = self._cache[k]
612 612 self._movetohead(node)
613 613 return node.value
614 614
615 615 def __setitem__(self, k, v):
616 616 node = self._cache.get(k)
617 617 # Replace existing value and mark as newest.
618 618 if node is not None:
619 619 node.value = v
620 620 self._movetohead(node)
621 621 return
622 622
623 623 if self._size < self._capacity:
624 624 node = self._addcapacity()
625 625 else:
626 626 # Grab the last/oldest item.
627 627 node = self._head.prev
628 628
629 629 # At capacity. Kill the old entry.
630 630 if node.key is not _notset:
631 631 del self._cache[node.key]
632 632
633 633 node.key = k
634 634 node.value = v
635 635 self._cache[k] = node
636 636 # And mark it as newest entry. No need to adjust order since it
637 637 # is already self._head.prev.
638 638 self._head = node
639 639
640 640 def __delitem__(self, k):
641 641 node = self._cache.pop(k)
642 642 node.markempty()
643 643
644 644 # Temporarily mark as newest item before re-adjusting head to make
645 645 # this node the oldest item.
646 646 self._movetohead(node)
647 647 self._head = node.next
648 648
649 649 # Additional dict methods.
650 650
651 651 def get(self, k, default=None):
652 652 try:
653 653 return self._cache[k].value
654 654 except KeyError:
655 655 return default
656 656
657 657 def clear(self):
658 658 n = self._head
659 659 while n.key is not _notset:
660 660 n.markempty()
661 661 n = n.next
662 662
663 663 self._cache.clear()
664 664
665 665 def copy(self):
666 666 result = lrucachedict(self._capacity)
667 667 n = self._head.prev
668 668 # Iterate in oldest-to-newest order, so the copy has the right ordering
669 669 for i in range(len(self._cache)):
670 670 result[n.key] = n.value
671 671 n = n.prev
672 672 return result
673 673
674 674 def _movetohead(self, node):
675 675 """Mark a node as the newest, making it the new head.
676 676
677 677 When a node is accessed, it becomes the freshest entry in the LRU
678 678 list, which is denoted by self._head.
679 679
680 680 Visually, let's make ``N`` the new head node (* denotes head):
681 681
682 682 previous/oldest <-> head <-> next/next newest
683 683
684 684 ----<->--- A* ---<->-----
685 685 | |
686 686 E <-> D <-> N <-> C <-> B
687 687
688 688 To:
689 689
690 690 ----<->--- N* ---<->-----
691 691 | |
692 692 E <-> D <-> C <-> B <-> A
693 693
694 694 This requires the following moves:
695 695
696 696 C.next = D (node.prev.next = node.next)
697 697 D.prev = C (node.next.prev = node.prev)
698 698 E.next = N (head.prev.next = node)
699 699 N.prev = E (node.prev = head.prev)
700 700 N.next = A (node.next = head)
701 701 A.prev = N (head.prev = node)
702 702 """
703 703 head = self._head
704 704 # C.next = D
705 705 node.prev.next = node.next
706 706 # D.prev = C
707 707 node.next.prev = node.prev
708 708 # N.prev = E
709 709 node.prev = head.prev
710 710 # N.next = A
711 711 # It is tempting to do just "head" here, however if node is
712 712 # adjacent to head, this will do bad things.
713 713 node.next = head.prev.next
714 714 # E.next = N
715 715 node.next.prev = node
716 716 # A.prev = N
717 717 node.prev.next = node
718 718
719 719 self._head = node
720 720
721 721 def _addcapacity(self):
722 722 """Add a node to the circular linked list.
723 723
724 724 The new node is inserted before the head node.
725 725 """
726 726 head = self._head
727 727 node = _lrucachenode()
728 728 head.prev.next = node
729 729 node.prev = head.prev
730 730 node.next = head
731 731 head.prev = node
732 732 self._size += 1
733 733 return node
734 734
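A usage sketch, assuming mercurial.util is importable; it exercises the eviction order described in the class docstring:

    from mercurial.util import lrucachedict

    cache = lrucachedict(2)     # keep at most two entries
    cache['a'] = 1
    cache['b'] = 2
    cache['a']                  # touching 'a' makes it the newest entry
    cache['c'] = 3              # evicts 'b', the least recently used key
    print('b' in cache, 'a' in cache)   # False True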
735 735 def lrucachefunc(func):
736 736 '''cache most recent results of function calls'''
737 737 cache = {}
738 738 order = collections.deque()
739 739 if func.__code__.co_argcount == 1:
740 740 def f(arg):
741 741 if arg not in cache:
742 742 if len(cache) > 20:
743 743 del cache[order.popleft()]
744 744 cache[arg] = func(arg)
745 745 else:
746 746 order.remove(arg)
747 747 order.append(arg)
748 748 return cache[arg]
749 749 else:
750 750 def f(*args):
751 751 if args not in cache:
752 752 if len(cache) > 20:
753 753 del cache[order.popleft()]
754 754 cache[args] = func(*args)
755 755 else:
756 756 order.remove(args)
757 757 order.append(args)
758 758 return cache[args]
759 759
760 760 return f
761 761
762 762 class propertycache(object):
763 763 def __init__(self, func):
764 764 self.func = func
765 765 self.name = func.__name__
766 766 def __get__(self, obj, type=None):
767 767 result = self.func(obj)
768 768 self.cachevalue(obj, result)
769 769 return result
770 770
771 771 def cachevalue(self, obj, value):
772 772 # __dict__ assignment required to bypass __setattr__ (eg: repoview)
773 773 obj.__dict__[self.name] = value
774 774
775 775 def pipefilter(s, cmd):
776 776 '''filter string S through command CMD, returning its output'''
777 777 p = subprocess.Popen(cmd, shell=True, close_fds=closefds,
778 778 stdin=subprocess.PIPE, stdout=subprocess.PIPE)
779 779 pout, perr = p.communicate(s)
780 780 return pout
781 781
782 782 def tempfilter(s, cmd):
783 783 '''filter string S through a pair of temporary files with CMD.
784 784 CMD is used as a template to create the real command to be run,
785 785 with the strings INFILE and OUTFILE replaced by the real names of
786 786 the temporary files generated.'''
787 787 inname, outname = None, None
788 788 try:
789 789 infd, inname = tempfile.mkstemp(prefix='hg-filter-in-')
790 790 fp = os.fdopen(infd, 'wb')
791 791 fp.write(s)
792 792 fp.close()
793 793 outfd, outname = tempfile.mkstemp(prefix='hg-filter-out-')
794 794 os.close(outfd)
795 795 cmd = cmd.replace('INFILE', inname)
796 796 cmd = cmd.replace('OUTFILE', outname)
797 797 code = os.system(cmd)
798 798 if sys.platform == 'OpenVMS' and code & 1:
799 799 code = 0
800 800 if code:
801 801 raise Abort(_("command '%s' failed: %s") %
802 802 (cmd, explainexit(code)))
803 803 return readfile(outname)
804 804 finally:
805 805 try:
806 806 if inname:
807 807 os.unlink(inname)
808 808 except OSError:
809 809 pass
810 810 try:
811 811 if outname:
812 812 os.unlink(outname)
813 813 except OSError:
814 814 pass
815 815
816 816 filtertable = {
817 817 'tempfile:': tempfilter,
818 818 'pipe:': pipefilter,
819 819 }
820 820
821 821 def filter(s, cmd):
822 822 "filter a string through a command that transforms its input to its output"
823 823 for name, fn in filtertable.iteritems():
824 824 if cmd.startswith(name):
825 825 return fn(s, cmd[len(name):].lstrip())
826 826 return pipefilter(s, cmd)
827 827
828 828 def binary(s):
829 829 """return true if a string is binary data"""
830 830 return bool(s and '\0' in s)
831 831
832 832 def increasingchunks(source, min=1024, max=65536):
833 833 '''return no less than min bytes per chunk while data remains,
834 834 doubling min after each chunk until it reaches max'''
835 835 def log2(x):
836 836 if not x:
837 837 return 0
838 838 i = 0
839 839 while x:
840 840 x >>= 1
841 841 i += 1
842 842 return i - 1
843 843
844 844 buf = []
845 845 blen = 0
846 846 for chunk in source:
847 847 buf.append(chunk)
848 848 blen += len(chunk)
849 849 if blen >= min:
850 850 if min < max:
851 851 min = min << 1
852 852 nmin = 1 << log2(blen)
853 853 if nmin > min:
854 854 min = nmin
855 855 if min > max:
856 856 min = max
857 857 yield ''.join(buf)
858 858 blen = 0
859 859 buf = []
860 860 if buf:
861 861 yield ''.join(buf)
862 862
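A usage sketch, assuming mercurial.util is importable; eight 1 KiB pieces are regrouped so that no chunk is smaller than min, with the threshold doubling toward max:

    from mercurial.util import increasingchunks

    pieces = ['x' * 1024 for _ in range(8)]
    sizes = [len(c) for c in increasingchunks(pieces, min=2048, max=4096)]
    print(sizes)   # [2048, 4096, 2048]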
863 863 Abort = error.Abort
864 864
865 865 def always(fn):
866 866 return True
867 867
868 868 def never(fn):
869 869 return False
870 870
871 871 def nogc(func):
872 872 """disable garbage collector
873 873
874 874 Python's garbage collector triggers a GC each time a certain number of
875 875 container objects (the number being defined by gc.get_threshold()) are
876 876 allocated even when marked not to be tracked by the collector. Tracking has
877 877 no effect on when GCs are triggered, only on what objects the GC looks
878 878 into. As a workaround, disable GC while building complex (huge)
879 879 containers.
880 880
881 881 This garbage collector issue has been fixed in Python 2.7.
882 882 """
883 883 if sys.version_info >= (2, 7):
884 884 return func
885 885 def wrapper(*args, **kwargs):
886 886 gcenabled = gc.isenabled()
887 887 gc.disable()
888 888 try:
889 889 return func(*args, **kwargs)
890 890 finally:
891 891 if gcenabled:
892 892 gc.enable()
893 893 return wrapper
894 894
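A usage sketch, assuming mercurial.util is importable; on Python >= 2.7 the decorator is a no-op, on older interpreters it keeps the collector off only while the decorated call runs:

    from mercurial.util import nogc

    @nogc
    def buildindex(items):
        # build a large container without GC pauses (pre-2.7 interpreters)
        return dict((item, idx) for idx, item in enumerate(items))

    print(len(buildindex(range(100000))))   # 100000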
895 895 def pathto(root, n1, n2):
896 896 '''return the relative path from one place to another.
897 897 root should use os.sep to separate directories
898 898 n1 should use os.sep to separate directories
899 899 n2 should use "/" to separate directories
900 900 returns an os.sep-separated path.
901 901
902 902 If n1 is a relative path, it's assumed it's
903 903 relative to root.
904 904 n2 should always be relative to root.
905 905 '''
906 906 if not n1:
907 907 return localpath(n2)
908 908 if os.path.isabs(n1):
909 909 if os.path.splitdrive(root)[0] != os.path.splitdrive(n1)[0]:
910 910 return os.path.join(root, localpath(n2))
911 911 n2 = '/'.join((pconvert(root), n2))
912 912 a, b = splitpath(n1), n2.split('/')
913 913 a.reverse()
914 914 b.reverse()
915 915 while a and b and a[-1] == b[-1]:
916 916 a.pop()
917 917 b.pop()
918 918 b.reverse()
919 919 return pycompat.ossep.join((['..'] * len(a)) + b) or '.'
920 920
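A usage sketch, assuming mercurial.util is importable and a POSIX os.sep; the inputs follow the docstring's conventions (n1 relative to root, n2 '/'-separated):

    from mercurial.util import pathto

    print(pathto('/repo', 'src/lib', 'src/main.py'))   # ../main.py
    print(pathto('/repo', '', 'docs/readme.txt'))      # docs/readme.txt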
921 921 def mainfrozen():
922 922 """return True if we are a frozen executable.
923 923
924 924 The code supports py2exe (most common, Windows only) and tools/freeze
925 925 (portable, not much used).
926 926 """
927 927 return (safehasattr(sys, "frozen") or # new py2exe
928 928 safehasattr(sys, "importers") or # old py2exe
929 929 imp.is_frozen(u"__main__")) # tools/freeze
930 930
931 931 # the location of data files matching the source code
932 932 if mainfrozen() and getattr(sys, 'frozen', None) != 'macosx_app':
933 933 # executable version (py2exe) doesn't support __file__
934 934 datapath = os.path.dirname(sys.executable)
935 935 else:
936 936 datapath = os.path.dirname(__file__)
937 937
938 938 if not isinstance(datapath, bytes):
939 939 datapath = pycompat.fsencode(datapath)
940 940
941 941 i18n.setdatapath(datapath)
942 942
943 943 _hgexecutable = None
944 944
945 945 def hgexecutable():
946 946 """return location of the 'hg' executable.
947 947
948 948 Defaults to $HG or 'hg' in the search path.
949 949 """
950 950 if _hgexecutable is None:
951 hg = os.environ.get('HG')
951 hg = encoding.environ.get('HG')
952 952 mainmod = sys.modules['__main__']
953 953 if hg:
954 954 _sethgexecutable(hg)
955 955 elif mainfrozen():
956 956 if getattr(sys, 'frozen', None) == 'macosx_app':
957 957 # Env variable set by py2app
958 _sethgexecutable(os.environ['EXECUTABLEPATH'])
958 _sethgexecutable(encoding.environ['EXECUTABLEPATH'])
959 959 else:
960 960 _sethgexecutable(sys.executable)
961 961 elif os.path.basename(getattr(mainmod, '__file__', '')) == 'hg':
962 962 _sethgexecutable(mainmod.__file__)
963 963 else:
964 964 exe = findexe('hg') or os.path.basename(sys.argv[0])
965 965 _sethgexecutable(exe)
966 966 return _hgexecutable
967 967
968 968 def _sethgexecutable(path):
969 969 """set location of the 'hg' executable"""
970 970 global _hgexecutable
971 971 _hgexecutable = path
972 972
973 973 def _isstdout(f):
974 974 fileno = getattr(f, 'fileno', None)
975 975 return fileno and fileno() == sys.__stdout__.fileno()
976 976
977 977 def system(cmd, environ=None, cwd=None, onerr=None, errprefix=None, out=None):
978 978 '''enhanced shell command execution.
979 979 run with environment maybe modified, maybe in different dir.
980 980
981 981 if command fails and onerr is None, return status, else raise onerr
982 982 object as exception.
983 983
984 984 if out is specified, it is assumed to be a file-like object that has a
985 985 write() method. stdout and stderr will be redirected to out.'''
986 986 if environ is None:
987 987 environ = {}
988 988 try:
989 989 stdout.flush()
990 990 except Exception:
991 991 pass
992 992 def py2shell(val):
993 993 'convert python object into string that is useful to shell'
994 994 if val is None or val is False:
995 995 return '0'
996 996 if val is True:
997 997 return '1'
998 998 return str(val)
999 999 origcmd = cmd
1000 1000 cmd = quotecommand(cmd)
1001 1001 if sys.platform == 'plan9' and (sys.version_info[0] == 2
1002 1002 and sys.version_info[1] < 7):
1003 1003 # subprocess kludge to work around issues in half-baked Python
1004 1004 # ports, notably bichued/python:
1005 1005 if not cwd is None:
1006 1006 os.chdir(cwd)
1007 1007 rc = os.system(cmd)
1008 1008 else:
1009 env = dict(os.environ)
1009 env = dict(encoding.environ)
1010 1010 env.update((k, py2shell(v)) for k, v in environ.iteritems())
1011 1011 env['HG'] = hgexecutable()
1012 1012 if out is None or _isstdout(out):
1013 1013 rc = subprocess.call(cmd, shell=True, close_fds=closefds,
1014 1014 env=env, cwd=cwd)
1015 1015 else:
1016 1016 proc = subprocess.Popen(cmd, shell=True, close_fds=closefds,
1017 1017 env=env, cwd=cwd, stdout=subprocess.PIPE,
1018 1018 stderr=subprocess.STDOUT)
1019 1019 for line in iter(proc.stdout.readline, ''):
1020 1020 out.write(line)
1021 1021 proc.wait()
1022 1022 rc = proc.returncode
1023 1023 if sys.platform == 'OpenVMS' and rc & 1:
1024 1024 rc = 0
1025 1025 if rc and onerr:
1026 1026 errmsg = '%s %s' % (os.path.basename(origcmd.split(None, 1)[0]),
1027 1027 explainexit(rc)[0])
1028 1028 if errprefix:
1029 1029 errmsg = '%s: %s' % (errprefix, errmsg)
1030 1030 raise onerr(errmsg)
1031 1031 return rc
1032 1032
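A POSIX-flavoured usage sketch, assuming mercurial.util is importable; GREETING is an invented variable showing how entries in environ are layered on top of the inherited environment, with HG injected automatically as in the code above:

    from mercurial import util

    rc = util.system('echo "$GREETING ($HG)"', environ={'GREETING': 'hello'})
    print(rc)   # 0 on success; a non-zero status would raise if onerr were set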
1033 1033 def checksignature(func):
1034 1034 '''wrap a function with code to check for calling errors'''
1035 1035 def check(*args, **kwargs):
1036 1036 try:
1037 1037 return func(*args, **kwargs)
1038 1038 except TypeError:
1039 1039 if len(traceback.extract_tb(sys.exc_info()[2])) == 1:
1040 1040 raise error.SignatureError
1041 1041 raise
1042 1042
1043 1043 return check
1044 1044
1045 1045 def copyfile(src, dest, hardlink=False, copystat=False, checkambig=False):
1046 1046 '''copy a file, preserving mode and optionally other stat info like
1047 1047 atime/mtime
1048 1048
1049 1049 checkambig argument is used with filestat, and is useful only if
1050 1050 destination file is guarded by any lock (e.g. repo.lock or
1051 1051 repo.wlock).
1052 1052
1053 1053 copystat and checkambig should be exclusive.
1054 1054 '''
1055 1055 assert not (copystat and checkambig)
1056 1056 oldstat = None
1057 1057 if os.path.lexists(dest):
1058 1058 if checkambig:
1059 1059 oldstat = checkambig and filestat(dest)
1060 1060 unlink(dest)
1061 1061 # hardlinks are problematic on CIFS, quietly ignore this flag
1062 1062 # until we find a way to work around it cleanly (issue4546)
1063 1063 if False and hardlink:
1064 1064 try:
1065 1065 oslink(src, dest)
1066 1066 return
1067 1067 except (IOError, OSError):
1068 1068 pass # fall back to normal copy
1069 1069 if os.path.islink(src):
1070 1070 os.symlink(os.readlink(src), dest)
1071 1071 # copytime is ignored for symlinks, but in general copytime isn't needed
1072 1072 # for them anyway
1073 1073 else:
1074 1074 try:
1075 1075 shutil.copyfile(src, dest)
1076 1076 if copystat:
1077 1077 # copystat also copies mode
1078 1078 shutil.copystat(src, dest)
1079 1079 else:
1080 1080 shutil.copymode(src, dest)
1081 1081 if oldstat and oldstat.stat:
1082 1082 newstat = filestat(dest)
1083 1083 if newstat.isambig(oldstat):
1084 1084 # stat of copied file is ambiguous to original one
1085 1085 advanced = (oldstat.stat.st_mtime + 1) & 0x7fffffff
1086 1086 os.utime(dest, (advanced, advanced))
1087 1087 except shutil.Error as inst:
1088 1088 raise Abort(str(inst))
1089 1089
1090 1090 def copyfiles(src, dst, hardlink=None, progress=lambda t, pos: None):
1091 1091 """Copy a directory tree using hardlinks if possible."""
1092 1092 num = 0
1093 1093
1094 1094 if hardlink is None:
1095 1095 hardlink = (os.stat(src).st_dev ==
1096 1096 os.stat(os.path.dirname(dst)).st_dev)
1097 1097 if hardlink:
1098 1098 topic = _('linking')
1099 1099 else:
1100 1100 topic = _('copying')
1101 1101
1102 1102 if os.path.isdir(src):
1103 1103 os.mkdir(dst)
1104 1104 for name, kind in osutil.listdir(src):
1105 1105 srcname = os.path.join(src, name)
1106 1106 dstname = os.path.join(dst, name)
1107 1107 def nprog(t, pos):
1108 1108 if pos is not None:
1109 1109 return progress(t, pos + num)
1110 1110 hardlink, n = copyfiles(srcname, dstname, hardlink, progress=nprog)
1111 1111 num += n
1112 1112 else:
1113 1113 if hardlink:
1114 1114 try:
1115 1115 oslink(src, dst)
1116 1116 except (IOError, OSError):
1117 1117 hardlink = False
1118 1118 shutil.copy(src, dst)
1119 1119 else:
1120 1120 shutil.copy(src, dst)
1121 1121 num += 1
1122 1122 progress(topic, num)
1123 1123 progress(topic, None)
1124 1124
1125 1125 return hardlink, num
1126 1126
1127 1127 _winreservednames = '''con prn aux nul
1128 1128 com1 com2 com3 com4 com5 com6 com7 com8 com9
1129 1129 lpt1 lpt2 lpt3 lpt4 lpt5 lpt6 lpt7 lpt8 lpt9'''.split()
1130 1130 _winreservedchars = ':*?"<>|'
1131 1131 def checkwinfilename(path):
1132 1132 r'''Check that the base-relative path is a valid filename on Windows.
1133 1133 Returns None if the path is ok, or a UI string describing the problem.
1134 1134
1135 1135 >>> checkwinfilename("just/a/normal/path")
1136 1136 >>> checkwinfilename("foo/bar/con.xml")
1137 1137 "filename contains 'con', which is reserved on Windows"
1138 1138 >>> checkwinfilename("foo/con.xml/bar")
1139 1139 "filename contains 'con', which is reserved on Windows"
1140 1140 >>> checkwinfilename("foo/bar/xml.con")
1141 1141 >>> checkwinfilename("foo/bar/AUX/bla.txt")
1142 1142 "filename contains 'AUX', which is reserved on Windows"
1143 1143 >>> checkwinfilename("foo/bar/bla:.txt")
1144 1144 "filename contains ':', which is reserved on Windows"
1145 1145 >>> checkwinfilename("foo/bar/b\07la.txt")
1146 1146 "filename contains '\\x07', which is invalid on Windows"
1147 1147 >>> checkwinfilename("foo/bar/bla ")
1148 1148 "filename ends with ' ', which is not allowed on Windows"
1149 1149 >>> checkwinfilename("../bar")
1150 1150 >>> checkwinfilename("foo\\")
1151 1151 "filename ends with '\\', which is invalid on Windows"
1152 1152 >>> checkwinfilename("foo\\/bar")
1153 1153 "directory name ends with '\\', which is invalid on Windows"
1154 1154 '''
1155 1155 if path.endswith('\\'):
1156 1156 return _("filename ends with '\\', which is invalid on Windows")
1157 1157 if '\\/' in path:
1158 1158 return _("directory name ends with '\\', which is invalid on Windows")
1159 1159 for n in path.replace('\\', '/').split('/'):
1160 1160 if not n:
1161 1161 continue
1162 1162 for c in n:
1163 1163 if c in _winreservedchars:
1164 1164 return _("filename contains '%s', which is reserved "
1165 1165 "on Windows") % c
1166 1166 if ord(c) <= 31:
1167 1167 return _("filename contains %r, which is invalid "
1168 1168 "on Windows") % c
1169 1169 base = n.split('.')[0]
1170 1170 if base and base.lower() in _winreservednames:
1171 1171 return _("filename contains '%s', which is reserved "
1172 1172 "on Windows") % base
1173 1173 t = n[-1]
1174 1174 if t in '. ' and n not in '..':
1175 1175 return _("filename ends with '%s', which is not allowed "
1176 1176 "on Windows") % t
1177 1177
1178 1178 if os.name == 'nt':
1179 1179 checkosfilename = checkwinfilename
1180 1180 else:
1181 1181 checkosfilename = platform.checkosfilename
1182 1182
1183 1183 def makelock(info, pathname):
1184 1184 try:
1185 1185 return os.symlink(info, pathname)
1186 1186 except OSError as why:
1187 1187 if why.errno == errno.EEXIST:
1188 1188 raise
1189 1189 except AttributeError: # no symlink in os
1190 1190 pass
1191 1191
1192 1192 ld = os.open(pathname, os.O_CREAT | os.O_WRONLY | os.O_EXCL)
1193 1193 os.write(ld, info)
1194 1194 os.close(ld)
1195 1195
1196 1196 def readlock(pathname):
1197 1197 try:
1198 1198 return os.readlink(pathname)
1199 1199 except OSError as why:
1200 1200 if why.errno not in (errno.EINVAL, errno.ENOSYS):
1201 1201 raise
1202 1202 except AttributeError: # no symlink in os
1203 1203 pass
1204 1204 fp = posixfile(pathname)
1205 1205 r = fp.read()
1206 1206 fp.close()
1207 1207 return r
1208 1208
1209 1209 def fstat(fp):
1210 1210 '''stat file object that may not have fileno method.'''
1211 1211 try:
1212 1212 return os.fstat(fp.fileno())
1213 1213 except AttributeError:
1214 1214 return os.stat(fp.name)
1215 1215
1216 1216 # File system features
1217 1217
1218 1218 def fscasesensitive(path):
1219 1219 """
1220 1220 Return true if the given path is on a case-sensitive filesystem
1221 1221
1222 1222 Requires a path (like /foo/.hg) ending with a foldable final
1223 1223 directory component.
1224 1224 """
1225 1225 s1 = os.lstat(path)
1226 1226 d, b = os.path.split(path)
1227 1227 b2 = b.upper()
1228 1228 if b == b2:
1229 1229 b2 = b.lower()
1230 1230 if b == b2:
1231 1231 return True # no evidence against case sensitivity
1232 1232 p2 = os.path.join(d, b2)
1233 1233 try:
1234 1234 s2 = os.lstat(p2)
1235 1235 if s2 == s1:
1236 1236 return False
1237 1237 return True
1238 1238 except OSError:
1239 1239 return True
1240 1240
1241 1241 try:
1242 1242 import re2
1243 1243 _re2 = None
1244 1244 except ImportError:
1245 1245 _re2 = False
1246 1246
1247 1247 class _re(object):
1248 1248 def _checkre2(self):
1249 1249 global _re2
1250 1250 try:
1251 1251 # check if match works, see issue3964
1252 1252 _re2 = bool(re2.match(r'\[([^\[]+)\]', '[ui]'))
1253 1253 except ImportError:
1254 1254 _re2 = False
1255 1255
1256 1256 def compile(self, pat, flags=0):
1257 1257 '''Compile a regular expression, using re2 if possible
1258 1258
1259 1259 For best performance, use only re2-compatible regexp features. The
1260 1260 only flags from the re module that are re2-compatible are
1261 1261 IGNORECASE and MULTILINE.'''
1262 1262 if _re2 is None:
1263 1263 self._checkre2()
1264 1264 if _re2 and (flags & ~(remod.IGNORECASE | remod.MULTILINE)) == 0:
1265 1265 if flags & remod.IGNORECASE:
1266 1266 pat = '(?i)' + pat
1267 1267 if flags & remod.MULTILINE:
1268 1268 pat = '(?m)' + pat
1269 1269 try:
1270 1270 return re2.compile(pat)
1271 1271 except re2.error:
1272 1272 pass
1273 1273 return remod.compile(pat, flags)
1274 1274
1275 1275 @propertycache
1276 1276 def escape(self):
1277 1277 '''Return the version of escape corresponding to self.compile.
1278 1278
1279 1279 This is imperfect because whether re2 or re is used for a particular
1280 1280 function depends on the flags, etc, but it's the best we can do.
1281 1281 '''
1282 1282 global _re2
1283 1283 if _re2 is None:
1284 1284 self._checkre2()
1285 1285 if _re2:
1286 1286 return re2.escape
1287 1287 else:
1288 1288 return remod.escape
1289 1289
1290 1290 re = _re()
1291 1291
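A small sketch of the wrapper above: util.re.compile() prefers re2 when it is importable and the flags allow it, and silently falls back to the standard library otherwise (module path assumed to be mercurial.util):

    import re as remod
    from mercurial import util

    pat = util.re.compile(r'^[a-f0-9]{40}$', remod.IGNORECASE)
    assert pat.match('DEADBEEF' * 5)   # matches with either engine
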
1292 1292 _fspathcache = {}
1293 1293 def fspath(name, root):
1294 1294 '''Get name in the case stored in the filesystem
1295 1295
1296 1296 The name should be relative to root, and be normcase-ed for efficiency.
1297 1297
1298 1298 Note that this function is unnecessary, and should not be
1299 1299 called, for case-sensitive filesystems (simply because it's expensive).
1300 1300
1301 1301 The root should be normcase-ed, too.
1302 1302 '''
1303 1303 def _makefspathcacheentry(dir):
1304 1304 return dict((normcase(n), n) for n in os.listdir(dir))
1305 1305
1306 1306 seps = pycompat.ossep
1307 1307 if pycompat.osaltsep:
1308 1308 seps = seps + pycompat.osaltsep
1309 1309 # Protect backslashes. This gets silly very quickly.
1310 1310 seps.replace('\\','\\\\')
1311 1311 pattern = remod.compile(r'([^%s]+)|([%s]+)' % (seps, seps))
1312 1312 dir = os.path.normpath(root)
1313 1313 result = []
1314 1314 for part, sep in pattern.findall(name):
1315 1315 if sep:
1316 1316 result.append(sep)
1317 1317 continue
1318 1318
1319 1319 if dir not in _fspathcache:
1320 1320 _fspathcache[dir] = _makefspathcacheentry(dir)
1321 1321 contents = _fspathcache[dir]
1322 1322
1323 1323 found = contents.get(part)
1324 1324 if not found:
1325 1325 # retry "once per directory" per "dirstate.walk" which
1326 1326 # may take place for each patch of "hg qpush", for example
1327 1327 _fspathcache[dir] = contents = _makefspathcacheentry(dir)
1328 1328 found = contents.get(part)
1329 1329
1330 1330 result.append(found or part)
1331 1331 dir = os.path.join(dir, part)
1332 1332
1333 1333 return ''.join(result)
1334 1334
1335 1335 def checknlink(testfile):
1336 1336 '''check whether hardlink count reporting works properly'''
1337 1337
1338 1338 # testfile may be open, so we need a separate file for checking to
1339 1339 # work around issue2543 (or testfile may get lost on Samba shares)
1340 1340 f1 = testfile + ".hgtmp1"
1341 1341 if os.path.lexists(f1):
1342 1342 return False
1343 1343 try:
1344 1344 posixfile(f1, 'w').close()
1345 1345 except IOError:
1346 1346 try:
1347 1347 os.unlink(f1)
1348 1348 except OSError:
1349 1349 pass
1350 1350 return False
1351 1351
1352 1352 f2 = testfile + ".hgtmp2"
1353 1353 fd = None
1354 1354 try:
1355 1355 oslink(f1, f2)
1356 1356 # nlinks() may behave differently for files on Windows shares if
1357 1357 # the file is open.
1358 1358 fd = posixfile(f2)
1359 1359 return nlinks(f2) > 1
1360 1360 except OSError:
1361 1361 return False
1362 1362 finally:
1363 1363 if fd is not None:
1364 1364 fd.close()
1365 1365 for f in (f1, f2):
1366 1366 try:
1367 1367 os.unlink(f)
1368 1368 except OSError:
1369 1369 pass
1370 1370
1371 1371 def endswithsep(path):
1372 1372 '''Check path ends with os.sep or os.altsep.'''
1373 1373 return (path.endswith(pycompat.ossep)
1374 1374 or pycompat.osaltsep and path.endswith(pycompat.osaltsep))
1375 1375
1376 1376 def splitpath(path):
1377 1377 '''Split path by os.sep.
1378 1378 Note that this function does not use os.altsep because it is
1379 1379 meant as an alternative to a simple "xxx.split(os.sep)".
1380 1380 It is recommended to use os.path.normpath() before using this
1381 1381 function if needed.'''
1382 1382 return path.split(pycompat.ossep)
1383 1383
1384 1384 def gui():
1385 1385 '''Are we running in a GUI?'''
1386 1386 if sys.platform == 'darwin':
1387 if 'SSH_CONNECTION' in os.environ:
1387 if 'SSH_CONNECTION' in encoding.environ:
1388 1388 # handle SSH access to a box where the user is logged in
1389 1389 return False
1390 1390 elif getattr(osutil, 'isgui', None):
1391 1391 # check if a CoreGraphics session is available
1392 1392 return osutil.isgui()
1393 1393 else:
1394 1394 # pure build; use a safe default
1395 1395 return True
1396 1396 else:
1397 return os.name == "nt" or os.environ.get("DISPLAY")
1397 return os.name == "nt" or encoding.environ.get("DISPLAY")
1398 1398
1399 1399 def mktempcopy(name, emptyok=False, createmode=None):
1400 1400 """Create a temporary file with the same contents from name
1401 1401
1402 1402 The permission bits are copied from the original file.
1403 1403
1404 1404 If the temporary file is going to be truncated immediately, you
1405 1405 can use emptyok=True as an optimization.
1406 1406
1407 1407 Returns the name of the temporary file.
1408 1408 """
1409 1409 d, fn = os.path.split(name)
1410 1410 fd, temp = tempfile.mkstemp(prefix='.%s-' % fn, dir=d)
1411 1411 os.close(fd)
1412 1412 # Temporary files are created with mode 0600, which is usually not
1413 1413 # what we want. If the original file already exists, just copy
1414 1414 # its mode. Otherwise, manually obey umask.
1415 1415 copymode(name, temp, createmode)
1416 1416 if emptyok:
1417 1417 return temp
1418 1418 try:
1419 1419 try:
1420 1420 ifp = posixfile(name, "rb")
1421 1421 except IOError as inst:
1422 1422 if inst.errno == errno.ENOENT:
1423 1423 return temp
1424 1424 if not getattr(inst, 'filename', None):
1425 1425 inst.filename = name
1426 1426 raise
1427 1427 ofp = posixfile(temp, "wb")
1428 1428 for chunk in filechunkiter(ifp):
1429 1429 ofp.write(chunk)
1430 1430 ifp.close()
1431 1431 ofp.close()
1432 1432 except: # re-raises
1433 1433 try: os.unlink(temp)
1434 1434 except OSError: pass
1435 1435 raise
1436 1436 return temp
1437 1437
1438 1438 class filestat(object):
1439 1439 """help to exactly detect change of a file
1440 1440
1441 1441 'stat' attribute is result of 'os.stat()' if specified 'path'
1442 1442 exists. Otherwise, it is None. This can avoid preparative
1443 1443 'exists()' examination on client side of this class.
1444 1444 """
1445 1445 def __init__(self, path):
1446 1446 try:
1447 1447 self.stat = os.stat(path)
1448 1448 except OSError as err:
1449 1449 if err.errno != errno.ENOENT:
1450 1450 raise
1451 1451 self.stat = None
1452 1452
1453 1453 __hash__ = object.__hash__
1454 1454
1455 1455 def __eq__(self, old):
1456 1456 try:
1457 1457 # if ambiguity between stat of new and old file is
1458 1458 # avoided, comparison of size, ctime and mtime is enough
1459 1459 # to exactly detect change of a file regardless of platform
1460 1460 return (self.stat.st_size == old.stat.st_size and
1461 1461 self.stat.st_ctime == old.stat.st_ctime and
1462 1462 self.stat.st_mtime == old.stat.st_mtime)
1463 1463 except AttributeError:
1464 1464 return False
1465 1465
1466 1466 def isambig(self, old):
1467 1467 """Examine whether new (= self) stat is ambiguous against old one
1468 1468
1469 1469 "S[N]" below means stat of a file at N-th change:
1470 1470
1471 1471 - S[n-1].ctime < S[n].ctime: can detect change of a file
1472 1472 - S[n-1].ctime == S[n].ctime
1473 1473 - S[n-1].ctime < S[n].mtime: means natural advancing (*1)
1474 1474 - S[n-1].ctime == S[n].mtime: is ambiguous (*2)
1475 1475 - S[n-1].ctime > S[n].mtime: never occurs naturally (don't care)
1476 1476 - S[n-1].ctime > S[n].ctime: never occurs naturally (don't care)
1477 1477
1478 1478 Case (*2) above means that a file was changed twice or more at
1479 1479 same time in sec (= S[n-1].ctime), and comparison of timestamp
1480 1480 is ambiguous.
1481 1481
1482 1482 Base idea to avoid such ambiguity is "advance mtime 1 sec, if
1483 1483 timestamp is ambiguous".
1484 1484
1485 1485 But advancing mtime only in case (*2) doesn't work as
1486 1486 expected, because naturally advanced S[n].mtime in case (*1)
1487 1487 might be equal to manually advanced S[n-1 or earlier].mtime.
1488 1488
1489 1489 Therefore, all "S[n-1].ctime == S[n].ctime" cases should be
1490 1490 treated as ambiguous regardless of mtime, to avoid overlooking
1491 1491 changes caused by collisions between such mtimes.
1492 1492
1493 1493 Advancing mtime "if isambig(oldstat)" ensures "S[n-1].mtime !=
1494 1494 S[n].mtime", even if size of a file isn't changed.
1495 1495 """
1496 1496 try:
1497 1497 return (self.stat.st_ctime == old.stat.st_ctime)
1498 1498 except AttributeError:
1499 1499 return False
1500 1500
1501 1501 def avoidambig(self, path, old):
1502 1502 """Change file stat of specified path to avoid ambiguity
1503 1503
1504 1504 'old' should be previous filestat of 'path'.
1505 1505
1506 1506 This skips avoiding ambiguity, if a process doesn't have
1507 1507 appropriate privileges for 'path'.
1508 1508 """
1509 1509 advanced = (old.stat.st_mtime + 1) & 0x7fffffff
1510 1510 try:
1511 1511 os.utime(path, (advanced, advanced))
1512 1512 except OSError as inst:
1513 1513 if inst.errno == errno.EPERM:
1514 1514 # utime() on the file created by another user causes EPERM,
1515 1515 # if a process doesn't have appropriate privileges
1516 1516 return
1517 1517 raise
1518 1518
1519 1519 def __ne__(self, other):
1520 1520 return not self == other
1521 1521
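A sketch of the intended calling pattern for filestat, assuming the caller holds the relevant lock; the path and the rewrite step are illustrative:

    from mercurial import util

    old = util.filestat('.hg/cache/somecache')   # stat before rewriting
    # ... rewrite '.hg/cache/somecache' in place (illustrative step) ...
    new = util.filestat('.hg/cache/somecache')
    if new.isambig(old):
        # ctime did not move, so a size/ctime/mtime comparison could miss
        # the change; push mtime one second forward to disambiguate
        new.avoidambig('.hg/cache/somecache', old)
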
1522 1522 class atomictempfile(object):
1523 1523 '''writable file object that atomically updates a file
1524 1524
1525 1525 All writes will go to a temporary copy of the original file. Call
1526 1526 close() when you are done writing, and atomictempfile will rename
1527 1527 the temporary copy to the original name, making the changes
1528 1528 visible. If the object is destroyed without being closed, all your
1529 1529 writes are discarded.
1530 1530
1531 1531 checkambig argument of constructor is used with filestat, and is
1532 1532 useful only if target file is guarded by any lock (e.g. repo.lock
1533 1533 or repo.wlock).
1534 1534 '''
1535 1535 def __init__(self, name, mode='w+b', createmode=None, checkambig=False):
1536 1536 self.__name = name # permanent name
1537 1537 self._tempname = mktempcopy(name, emptyok=('w' in mode),
1538 1538 createmode=createmode)
1539 1539 self._fp = posixfile(self._tempname, mode)
1540 1540 self._checkambig = checkambig
1541 1541
1542 1542 # delegated methods
1543 1543 self.read = self._fp.read
1544 1544 self.write = self._fp.write
1545 1545 self.seek = self._fp.seek
1546 1546 self.tell = self._fp.tell
1547 1547 self.fileno = self._fp.fileno
1548 1548
1549 1549 def close(self):
1550 1550 if not self._fp.closed:
1551 1551 self._fp.close()
1552 1552 filename = localpath(self.__name)
1553 1553 oldstat = self._checkambig and filestat(filename)
1554 1554 if oldstat and oldstat.stat:
1555 1555 rename(self._tempname, filename)
1556 1556 newstat = filestat(filename)
1557 1557 if newstat.isambig(oldstat):
1558 1558 # stat of changed file is ambiguous to original one
1559 1559 advanced = (oldstat.stat.st_mtime + 1) & 0x7fffffff
1560 1560 os.utime(filename, (advanced, advanced))
1561 1561 else:
1562 1562 rename(self._tempname, filename)
1563 1563
1564 1564 def discard(self):
1565 1565 if not self._fp.closed:
1566 1566 try:
1567 1567 os.unlink(self._tempname)
1568 1568 except OSError:
1569 1569 pass
1570 1570 self._fp.close()
1571 1571
1572 1572 def __del__(self):
1573 1573 if safehasattr(self, '_fp'): # constructor actually did something
1574 1574 self.discard()
1575 1575
1576 1576 def __enter__(self):
1577 1577 return self
1578 1578
1579 1579 def __exit__(self, exctype, excvalue, traceback):
1580 1580 if exctype is not None:
1581 1581 self.discard()
1582 1582 else:
1583 1583 self.close()
1584 1584
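A minimal usage sketch for atomictempfile with an illustrative path; the rename performed by close() is what makes the update atomic:

    from mercurial import util

    with util.atomictempfile('data/state.bin', mode='wb') as f:
        f.write('all or nothing\n')
    # on a clean exit the temporary copy has been renamed over data/state.bin;
    # if the block had raised, the partial write would have been discarded
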
1585 1585 def makedirs(name, mode=None, notindexed=False):
1586 1586 """recursive directory creation with parent mode inheritance
1587 1587
1588 1588 Newly created directories are marked as "not to be indexed by
1589 1589 the content indexing service", if ``notindexed`` is specified
1590 1590 for "write" mode access.
1591 1591 """
1592 1592 try:
1593 1593 makedir(name, notindexed)
1594 1594 except OSError as err:
1595 1595 if err.errno == errno.EEXIST:
1596 1596 return
1597 1597 if err.errno != errno.ENOENT or not name:
1598 1598 raise
1599 1599 parent = os.path.dirname(os.path.abspath(name))
1600 1600 if parent == name:
1601 1601 raise
1602 1602 makedirs(parent, mode, notindexed)
1603 1603 try:
1604 1604 makedir(name, notindexed)
1605 1605 except OSError as err:
1606 1606 # Catch EEXIST to handle races
1607 1607 if err.errno == errno.EEXIST:
1608 1608 return
1609 1609 raise
1610 1610 if mode is not None:
1611 1611 os.chmod(name, mode)
1612 1612
1613 1613 def readfile(path):
1614 1614 with open(path, 'rb') as fp:
1615 1615 return fp.read()
1616 1616
1617 1617 def writefile(path, text):
1618 1618 with open(path, 'wb') as fp:
1619 1619 fp.write(text)
1620 1620
1621 1621 def appendfile(path, text):
1622 1622 with open(path, 'ab') as fp:
1623 1623 fp.write(text)
1624 1624
1625 1625 class chunkbuffer(object):
1626 1626 """Allow arbitrary sized chunks of data to be efficiently read from an
1627 1627 iterator over chunks of arbitrary size."""
1628 1628
1629 1629 def __init__(self, in_iter):
1630 1630 """in_iter is the iterator that's iterating over the input chunks.
1631 1631 targetsize is how big a buffer to try to maintain."""
1632 1632 def splitbig(chunks):
1633 1633 for chunk in chunks:
1634 1634 if len(chunk) > 2**20:
1635 1635 pos = 0
1636 1636 while pos < len(chunk):
1637 1637 end = pos + 2 ** 18
1638 1638 yield chunk[pos:end]
1639 1639 pos = end
1640 1640 else:
1641 1641 yield chunk
1642 1642 self.iter = splitbig(in_iter)
1643 1643 self._queue = collections.deque()
1644 1644 self._chunkoffset = 0
1645 1645
1646 1646 def read(self, l=None):
1647 1647 """Read L bytes of data from the iterator of chunks of data.
1648 1648 Returns less than L bytes if the iterator runs dry.
1649 1649
1650 1650 If the size parameter 'l' is omitted, read everything"""
1651 1651 if l is None:
1652 1652 return ''.join(self.iter)
1653 1653
1654 1654 left = l
1655 1655 buf = []
1656 1656 queue = self._queue
1657 1657 while left > 0:
1658 1658 # refill the queue
1659 1659 if not queue:
1660 1660 target = 2**18
1661 1661 for chunk in self.iter:
1662 1662 queue.append(chunk)
1663 1663 target -= len(chunk)
1664 1664 if target <= 0:
1665 1665 break
1666 1666 if not queue:
1667 1667 break
1668 1668
1669 1669 # The easy way to do this would be to queue.popleft(), modify the
1670 1670 # chunk (if necessary), then queue.appendleft(). However, for cases
1671 1671 # where we read partial chunk content, this incurs 2 dequeue
1672 1672 # mutations and creates a new str for the remaining chunk in the
1673 1673 # queue. Our code below avoids this overhead.
1674 1674
1675 1675 chunk = queue[0]
1676 1676 chunkl = len(chunk)
1677 1677 offset = self._chunkoffset
1678 1678
1679 1679 # Use full chunk.
1680 1680 if offset == 0 and left >= chunkl:
1681 1681 left -= chunkl
1682 1682 queue.popleft()
1683 1683 buf.append(chunk)
1684 1684 # self._chunkoffset remains at 0.
1685 1685 continue
1686 1686
1687 1687 chunkremaining = chunkl - offset
1688 1688
1689 1689 # Use all of unconsumed part of chunk.
1690 1690 if left >= chunkremaining:
1691 1691 left -= chunkremaining
1692 1692 queue.popleft()
1693 1693 # offset == 0 is enabled by block above, so this won't merely
1694 1694 # copy via ``chunk[0:]``.
1695 1695 buf.append(chunk[offset:])
1696 1696 self._chunkoffset = 0
1697 1697
1698 1698 # Partial chunk needed.
1699 1699 else:
1700 1700 buf.append(chunk[offset:offset + left])
1701 1701 self._chunkoffset += left
1702 1702 left -= chunkremaining
1703 1703
1704 1704 return ''.join(buf)
1705 1705
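A small illustration of chunkbuffer re-slicing arbitrary input chunks:

    from mercurial import util

    buf = util.chunkbuffer(iter(['abc', 'defg', 'h']))
    buf.read(4)     # 'abcd' - spans the first two input chunks
    buf.read(10)    # 'efgh' - returns less than requested once data runs out
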
1706 1706 def filechunkiter(f, size=131072, limit=None):
1707 1707 """Create a generator that produces the data in the file size
1708 1708 (default 131072) bytes at a time, up to optional limit (default is
1709 1709 to read all data). Chunks may be less than size bytes if the
1710 1710 chunk is the last chunk in the file, or the file is a socket or
1711 1711 some other type of file that sometimes reads less data than is
1712 1712 requested."""
1713 1713 assert size >= 0
1714 1714 assert limit is None or limit >= 0
1715 1715 while True:
1716 1716 if limit is None:
1717 1717 nbytes = size
1718 1718 else:
1719 1719 nbytes = min(limit, size)
1720 1720 s = nbytes and f.read(nbytes)
1721 1721 if not s:
1722 1722 break
1723 1723 if limit:
1724 1724 limit -= len(s)
1725 1725 yield s
1726 1726
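An illustrative use of filechunkiter over an in-memory file object:

    import io
    from mercurial import util

    fp = io.BytesIO('x' * 300000)
    chunks = list(util.filechunkiter(fp, size=131072))
    # -> pieces of 131072, 131072 and 37856 bytes; pass limit= to stop earlier
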
1727 1727 def makedate(timestamp=None):
1728 1728 '''Return a unix timestamp (or the current time) as a (unixtime,
1729 1729 offset) tuple based off the local timezone.'''
1730 1730 if timestamp is None:
1731 1731 timestamp = time.time()
1732 1732 if timestamp < 0:
1733 1733 hint = _("check your clock")
1734 1734 raise Abort(_("negative timestamp: %d") % timestamp, hint=hint)
1735 1735 delta = (datetime.datetime.utcfromtimestamp(timestamp) -
1736 1736 datetime.datetime.fromtimestamp(timestamp))
1737 1737 tz = delta.days * 86400 + delta.seconds
1738 1738 return timestamp, tz
1739 1739
1740 1740 def datestr(date=None, format='%a %b %d %H:%M:%S %Y %1%2'):
1741 1741 """represent a (unixtime, offset) tuple as a localized time.
1742 1742 unixtime is seconds since the epoch, and offset is the time zone's
1743 1743 number of seconds away from UTC.
1744 1744
1745 1745 >>> datestr((0, 0))
1746 1746 'Thu Jan 01 00:00:00 1970 +0000'
1747 1747 >>> datestr((42, 0))
1748 1748 'Thu Jan 01 00:00:42 1970 +0000'
1749 1749 >>> datestr((-42, 0))
1750 1750 'Wed Dec 31 23:59:18 1969 +0000'
1751 1751 >>> datestr((0x7fffffff, 0))
1752 1752 'Tue Jan 19 03:14:07 2038 +0000'
1753 1753 >>> datestr((-0x80000000, 0))
1754 1754 'Fri Dec 13 20:45:52 1901 +0000'
1755 1755 """
1756 1756 t, tz = date or makedate()
1757 1757 if "%1" in format or "%2" in format or "%z" in format:
1758 1758 sign = (tz > 0) and "-" or "+"
1759 1759 minutes = abs(tz) // 60
1760 1760 q, r = divmod(minutes, 60)
1761 1761 format = format.replace("%z", "%1%2")
1762 1762 format = format.replace("%1", "%c%02d" % (sign, q))
1763 1763 format = format.replace("%2", "%02d" % r)
1764 1764 d = t - tz
1765 1765 if d > 0x7fffffff:
1766 1766 d = 0x7fffffff
1767 1767 elif d < -0x80000000:
1768 1768 d = -0x80000000
1769 1769 # Never use time.gmtime() and datetime.datetime.fromtimestamp()
1770 1770 # because they use the gmtime() system call which is buggy on Windows
1771 1771 # for negative values.
1772 1772 t = datetime.datetime(1970, 1, 1) + datetime.timedelta(seconds=d)
1773 1773 s = t.strftime(format)
1774 1774 return s
1775 1775
1776 1776 def shortdate(date=None):
1777 1777 """turn (timestamp, tzoff) tuple into iso 8631 date."""
1778 1778 return datestr(date, format='%Y-%m-%d')
1779 1779
1780 1780 def parsetimezone(s):
1781 1781 """find a trailing timezone, if any, in string, and return a
1782 1782 (offset, remainder) pair"""
1783 1783
1784 1784 if s.endswith("GMT") or s.endswith("UTC"):
1785 1785 return 0, s[:-3].rstrip()
1786 1786
1787 1787 # Unix-style timezones [+-]hhmm
1788 1788 if len(s) >= 5 and s[-5] in "+-" and s[-4:].isdigit():
1789 1789 sign = (s[-5] == "+") and 1 or -1
1790 1790 hours = int(s[-4:-2])
1791 1791 minutes = int(s[-2:])
1792 1792 return -sign * (hours * 60 + minutes) * 60, s[:-5].rstrip()
1793 1793
1794 1794 # ISO8601 trailing Z
1795 1795 if s.endswith("Z") and s[-2:-1].isdigit():
1796 1796 return 0, s[:-1]
1797 1797
1798 1798 # ISO8601-style [+-]hh:mm
1799 1799 if (len(s) >= 6 and s[-6] in "+-" and s[-3] == ":" and
1800 1800 s[-5:-3].isdigit() and s[-2:].isdigit()):
1801 1801 sign = (s[-6] == "+") and 1 or -1
1802 1802 hours = int(s[-5:-3])
1803 1803 minutes = int(s[-2:])
1804 1804 return -sign * (hours * 60 + minutes) * 60, s[:-6]
1805 1805
1806 1806 return None, s
1807 1807
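An illustrative call showing the (offset, remainder) contract of parsetimezone(); the offset follows the 'unixtime = localunixtime + offset' convention noted in strdate() below:

    from mercurial import util

    util.parsetimezone('2006-02-01 13:00:30 -0500')
    # -> (18000, '2006-02-01 13:00:30'): five hours expressed in seconds,
    #    plus the part of the string left over for the date/time parser
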
1808 1808 def strdate(string, format, defaults=[]):
1809 1809 """parse a localized time string and return a (unixtime, offset) tuple.
1810 1810 if the string cannot be parsed, ValueError is raised."""
1811 1811 # NOTE: unixtime = localunixtime + offset
1812 1812 offset, date = parsetimezone(string)
1813 1813
1814 1814 # add missing elements from defaults
1815 1815 usenow = False # default to using biased defaults
1816 1816 for part in ("S", "M", "HI", "d", "mb", "yY"): # decreasing specificity
1817 1817 found = [True for p in part if ("%"+p) in format]
1818 1818 if not found:
1819 1819 date += "@" + defaults[part][usenow]
1820 1820 format += "@%" + part[0]
1821 1821 else:
1822 1822 # We've found a specific time element, less specific time
1823 1823 # elements are relative to today
1824 1824 usenow = True
1825 1825
1826 1826 timetuple = time.strptime(date, format)
1827 1827 localunixtime = int(calendar.timegm(timetuple))
1828 1828 if offset is None:
1829 1829 # local timezone
1830 1830 unixtime = int(time.mktime(timetuple))
1831 1831 offset = unixtime - localunixtime
1832 1832 else:
1833 1833 unixtime = localunixtime + offset
1834 1834 return unixtime, offset
1835 1835
1836 1836 def parsedate(date, formats=None, bias=None):
1837 1837 """parse a localized date/time and return a (unixtime, offset) tuple.
1838 1838
1839 1839 The date may be a "unixtime offset" string or in one of the specified
1840 1840 formats. If the date already is a (unixtime, offset) tuple, it is returned.
1841 1841
1842 1842 >>> parsedate(' today ') == parsedate(\
1843 1843 datetime.date.today().strftime('%b %d'))
1844 1844 True
1845 1845 >>> parsedate( 'yesterday ') == parsedate((datetime.date.today() -\
1846 1846 datetime.timedelta(days=1)\
1847 1847 ).strftime('%b %d'))
1848 1848 True
1849 1849 >>> now, tz = makedate()
1850 1850 >>> strnow, strtz = parsedate('now')
1851 1851 >>> (strnow - now) < 1
1852 1852 True
1853 1853 >>> tz == strtz
1854 1854 True
1855 1855 """
1856 1856 if bias is None:
1857 1857 bias = {}
1858 1858 if not date:
1859 1859 return 0, 0
1860 1860 if isinstance(date, tuple) and len(date) == 2:
1861 1861 return date
1862 1862 if not formats:
1863 1863 formats = defaultdateformats
1864 1864 date = date.strip()
1865 1865
1866 1866 if date == 'now' or date == _('now'):
1867 1867 return makedate()
1868 1868 if date == 'today' or date == _('today'):
1869 1869 date = datetime.date.today().strftime('%b %d')
1870 1870 elif date == 'yesterday' or date == _('yesterday'):
1871 1871 date = (datetime.date.today() -
1872 1872 datetime.timedelta(days=1)).strftime('%b %d')
1873 1873
1874 1874 try:
1875 1875 when, offset = map(int, date.split(' '))
1876 1876 except ValueError:
1877 1877 # fill out defaults
1878 1878 now = makedate()
1879 1879 defaults = {}
1880 1880 for part in ("d", "mb", "yY", "HI", "M", "S"):
1881 1881 # this piece is for rounding the specific end of unknowns
1882 1882 b = bias.get(part)
1883 1883 if b is None:
1884 1884 if part[0] in "HMS":
1885 1885 b = "00"
1886 1886 else:
1887 1887 b = "0"
1888 1888
1889 1889 # this piece is for matching the generic end to today's date
1890 1890 n = datestr(now, "%" + part[0])
1891 1891
1892 1892 defaults[part] = (b, n)
1893 1893
1894 1894 for format in formats:
1895 1895 try:
1896 1896 when, offset = strdate(date, format, defaults)
1897 1897 except (ValueError, OverflowError):
1898 1898 pass
1899 1899 else:
1900 1900 break
1901 1901 else:
1902 1902 raise Abort(_('invalid date: %r') % date)
1903 1903 # validate explicit (probably user-specified) date and
1904 1904 # time zone offset. values must fit in signed 32 bits for
1905 1905 # current 32-bit linux runtimes. timezones go from UTC-12
1906 1906 # to UTC+14
1907 1907 if when < -0x80000000 or when > 0x7fffffff:
1908 1908 raise Abort(_('date exceeds 32 bits: %d') % when)
1909 1909 if offset < -50400 or offset > 43200:
1910 1910 raise Abort(_('impossible time zone offset: %d') % offset)
1911 1911 return when, offset
1912 1912
1913 1913 def matchdate(date):
1914 1914 """Return a function that matches a given date match specifier
1915 1915
1916 1916 Formats include:
1917 1917
1918 1918 '{date}' match a given date to the accuracy provided
1919 1919
1920 1920 '<{date}' on or before a given date
1921 1921
1922 1922 '>{date}' on or after a given date
1923 1923
1924 1924 >>> p1 = parsedate("10:29:59")
1925 1925 >>> p2 = parsedate("10:30:00")
1926 1926 >>> p3 = parsedate("10:30:59")
1927 1927 >>> p4 = parsedate("10:31:00")
1928 1928 >>> p5 = parsedate("Sep 15 10:30:00 1999")
1929 1929 >>> f = matchdate("10:30")
1930 1930 >>> f(p1[0])
1931 1931 False
1932 1932 >>> f(p2[0])
1933 1933 True
1934 1934 >>> f(p3[0])
1935 1935 True
1936 1936 >>> f(p4[0])
1937 1937 False
1938 1938 >>> f(p5[0])
1939 1939 False
1940 1940 """
1941 1941
1942 1942 def lower(date):
1943 1943 d = {'mb': "1", 'd': "1"}
1944 1944 return parsedate(date, extendeddateformats, d)[0]
1945 1945
1946 1946 def upper(date):
1947 1947 d = {'mb': "12", 'HI': "23", 'M': "59", 'S': "59"}
1948 1948 for days in ("31", "30", "29"):
1949 1949 try:
1950 1950 d["d"] = days
1951 1951 return parsedate(date, extendeddateformats, d)[0]
1952 1952 except Abort:
1953 1953 pass
1954 1954 d["d"] = "28"
1955 1955 return parsedate(date, extendeddateformats, d)[0]
1956 1956
1957 1957 date = date.strip()
1958 1958
1959 1959 if not date:
1960 1960 raise Abort(_("dates cannot consist entirely of whitespace"))
1961 1961 elif date[0] == "<":
1962 1962 if not date[1:]:
1963 1963 raise Abort(_("invalid day spec, use '<DATE'"))
1964 1964 when = upper(date[1:])
1965 1965 return lambda x: x <= when
1966 1966 elif date[0] == ">":
1967 1967 if not date[1:]:
1968 1968 raise Abort(_("invalid day spec, use '>DATE'"))
1969 1969 when = lower(date[1:])
1970 1970 return lambda x: x >= when
1971 1971 elif date[0] == "-":
1972 1972 try:
1973 1973 days = int(date[1:])
1974 1974 except ValueError:
1975 1975 raise Abort(_("invalid day spec: %s") % date[1:])
1976 1976 if days < 0:
1977 1977 raise Abort(_("%s must be nonnegative (see 'hg help dates')")
1978 1978 % date[1:])
1979 1979 when = makedate()[0] - days * 3600 * 24
1980 1980 return lambda x: x >= when
1981 1981 elif " to " in date:
1982 1982 a, b = date.split(" to ")
1983 1983 start, stop = lower(a), upper(b)
1984 1984 return lambda x: x >= start and x <= stop
1985 1985 else:
1986 1986 start, stop = lower(date), upper(date)
1987 1987 return lambda x: x >= start and x <= stop
1988 1988
1989 1989 def stringmatcher(pattern):
1990 1990 """
1991 1991 accepts a string, possibly starting with 're:' or 'literal:' prefix.
1992 1992 returns the matcher name, pattern, and matcher function.
1993 1993 missing or unknown prefixes are treated as literal matches.
1994 1994
1995 1995 helper for tests:
1996 1996 >>> def test(pattern, *tests):
1997 1997 ... kind, pattern, matcher = stringmatcher(pattern)
1998 1998 ... return (kind, pattern, [bool(matcher(t)) for t in tests])
1999 1999
2000 2000 exact matching (no prefix):
2001 2001 >>> test('abcdefg', 'abc', 'def', 'abcdefg')
2002 2002 ('literal', 'abcdefg', [False, False, True])
2003 2003
2004 2004 regex matching ('re:' prefix)
2005 2005 >>> test('re:a.+b', 'nomatch', 'fooadef', 'fooadefbar')
2006 2006 ('re', 'a.+b', [False, False, True])
2007 2007
2008 2008 force exact matches ('literal:' prefix)
2009 2009 >>> test('literal:re:foobar', 'foobar', 're:foobar')
2010 2010 ('literal', 're:foobar', [False, True])
2011 2011
2012 2012 unknown prefixes are ignored and treated as literals
2013 2013 >>> test('foo:bar', 'foo', 'bar', 'foo:bar')
2014 2014 ('literal', 'foo:bar', [False, False, True])
2015 2015 """
2016 2016 if pattern.startswith('re:'):
2017 2017 pattern = pattern[3:]
2018 2018 try:
2019 2019 regex = remod.compile(pattern)
2020 2020 except remod.error as e:
2021 2021 raise error.ParseError(_('invalid regular expression: %s')
2022 2022 % e)
2023 2023 return 're', pattern, regex.search
2024 2024 elif pattern.startswith('literal:'):
2025 2025 pattern = pattern[8:]
2026 2026 return 'literal', pattern, pattern.__eq__
2027 2027
2028 2028 def shortuser(user):
2029 2029 """Return a short representation of a user name or email address."""
2030 2030 f = user.find('@')
2031 2031 if f >= 0:
2032 2032 user = user[:f]
2033 2033 f = user.find('<')
2034 2034 if f >= 0:
2035 2035 user = user[f + 1:]
2036 2036 f = user.find(' ')
2037 2037 if f >= 0:
2038 2038 user = user[:f]
2039 2039 f = user.find('.')
2040 2040 if f >= 0:
2041 2041 user = user[:f]
2042 2042 return user
2043 2043
2044 2044 def emailuser(user):
2045 2045 """Return the user portion of an email address."""
2046 2046 f = user.find('@')
2047 2047 if f >= 0:
2048 2048 user = user[:f]
2049 2049 f = user.find('<')
2050 2050 if f >= 0:
2051 2051 user = user[f + 1:]
2052 2052 return user
2053 2053
2054 2054 def email(author):
2055 2055 '''get email of author.'''
2056 2056 r = author.find('>')
2057 2057 if r == -1:
2058 2058 r = None
2059 2059 return author[author.find('<') + 1:r]
2060 2060
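An illustrative author string run through the three helpers above:

    from mercurial import util

    author = 'Joe User <joe.user@example.com>'
    util.email(author)       # -> 'joe.user@example.com'
    util.emailuser(author)   # -> 'joe.user'
    util.shortuser(author)   # -> 'joe'
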
2061 2061 def ellipsis(text, maxlength=400):
2062 2062 """Trim string to at most maxlength (default: 400) columns in display."""
2063 2063 return encoding.trim(text, maxlength, ellipsis='...')
2064 2064
2065 2065 def unitcountfn(*unittable):
2066 2066 '''return a function that renders a readable count of some quantity'''
2067 2067
2068 2068 def go(count):
2069 2069 for multiplier, divisor, format in unittable:
2070 2070 if count >= divisor * multiplier:
2071 2071 return format % (count / float(divisor))
2072 2072 return unittable[-1][2] % count
2073 2073
2074 2074 return go
2075 2075
2076 2076 bytecount = unitcountfn(
2077 2077 (100, 1 << 30, _('%.0f GB')),
2078 2078 (10, 1 << 30, _('%.1f GB')),
2079 2079 (1, 1 << 30, _('%.2f GB')),
2080 2080 (100, 1 << 20, _('%.0f MB')),
2081 2081 (10, 1 << 20, _('%.1f MB')),
2082 2082 (1, 1 << 20, _('%.2f MB')),
2083 2083 (100, 1 << 10, _('%.0f KB')),
2084 2084 (10, 1 << 10, _('%.1f KB')),
2085 2085 (1, 1 << 10, _('%.2f KB')),
2086 2086 (1, 1, _('%.0f bytes')),
2087 2087 )
2088 2088
2089 2089 def uirepr(s):
2090 2090 # Avoid double backslash in Windows path repr()
2091 2091 return repr(s).replace('\\\\', '\\')
2092 2092
2093 2093 # delay import of textwrap
2094 2094 def MBTextWrapper(**kwargs):
2095 2095 class tw(textwrap.TextWrapper):
2096 2096 """
2097 2097 Extend TextWrapper for width-awareness.
2098 2098
2099 2099 Neither the number of 'bytes' in any encoding nor the number of
2100 2100 'characters' is appropriate for calculating the terminal columns of a string.
2101 2101
2102 2102 The original TextWrapper implementation uses the built-in 'len()' directly,
2103 2103 so overriding is needed to use the width information of each character.
2104 2104
2105 2105 In addition, characters classified as 'ambiguous' width are
2106 2106 treated as wide in East Asian locales, but as narrow elsewhere.
2107 2107
2108 2108 This requires a user decision to determine the width of such characters.
2109 2109 """
2110 2110 def _cutdown(self, ucstr, space_left):
2111 2111 l = 0
2112 2112 colwidth = encoding.ucolwidth
2113 2113 for i in xrange(len(ucstr)):
2114 2114 l += colwidth(ucstr[i])
2115 2115 if space_left < l:
2116 2116 return (ucstr[:i], ucstr[i:])
2117 2117 return ucstr, ''
2118 2118
2119 2119 # overriding of base class
2120 2120 def _handle_long_word(self, reversed_chunks, cur_line, cur_len, width):
2121 2121 space_left = max(width - cur_len, 1)
2122 2122
2123 2123 if self.break_long_words:
2124 2124 cut, res = self._cutdown(reversed_chunks[-1], space_left)
2125 2125 cur_line.append(cut)
2126 2126 reversed_chunks[-1] = res
2127 2127 elif not cur_line:
2128 2128 cur_line.append(reversed_chunks.pop())
2129 2129
2130 2130 # this overriding code is imported from TextWrapper of Python 2.6
2131 2131 # to calculate columns of string by 'encoding.ucolwidth()'
2132 2132 def _wrap_chunks(self, chunks):
2133 2133 colwidth = encoding.ucolwidth
2134 2134
2135 2135 lines = []
2136 2136 if self.width <= 0:
2137 2137 raise ValueError("invalid width %r (must be > 0)" % self.width)
2138 2138
2139 2139 # Arrange in reverse order so items can be efficiently popped
2140 2140 # from a stack of chunks.
2141 2141 chunks.reverse()
2142 2142
2143 2143 while chunks:
2144 2144
2145 2145 # Start the list of chunks that will make up the current line.
2146 2146 # cur_len is just the length of all the chunks in cur_line.
2147 2147 cur_line = []
2148 2148 cur_len = 0
2149 2149
2150 2150 # Figure out which static string will prefix this line.
2151 2151 if lines:
2152 2152 indent = self.subsequent_indent
2153 2153 else:
2154 2154 indent = self.initial_indent
2155 2155
2156 2156 # Maximum width for this line.
2157 2157 width = self.width - len(indent)
2158 2158
2159 2159 # First chunk on line is whitespace -- drop it, unless this
2160 2160 # is the very beginning of the text (i.e. no lines started yet).
2161 2161 if self.drop_whitespace and chunks[-1].strip() == '' and lines:
2162 2162 del chunks[-1]
2163 2163
2164 2164 while chunks:
2165 2165 l = colwidth(chunks[-1])
2166 2166
2167 2167 # Can at least squeeze this chunk onto the current line.
2168 2168 if cur_len + l <= width:
2169 2169 cur_line.append(chunks.pop())
2170 2170 cur_len += l
2171 2171
2172 2172 # Nope, this line is full.
2173 2173 else:
2174 2174 break
2175 2175
2176 2176 # The current line is full, and the next chunk is too big to
2177 2177 # fit on *any* line (not just this one).
2178 2178 if chunks and colwidth(chunks[-1]) > width:
2179 2179 self._handle_long_word(chunks, cur_line, cur_len, width)
2180 2180
2181 2181 # If the last chunk on this line is all whitespace, drop it.
2182 2182 if (self.drop_whitespace and
2183 2183 cur_line and cur_line[-1].strip() == ''):
2184 2184 del cur_line[-1]
2185 2185
2186 2186 # Convert current line back to a string and store it in list
2187 2187 # of all lines (return value).
2188 2188 if cur_line:
2189 2189 lines.append(indent + ''.join(cur_line))
2190 2190
2191 2191 return lines
2192 2192
2193 2193 global MBTextWrapper
2194 2194 MBTextWrapper = tw
2195 2195 return tw(**kwargs)
2196 2196
2197 2197 def wrap(line, width, initindent='', hangindent=''):
2198 2198 maxindent = max(len(hangindent), len(initindent))
2199 2199 if width <= maxindent:
2200 2200 # adjust for weird terminal size
2201 2201 width = max(78, maxindent + 1)
2202 2202 line = line.decode(encoding.encoding, encoding.encodingmode)
2203 2203 initindent = initindent.decode(encoding.encoding, encoding.encodingmode)
2204 2204 hangindent = hangindent.decode(encoding.encoding, encoding.encodingmode)
2205 2205 wrapper = MBTextWrapper(width=width,
2206 2206 initial_indent=initindent,
2207 2207 subsequent_indent=hangindent)
2208 2208 return wrapper.fill(line).encode(encoding.encoding)
2209 2209
2210 2210 if (pyplatform.python_implementation() == 'CPython' and
2211 2211 sys.version_info < (3, 0)):
2212 2212 # There is an issue in CPython that some IO methods do not handle EINTR
2213 2213 # correctly. The following table shows which CPython versions (and functions)
2214 2214 # are affected (buggy: has the EINTR bug, okay: otherwise):
2215 2215 #
2216 2216 # | < 2.7.4 | 2.7.4 to 2.7.12 | >= 3.0
2217 2217 # --------------------------------------------------
2218 2218 # fp.__iter__ | buggy | buggy | okay
2219 2219 # fp.read* | buggy | okay [1] | okay
2220 2220 #
2221 2221 # [1]: fixed by changeset 67dc99a989cd in the cpython hg repo.
2222 2222 #
2223 2223 # Here we work around the EINTR issue for fileobj.__iter__. Other methods
2224 2224 # like "read*" are ignored for now, as Python < 2.7.4 is a minority.
2225 2225 #
2226 2226 # Although we can work around the EINTR issue for fp.__iter__, it is slower:
2227 2227 # "for x in fp" is 4x faster than "for x in iter(fp.readline, '')" in
2228 2228 # CPython 2, because CPython 2 maintains an internal readahead buffer for
2229 2229 # fp.__iter__ but not other fp.read* methods.
2230 2230 #
2231 2231 # On modern systems like Linux, the "read" syscall cannot be interrupted
2232 2232 # when reading "fast" files like on-disk files. So the EINTR issue only
2233 2233 # affects things like pipes, sockets, ttys etc. We treat "normal" (S_ISREG)
2234 2234 # files approximately as "fast" files and use the fast (unsafe) code path,
2235 2235 # to minimize the performance impact.
2236 2236 if sys.version_info >= (2, 7, 4):
2237 2237 # fp.readline deals with EINTR correctly, use it as a workaround.
2238 2238 def _safeiterfile(fp):
2239 2239 return iter(fp.readline, '')
2240 2240 else:
2241 2241 # fp.read* are broken too, manually deal with EINTR in a stupid way.
2242 2242 # note: this may block longer than necessary because of bufsize.
2243 2243 def _safeiterfile(fp, bufsize=4096):
2244 2244 fd = fp.fileno()
2245 2245 line = ''
2246 2246 while True:
2247 2247 try:
2248 2248 buf = os.read(fd, bufsize)
2249 2249 except OSError as ex:
2250 2250 # os.read only raises EINTR before any data is read
2251 2251 if ex.errno == errno.EINTR:
2252 2252 continue
2253 2253 else:
2254 2254 raise
2255 2255 line += buf
2256 2256 if '\n' in buf:
2257 2257 splitted = line.splitlines(True)
2258 2258 line = ''
2259 2259 for l in splitted:
2260 2260 if l[-1] == '\n':
2261 2261 yield l
2262 2262 else:
2263 2263 line = l
2264 2264 if not buf:
2265 2265 break
2266 2266 if line:
2267 2267 yield line
2268 2268
2269 2269 def iterfile(fp):
2270 2270 fastpath = True
2271 2271 if type(fp) is file:
2272 2272 fastpath = stat.S_ISREG(os.fstat(fp.fileno()).st_mode)
2273 2273 if fastpath:
2274 2274 return fp
2275 2275 else:
2276 2276 return _safeiterfile(fp)
2277 2277 else:
2278 2278 # PyPy and CPython 3 do not have the EINTR issue thus no workaround needed.
2279 2279 def iterfile(fp):
2280 2280 return fp
2281 2281
2282 2282 def iterlines(iterator):
2283 2283 for chunk in iterator:
2284 2284 for line in chunk.splitlines():
2285 2285 yield line
2286 2286
2287 2287 def expandpath(path):
2288 2288 return os.path.expanduser(os.path.expandvars(path))
2289 2289
2290 2290 def hgcmd():
2291 2291 """Return the command used to execute current hg
2292 2292
2293 2293 This is different from hgexecutable() because on Windows we want
2294 2294 to avoid things opening new shell windows like batch files, so we
2295 2295 get either the python call or current executable.
2296 2296 """
2297 2297 if mainfrozen():
2298 2298 if getattr(sys, 'frozen', None) == 'macosx_app':
2299 2299 # Env variable set by py2app
2300 return [os.environ['EXECUTABLEPATH']]
2300 return [encoding.environ['EXECUTABLEPATH']]
2301 2301 else:
2302 2302 return [sys.executable]
2303 2303 return gethgcmd()
2304 2304
2305 2305 def rundetached(args, condfn):
2306 2306 """Execute the argument list in a detached process.
2307 2307
2308 2308 condfn is a callable which is called repeatedly and should return
2309 2309 True once the child process is known to have started successfully.
2310 2310 At this point, the child process PID is returned. If the child
2311 2311 process fails to start or finishes before condfn() evaluates to
2312 2312 True, return -1.
2313 2313 """
2314 2314 # Windows case is easier because the child process is either
2315 2315 # successfully starting and validating the condition or exiting
2316 2316 # on failure. We just poll on its PID. On Unix, if the child
2317 2317 # process fails to start, it will be left in a zombie state until
2318 2318 # the parent waits on it, which we cannot do since we expect a long
2319 2319 # running process on success. Instead we listen for SIGCHLD telling
2320 2320 # us our child process terminated.
2321 2321 terminated = set()
2322 2322 def handler(signum, frame):
2323 2323 terminated.add(os.wait())
2324 2324 prevhandler = None
2325 2325 SIGCHLD = getattr(signal, 'SIGCHLD', None)
2326 2326 if SIGCHLD is not None:
2327 2327 prevhandler = signal.signal(SIGCHLD, handler)
2328 2328 try:
2329 2329 pid = spawndetached(args)
2330 2330 while not condfn():
2331 2331 if ((pid in terminated or not testpid(pid))
2332 2332 and not condfn()):
2333 2333 return -1
2334 2334 time.sleep(0.1)
2335 2335 return pid
2336 2336 finally:
2337 2337 if prevhandler is not None:
2338 2338 signal.signal(signal.SIGCHLD, prevhandler)
2339 2339
2340 2340 def interpolate(prefix, mapping, s, fn=None, escape_prefix=False):
2341 2341 """Return the result of interpolating items in the mapping into string s.
2342 2342
2343 2343 prefix is a single character string, or a two character string with
2344 2344 a backslash as the first character if the prefix needs to be escaped in
2345 2345 a regular expression.
2346 2346
2347 2347 fn is an optional function that will be applied to the replacement text
2348 2348 just before replacement.
2349 2349
2350 2350 escape_prefix is an optional flag that allows using doubled prefix for
2351 2351 its escaping.
2352 2352 """
2353 2353 fn = fn or (lambda s: s)
2354 2354 patterns = '|'.join(mapping.keys())
2355 2355 if escape_prefix:
2356 2356 patterns += '|' + prefix
2357 2357 if len(prefix) > 1:
2358 2358 prefix_char = prefix[1:]
2359 2359 else:
2360 2360 prefix_char = prefix
2361 2361 mapping[prefix_char] = prefix_char
2362 2362 r = remod.compile(r'%s(%s)' % (prefix, patterns))
2363 2363 return r.sub(lambda x: fn(mapping[x.group()[1:]]), s)
2364 2364
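A hypothetical substitution with interpolate(); the prefix, mapping keys and input string are illustrative only:

    from mercurial import util

    util.interpolate('%', {'user': 'alice', 'repo': 'wide'},
                     'pushed to %repo by %user')
    # -> 'pushed to wide by alice'
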
2365 2365 def getport(port):
2366 2366 """Return the port for a given network service.
2367 2367
2368 2368 If port is an integer, it's returned as is. If it's a string, it's
2369 2369 looked up using socket.getservbyname(). If there's no matching
2370 2370 service, error.Abort is raised.
2371 2371 """
2372 2372 try:
2373 2373 return int(port)
2374 2374 except ValueError:
2375 2375 pass
2376 2376
2377 2377 try:
2378 2378 return socket.getservbyname(port)
2379 2379 except socket.error:
2380 2380 raise Abort(_("no port number associated with service '%s'") % port)
2381 2381
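Two illustrative lookups with getport(), assuming the 'http' service is present in the local services database:

    from mercurial import util

    util.getport('8080')    # -> 8080
    util.getport('http')    # -> 80, via socket.getservbyname()
    # an unknown service name raises error.Abort
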
2382 2382 _booleans = {'1': True, 'yes': True, 'true': True, 'on': True, 'always': True,
2383 2383 '0': False, 'no': False, 'false': False, 'off': False,
2384 2384 'never': False}
2385 2385
2386 2386 def parsebool(s):
2387 2387 """Parse s into a boolean.
2388 2388
2389 2389 If s is not a valid boolean, returns None.
2390 2390 """
2391 2391 return _booleans.get(s.lower(), None)
2392 2392
2393 2393 _hextochr = dict((a + b, chr(int(a + b, 16)))
2394 2394 for a in string.hexdigits for b in string.hexdigits)
2395 2395
2396 2396 class url(object):
2397 2397 r"""Reliable URL parser.
2398 2398
2399 2399 This parses URLs and provides attributes for the following
2400 2400 components:
2401 2401
2402 2402 <scheme>://<user>:<passwd>@<host>:<port>/<path>?<query>#<fragment>
2403 2403
2404 2404 Missing components are set to None. The only exception is
2405 2405 fragment, which is set to '' if present but empty.
2406 2406
2407 2407 If parsefragment is False, fragment is included in query. If
2408 2408 parsequery is False, query is included in path. If both are
2409 2409 False, both fragment and query are included in path.
2410 2410
2411 2411 See http://www.ietf.org/rfc/rfc2396.txt for more information.
2412 2412
2413 2413 Note that for backward compatibility reasons, bundle URLs do not
2414 2414 take host names. That means 'bundle://../' has a path of '../'.
2415 2415
2416 2416 Examples:
2417 2417
2418 2418 >>> url('http://www.ietf.org/rfc/rfc2396.txt')
2419 2419 <url scheme: 'http', host: 'www.ietf.org', path: 'rfc/rfc2396.txt'>
2420 2420 >>> url('ssh://[::1]:2200//home/joe/repo')
2421 2421 <url scheme: 'ssh', host: '[::1]', port: '2200', path: '/home/joe/repo'>
2422 2422 >>> url('file:///home/joe/repo')
2423 2423 <url scheme: 'file', path: '/home/joe/repo'>
2424 2424 >>> url('file:///c:/temp/foo/')
2425 2425 <url scheme: 'file', path: 'c:/temp/foo/'>
2426 2426 >>> url('bundle:foo')
2427 2427 <url scheme: 'bundle', path: 'foo'>
2428 2428 >>> url('bundle://../foo')
2429 2429 <url scheme: 'bundle', path: '../foo'>
2430 2430 >>> url(r'c:\foo\bar')
2431 2431 <url path: 'c:\\foo\\bar'>
2432 2432 >>> url(r'\\blah\blah\blah')
2433 2433 <url path: '\\\\blah\\blah\\blah'>
2434 2434 >>> url(r'\\blah\blah\blah#baz')
2435 2435 <url path: '\\\\blah\\blah\\blah', fragment: 'baz'>
2436 2436 >>> url(r'file:///C:\users\me')
2437 2437 <url scheme: 'file', path: 'C:\\users\\me'>
2438 2438
2439 2439 Authentication credentials:
2440 2440
2441 2441 >>> url('ssh://joe:xyz@x/repo')
2442 2442 <url scheme: 'ssh', user: 'joe', passwd: 'xyz', host: 'x', path: 'repo'>
2443 2443 >>> url('ssh://joe@x/repo')
2444 2444 <url scheme: 'ssh', user: 'joe', host: 'x', path: 'repo'>
2445 2445
2446 2446 Query strings and fragments:
2447 2447
2448 2448 >>> url('http://host/a?b#c')
2449 2449 <url scheme: 'http', host: 'host', path: 'a', query: 'b', fragment: 'c'>
2450 2450 >>> url('http://host/a?b#c', parsequery=False, parsefragment=False)
2451 2451 <url scheme: 'http', host: 'host', path: 'a?b#c'>
2452 2452
2453 2453 Empty path:
2454 2454
2455 2455 >>> url('')
2456 2456 <url path: ''>
2457 2457 >>> url('#a')
2458 2458 <url path: '', fragment: 'a'>
2459 2459 >>> url('http://host/')
2460 2460 <url scheme: 'http', host: 'host', path: ''>
2461 2461 >>> url('http://host/#a')
2462 2462 <url scheme: 'http', host: 'host', path: '', fragment: 'a'>
2463 2463
2464 2464 Only scheme:
2465 2465
2466 2466 >>> url('http:')
2467 2467 <url scheme: 'http'>
2468 2468 """
2469 2469
2470 2470 _safechars = "!~*'()+"
2471 2471 _safepchars = "/!~*'()+:\\"
2472 2472 _matchscheme = remod.compile('^[a-zA-Z0-9+.\\-]+:').match
2473 2473
2474 2474 def __init__(self, path, parsequery=True, parsefragment=True):
2475 2475 # We slowly chomp away at path until we have only the path left
2476 2476 self.scheme = self.user = self.passwd = self.host = None
2477 2477 self.port = self.path = self.query = self.fragment = None
2478 2478 self._localpath = True
2479 2479 self._hostport = ''
2480 2480 self._origpath = path
2481 2481
2482 2482 if parsefragment and '#' in path:
2483 2483 path, self.fragment = path.split('#', 1)
2484 2484
2485 2485 # special case for Windows drive letters and UNC paths
2486 2486 if hasdriveletter(path) or path.startswith('\\\\'):
2487 2487 self.path = path
2488 2488 return
2489 2489
2490 2490 # For compatibility reasons, we can't handle bundle paths as
2491 2491 # normal URLS
2492 2492 if path.startswith('bundle:'):
2493 2493 self.scheme = 'bundle'
2494 2494 path = path[7:]
2495 2495 if path.startswith('//'):
2496 2496 path = path[2:]
2497 2497 self.path = path
2498 2498 return
2499 2499
2500 2500 if self._matchscheme(path):
2501 2501 parts = path.split(':', 1)
2502 2502 if parts[0]:
2503 2503 self.scheme, path = parts
2504 2504 self._localpath = False
2505 2505
2506 2506 if not path:
2507 2507 path = None
2508 2508 if self._localpath:
2509 2509 self.path = ''
2510 2510 return
2511 2511 else:
2512 2512 if self._localpath:
2513 2513 self.path = path
2514 2514 return
2515 2515
2516 2516 if parsequery and '?' in path:
2517 2517 path, self.query = path.split('?', 1)
2518 2518 if not path:
2519 2519 path = None
2520 2520 if not self.query:
2521 2521 self.query = None
2522 2522
2523 2523 # // is required to specify a host/authority
2524 2524 if path and path.startswith('//'):
2525 2525 parts = path[2:].split('/', 1)
2526 2526 if len(parts) > 1:
2527 2527 self.host, path = parts
2528 2528 else:
2529 2529 self.host = parts[0]
2530 2530 path = None
2531 2531 if not self.host:
2532 2532 self.host = None
2533 2533 # path of file:///d is /d
2534 2534 # path of file:///d:/ is d:/, not /d:/
2535 2535 if path and not hasdriveletter(path):
2536 2536 path = '/' + path
2537 2537
2538 2538 if self.host and '@' in self.host:
2539 2539 self.user, self.host = self.host.rsplit('@', 1)
2540 2540 if ':' in self.user:
2541 2541 self.user, self.passwd = self.user.split(':', 1)
2542 2542 if not self.host:
2543 2543 self.host = None
2544 2544
2545 2545 # Don't split on colons in IPv6 addresses without ports
2546 2546 if (self.host and ':' in self.host and
2547 2547 not (self.host.startswith('[') and self.host.endswith(']'))):
2548 2548 self._hostport = self.host
2549 2549 self.host, self.port = self.host.rsplit(':', 1)
2550 2550 if not self.host:
2551 2551 self.host = None
2552 2552
2553 2553 if (self.host and self.scheme == 'file' and
2554 2554 self.host not in ('localhost', '127.0.0.1', '[::1]')):
2555 2555 raise Abort(_('file:// URLs can only refer to localhost'))
2556 2556
2557 2557 self.path = path
2558 2558
2559 2559 # leave the query string escaped
2560 2560 for a in ('user', 'passwd', 'host', 'port',
2561 2561 'path', 'fragment'):
2562 2562 v = getattr(self, a)
2563 2563 if v is not None:
2564 2564 setattr(self, a, pycompat.urlunquote(v))
2565 2565
2566 2566 def __repr__(self):
2567 2567 attrs = []
2568 2568 for a in ('scheme', 'user', 'passwd', 'host', 'port', 'path',
2569 2569 'query', 'fragment'):
2570 2570 v = getattr(self, a)
2571 2571 if v is not None:
2572 2572 attrs.append('%s: %r' % (a, v))
2573 2573 return '<url %s>' % ', '.join(attrs)
2574 2574
2575 2575 def __str__(self):
2576 2576 r"""Join the URL's components back into a URL string.
2577 2577
2578 2578 Examples:
2579 2579
2580 2580 >>> str(url('http://user:pw@host:80/c:/bob?fo:oo#ba:ar'))
2581 2581 'http://user:pw@host:80/c:/bob?fo:oo#ba:ar'
2582 2582 >>> str(url('http://user:pw@host:80/?foo=bar&baz=42'))
2583 2583 'http://user:pw@host:80/?foo=bar&baz=42'
2584 2584 >>> str(url('http://user:pw@host:80/?foo=bar%3dbaz'))
2585 2585 'http://user:pw@host:80/?foo=bar%3dbaz'
2586 2586 >>> str(url('ssh://user:pw@[::1]:2200//home/joe#'))
2587 2587 'ssh://user:pw@[::1]:2200//home/joe#'
2588 2588 >>> str(url('http://localhost:80//'))
2589 2589 'http://localhost:80//'
2590 2590 >>> str(url('http://localhost:80/'))
2591 2591 'http://localhost:80/'
2592 2592 >>> str(url('http://localhost:80'))
2593 2593 'http://localhost:80/'
2594 2594 >>> str(url('bundle:foo'))
2595 2595 'bundle:foo'
2596 2596 >>> str(url('bundle://../foo'))
2597 2597 'bundle:../foo'
2598 2598 >>> str(url('path'))
2599 2599 'path'
2600 2600 >>> str(url('file:///tmp/foo/bar'))
2601 2601 'file:///tmp/foo/bar'
2602 2602 >>> str(url('file:///c:/tmp/foo/bar'))
2603 2603 'file:///c:/tmp/foo/bar'
2604 2604 >>> print url(r'bundle:foo\bar')
2605 2605 bundle:foo\bar
2606 2606 >>> print url(r'file:///D:\data\hg')
2607 2607 file:///D:\data\hg
2608 2608 """
2609 2609 if self._localpath:
2610 2610 s = self.path
2611 2611 if self.scheme == 'bundle':
2612 2612 s = 'bundle:' + s
2613 2613 if self.fragment:
2614 2614 s += '#' + self.fragment
2615 2615 return s
2616 2616
2617 2617 s = self.scheme + ':'
2618 2618 if self.user or self.passwd or self.host:
2619 2619 s += '//'
2620 2620 elif self.scheme and (not self.path or self.path.startswith('/')
2621 2621 or hasdriveletter(self.path)):
2622 2622 s += '//'
2623 2623 if hasdriveletter(self.path):
2624 2624 s += '/'
2625 2625 if self.user:
2626 2626 s += urlreq.quote(self.user, safe=self._safechars)
2627 2627 if self.passwd:
2628 2628 s += ':' + urlreq.quote(self.passwd, safe=self._safechars)
2629 2629 if self.user or self.passwd:
2630 2630 s += '@'
2631 2631 if self.host:
2632 2632 if not (self.host.startswith('[') and self.host.endswith(']')):
2633 2633 s += urlreq.quote(self.host)
2634 2634 else:
2635 2635 s += self.host
2636 2636 if self.port:
2637 2637 s += ':' + urlreq.quote(self.port)
2638 2638 if self.host:
2639 2639 s += '/'
2640 2640 if self.path:
2641 2641 # TODO: similar to the query string, we should not unescape the
2642 2642 # path when we store it, the path might contain '%2f' = '/',
2643 2643 # which we should *not* escape.
2644 2644 s += urlreq.quote(self.path, safe=self._safepchars)
2645 2645 if self.query:
2646 2646 # we store the query in escaped form.
2647 2647 s += '?' + self.query
2648 2648 if self.fragment is not None:
2649 2649 s += '#' + urlreq.quote(self.fragment, safe=self._safepchars)
2650 2650 return s
2651 2651
2652 2652 def authinfo(self):
2653 2653 user, passwd = self.user, self.passwd
2654 2654 try:
2655 2655 self.user, self.passwd = None, None
2656 2656 s = str(self)
2657 2657 finally:
2658 2658 self.user, self.passwd = user, passwd
2659 2659 if not self.user:
2660 2660 return (s, None)
2661 2661 # authinfo[1] is passed to urllib2 password manager, and its
2662 2662 # URIs must not contain credentials. The host is passed in the
2663 2663 # URIs list because Python < 2.4.3 uses only that to search for
2664 2664 # a password.
2665 2665 return (s, (None, (s, self.host),
2666 2666 self.user, self.passwd or ''))
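
For orientation, a hedged sketch of what authinfo() yields for a URL carrying credentials; the hostname and credentials are purely illustrative:

    u = url('http://alice:secret@example.com/repo')
    base, auth = u.authinfo()
    # base == 'http://example.com/repo'       (credentials stripped)
    # auth == (None, (base, 'example.com'), 'alice', 'secret')
    #          realm, URI list for urllib2,   user,    password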
2667 2667
2668 2668 def isabs(self):
2669 2669 if self.scheme and self.scheme != 'file':
2670 2670 return True # remote URL
2671 2671 if hasdriveletter(self.path):
2672 2672 return True # absolute for our purposes - can't be joined()
2673 2673 if self.path.startswith(r'\\'):
2674 2674 return True # Windows UNC path
2675 2675 if self.path.startswith('/'):
2676 2676 return True # POSIX-style
2677 2677 return False
2678 2678
2679 2679 def localpath(self):
2680 2680 if self.scheme == 'file' or self.scheme == 'bundle':
2681 2681 path = self.path or '/'
2682 2682 # For Windows, we need to promote hosts containing drive
2683 2683 # letters to paths with drive letters.
2684 2684 if hasdriveletter(self._hostport):
2685 2685 path = self._hostport + '/' + self.path
2686 2686 elif (self.host is not None and self.path
2687 2687 and not hasdriveletter(path)):
2688 2688 path = '/' + path
2689 2689 return path
2690 2690 return self._origpath
2691 2691
2692 2692 def islocal(self):
2693 2693 '''whether localpath will return something that posixfile can open'''
2694 2694 return (not self.scheme or self.scheme == 'file'
2695 2695 or self.scheme == 'bundle')
2696 2696
2697 2697 def hasscheme(path):
2698 2698 return bool(url(path).scheme)
2699 2699
2700 2700 def hasdriveletter(path):
2701 2701 return path and path[1:2] == ':' and path[0:1].isalpha()
2702 2702
2703 2703 def urllocalpath(path):
2704 2704 return url(path, parsequery=False, parsefragment=False).localpath()
2705 2705
2706 2706 def hidepassword(u):
2707 2707 '''hide user credential in a url string'''
2708 2708 u = url(u)
2709 2709 if u.passwd:
2710 2710 u.passwd = '***'
2711 2711 return str(u)
2712 2712
2713 2713 def removeauth(u):
2714 2714 '''remove all authentication information from a url string'''
2715 2715 u = url(u)
2716 2716 u.user = u.passwd = None
2717 2717 return str(u)
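
A small illustrative pairing of the two helpers above (the URL is made up; the '***' survives unescaped assuming the url class's _safechars leaves '*' alone, as in Mercurial):

    hidepassword('http://alice:secret@example.com/repo')
    # -> 'http://alice:***@example.com/repo'
    removeauth('http://alice:secret@example.com/repo')
    # -> 'http://example.com/repo'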
2718 2718
2719 2719 def isatty(fp):
2720 2720 try:
2721 2721 return fp.isatty()
2722 2722 except AttributeError:
2723 2723 return False
2724 2724
2725 2725 timecount = unitcountfn(
2726 2726 (1, 1e3, _('%.0f s')),
2727 2727 (100, 1, _('%.1f s')),
2728 2728 (10, 1, _('%.2f s')),
2729 2729 (1, 1, _('%.3f s')),
2730 2730 (100, 0.001, _('%.1f ms')),
2731 2731 (10, 0.001, _('%.2f ms')),
2732 2732 (1, 0.001, _('%.3f ms')),
2733 2733 (100, 0.000001, _('%.1f us')),
2734 2734 (10, 0.000001, _('%.2f us')),
2735 2735 (1, 0.000001, _('%.3f us')),
2736 2736 (100, 0.000000001, _('%.1f ns')),
2737 2737 (10, 0.000000001, _('%.2f ns')),
2738 2738 (1, 0.000000001, _('%.3f ns')),
2739 2739 )
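
Illustrative outputs, assuming unitcountfn (defined earlier in util.py) formats a value with the first (multiplier, divisor) tier whose threshold it meets:

    timecount(3.1)         # -> '3.100 s'
    timecount(0.0042)      # -> '4.200 ms'
    timecount(0.0000075)   # -> '7.500 us'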
2740 2740
2741 2741 _timenesting = [0]
2742 2742
2743 2743 def timed(func):
2744 2744 '''Report the execution time of a function call to stderr.
2745 2745
2746 2746 During development, use as a decorator when you need to measure
2747 2747 the cost of a function, e.g. as follows:
2748 2748
2749 2749 @util.timed
2750 2750 def foo(a, b, c):
2751 2751 pass
2752 2752 '''
2753 2753
2754 2754 def wrapper(*args, **kwargs):
2755 2755 start = time.time()
2756 2756 indent = 2
2757 2757 _timenesting[0] += indent
2758 2758 try:
2759 2759 return func(*args, **kwargs)
2760 2760 finally:
2761 2761 elapsed = time.time() - start
2762 2762 _timenesting[0] -= indent
2763 2763 stderr.write('%s%s: %s\n' %
2764 2764 (' ' * _timenesting[0], func.__name__,
2765 2765 timecount(elapsed)))
2766 2766 return wrapper
2767 2767
2768 2768 _sizeunits = (('m', 2**20), ('k', 2**10), ('g', 2**30),
2769 2769 ('kb', 2**10), ('mb', 2**20), ('gb', 2**30), ('b', 1))
2770 2770
2771 2771 def sizetoint(s):
2772 2772 '''Convert a space specifier to a byte count.
2773 2773
2774 2774 >>> sizetoint('30')
2775 2775 30
2776 2776 >>> sizetoint('2.2kb')
2777 2777 2252
2778 2778 >>> sizetoint('6M')
2779 2779 6291456
2780 2780 '''
2781 2781 t = s.strip().lower()
2782 2782 try:
2783 2783 for k, u in _sizeunits:
2784 2784 if t.endswith(k):
2785 2785 return int(float(t[:-len(k)]) * u)
2786 2786 return int(t)
2787 2787 except ValueError:
2788 2788 raise error.ParseError(_("couldn't parse size: %s") % s)
2789 2789
2790 2790 class hooks(object):
2791 2791 '''A collection of hook functions that can be used to extend a
2792 2792 function's behavior. Hooks are called in lexicographic order,
2793 2793 based on the names of their sources.'''
2794 2794
2795 2795 def __init__(self):
2796 2796 self._hooks = []
2797 2797
2798 2798 def add(self, source, hook):
2799 2799 self._hooks.append((source, hook))
2800 2800
2801 2801 def __call__(self, *args):
2802 2802 self._hooks.sort(key=lambda x: x[0])
2803 2803 results = []
2804 2804 for source, hook in self._hooks:
2805 2805 results.append(hook(*args))
2806 2806 return results
2807 2807
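A hedged usage sketch of the hooks multiplexer; the source names and callables are invented for illustration:

    h = hooks()
    h.add('zebra', lambda x: x + 1)
    h.add('apple', lambda x: x * 2)
    h(3)  # -> [6, 4]: 'apple' sorts before 'zebra', so it runs first
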
2808 2808 def getstackframes(skip=0, line=' %-*s in %s\n', fileline='%s:%s'):
2809 2809 '''Yields lines for a nicely formatted stacktrace.
2810 2810 Skips the 'skip' last entries.
2811 2811 Each file+linenumber is formatted according to fileline.
2812 2812 Each line is formatted according to line.
2813 2813 If line is None, it yields:
2814 2814 length of longest filepath+line number,
2815 2815 filepath+linenumber,
2816 2816 function
2817 2817
2818 2818 Not to be used in production code but very convenient while developing.
2819 2819 '''
2820 2820 entries = [(fileline % (fn, ln), func)
2821 2821 for fn, ln, func, _text in traceback.extract_stack()[:-skip - 1]]
2822 2822 if entries:
2823 2823 fnmax = max(len(entry[0]) for entry in entries)
2824 2824 for fnln, func in entries:
2825 2825 if line is None:
2826 2826 yield (fnmax, fnln, func)
2827 2827 else:
2828 2828 yield line % (fnmax, fnln, func)
2829 2829
2830 2830 def debugstacktrace(msg='stacktrace', skip=0, f=stderr, otherf=stdout):
2831 2831 '''Writes a message to f (stderr) with a nicely formatted stacktrace.
2832 2832 Skips the 'skip' last entries. By default it will flush stdout first.
2833 2833 It can be used everywhere and intentionally does not require an ui object.
2834 2834 Not to be used in production code but very convenient while developing.
2835 2835 '''
2836 2836 if otherf:
2837 2837 otherf.flush()
2838 2838 f.write('%s at:\n' % msg)
2839 2839 for line in getstackframes(skip + 1):
2840 2840 f.write(line)
2841 2841 f.flush()
2842 2842
2843 2843 class dirs(object):
2844 2844 '''a multiset of directory names from a dirstate or manifest'''
2845 2845
2846 2846 def __init__(self, map, skip=None):
2847 2847 self._dirs = {}
2848 2848 addpath = self.addpath
2849 2849 if safehasattr(map, 'iteritems') and skip is not None:
2850 2850 for f, s in map.iteritems():
2851 2851 if s[0] != skip:
2852 2852 addpath(f)
2853 2853 else:
2854 2854 for f in map:
2855 2855 addpath(f)
2856 2856
2857 2857 def addpath(self, path):
2858 2858 dirs = self._dirs
2859 2859 for base in finddirs(path):
2860 2860 if base in dirs:
2861 2861 dirs[base] += 1
2862 2862 return
2863 2863 dirs[base] = 1
2864 2864
2865 2865 def delpath(self, path):
2866 2866 dirs = self._dirs
2867 2867 for base in finddirs(path):
2868 2868 if dirs[base] > 1:
2869 2869 dirs[base] -= 1
2870 2870 return
2871 2871 del dirs[base]
2872 2872
2873 2873 def __iter__(self):
2874 2874 return self._dirs.iterkeys()
2875 2875
2876 2876 def __contains__(self, d):
2877 2877 return d in self._dirs
2878 2878
2879 2879 if safehasattr(parsers, 'dirs'):
2880 2880 dirs = parsers.dirs
2881 2881
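A short sketch of the multiset semantics (the paths are illustrative); the pure-Python class above may be replaced by the C implementation in parsers.dirs, which behaves the same way:

    d = dirs(['a/b/c', 'a/b/d'])
    'a/b' in d            # True: two files live under a/b
    d.delpath('a/b/c')
    'a/b' in d            # still True, one entry remains
    d.delpath('a/b/d')
    'a/b' in d            # False, the directory count dropped to zero
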
2882 2882 def finddirs(path):
2883 2883 pos = path.rfind('/')
2884 2884 while pos != -1:
2885 2885 yield path[:pos]
2886 2886 pos = path.rfind('/', 0, pos)
2887 2887
2888 2888 class ctxmanager(object):
2889 2889 '''A context manager for use in 'with' blocks to allow multiple
2890 2890 contexts to be entered at once. This is both safer and more
2891 2891 flexible than contextlib.nested.
2892 2892
2893 2893 Once Mercurial supports Python 2.7+, this will become mostly
2894 2894 unnecessary.
2895 2895 '''
2896 2896
2897 2897 def __init__(self, *args):
2898 2898 '''Accepts a list of no-argument functions that return context
2899 2899 managers. These will be invoked when enter() is called.'''
2900 2900 self._pending = args
2901 2901 self._atexit = []
2902 2902
2903 2903 def __enter__(self):
2904 2904 return self
2905 2905
2906 2906 def enter(self):
2907 2907 '''Create and enter context managers in the order in which they were
2908 2908 passed to the constructor.'''
2909 2909 values = []
2910 2910 for func in self._pending:
2911 2911 obj = func()
2912 2912 values.append(obj.__enter__())
2913 2913 self._atexit.append(obj.__exit__)
2914 2914 del self._pending
2915 2915 return values
2916 2916
2917 2917 def atexit(self, func, *args, **kwargs):
2918 2918 '''Add a function to call when this context manager exits. The
2919 2919 ordering of multiple atexit calls is unspecified, save that
2920 2920 they will happen before any __exit__ functions.'''
2921 2921 def wrapper(exc_type, exc_val, exc_tb):
2922 2922 func(*args, **kwargs)
2923 2923 self._atexit.append(wrapper)
2924 2924 return func
2925 2925
2926 2926 def __exit__(self, exc_type, exc_val, exc_tb):
2927 2927 '''Context managers are exited in the reverse order from which
2928 2928 they were created.'''
2929 2929 received = exc_type is not None
2930 2930 suppressed = False
2931 2931 pending = None
2932 2932 self._atexit.reverse()
2933 2933 for exitfunc in self._atexit:
2934 2934 try:
2935 2935 if exitfunc(exc_type, exc_val, exc_tb):
2936 2936 suppressed = True
2937 2937 exc_type = None
2938 2938 exc_val = None
2939 2939 exc_tb = None
2940 2940 except BaseException:
2941 2941 pending = sys.exc_info()
2942 2942 exc_type, exc_val, exc_tb = pending = sys.exc_info()
2943 2943 del self._atexit
2944 2944 if pending:
2945 2945 raise exc_val
2946 2946 return received and suppressed
2947 2947
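A hedged usage sketch of ctxmanager; tempfile.TemporaryFile stands in for any callable returning a context manager and is not what Mercurial itself passes:

    import tempfile
    with ctxmanager(tempfile.TemporaryFile, tempfile.TemporaryFile) as c:
        f1, f2 = c.enter()      # entered in the order given to the constructor
        c.atexit(f1.flush)      # extra callback, runs before the __exit__s
        f1.write(b'spam')
    # on leaving the block the managers exit in reverse order: f2, then f1
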
2948 2948 # compression code
2949 2949
2950 2950 class compressormanager(object):
2951 2951 """Holds registrations of various compression engines.
2952 2952
2953 2953 This class essentially abstracts the differences between compression
2954 2954 engines to allow new compression formats to be added easily, possibly from
2955 2955 extensions.
2956 2956
2957 2957 Compressors are registered against the global instance by calling its
2958 2958 ``register()`` method.
2959 2959 """
2960 2960 def __init__(self):
2961 2961 self._engines = {}
2962 2962 # Bundle spec human name to engine name.
2963 2963 self._bundlenames = {}
2964 2964 # Internal bundle identifier to engine name.
2965 2965 self._bundletypes = {}
2966 2966
2967 2967 def __getitem__(self, key):
2968 2968 return self._engines[key]
2969 2969
2970 2970 def __contains__(self, key):
2971 2971 return key in self._engines
2972 2972
2973 2973 def __iter__(self):
2974 2974 return iter(self._engines.keys())
2975 2975
2976 2976 def register(self, engine):
2977 2977 """Register a compression engine with the manager.
2978 2978
2979 2979 The argument must be a ``compressionengine`` instance.
2980 2980 """
2981 2981 if not isinstance(engine, compressionengine):
2982 2982 raise ValueError(_('argument must be a compressionengine'))
2983 2983
2984 2984 name = engine.name()
2985 2985
2986 2986 if name in self._engines:
2987 2987 raise error.Abort(_('compression engine %s already registered') %
2988 2988 name)
2989 2989
2990 2990 bundleinfo = engine.bundletype()
2991 2991 if bundleinfo:
2992 2992 bundlename, bundletype = bundleinfo
2993 2993
2994 2994 if bundlename in self._bundlenames:
2995 2995 raise error.Abort(_('bundle name %s already registered') %
2996 2996 bundlename)
2997 2997 if bundletype in self._bundletypes:
2998 2998 raise error.Abort(_('bundle type %s already registered by %s') %
2999 2999 (bundletype, self._bundletypes[bundletype]))
3000 3000
3001 3001 # The external facing name is optional (may be None).
3002 3002 if bundlename:
3003 3003 self._bundlenames[bundlename] = name
3004 3004
3005 3005 self._bundletypes[bundletype] = name
3006 3006
3007 3007 self._engines[name] = engine
3008 3008
3009 3009 @property
3010 3010 def supportedbundlenames(self):
3011 3011 return set(self._bundlenames.keys())
3012 3012
3013 3013 @property
3014 3014 def supportedbundletypes(self):
3015 3015 return set(self._bundletypes.keys())
3016 3016
3017 3017 def forbundlename(self, bundlename):
3018 3018 """Obtain a compression engine registered to a bundle name.
3019 3019
3020 3020 Will raise KeyError if the bundle name isn't registered.
3021 3021
3022 3022 Will abort if the engine is known but not available.
3023 3023 """
3024 3024 engine = self._engines[self._bundlenames[bundlename]]
3025 3025 if not engine.available():
3026 3026 raise error.Abort(_('compression engine %s could not be loaded') %
3027 3027 engine.name())
3028 3028 return engine
3029 3029
3030 3030 def forbundletype(self, bundletype):
3031 3031 """Obtain a compression engine registered to a bundle type.
3032 3032
3033 3033 Will raise KeyError if the bundle type isn't registered.
3034 3034
3035 3035 Will abort if the engine is known but not available.
3036 3036 """
3037 3037 engine = self._engines[self._bundletypes[bundletype]]
3038 3038 if not engine.available():
3039 3039 raise error.Abort(_('compression engine %s could not be loaded') %
3040 3040 engine.name())
3041 3041 return engine
3042 3042
3043 3043 compengines = compressormanager()
3044 3044
3045 3045 class compressionengine(object):
3046 3046 """Base class for compression engines.
3047 3047
3048 3048 Compression engines must implement the interface defined by this class.
3049 3049 """
3050 3050 def name(self):
3051 3051 """Returns the name of the compression engine.
3052 3052
3053 3053 This is the key the engine is registered under.
3054 3054
3055 3055 This method must be implemented.
3056 3056 """
3057 3057 raise NotImplementedError()
3058 3058
3059 3059 def available(self):
3060 3060 """Whether the compression engine is available.
3061 3061
3062 3062 The intent of this method is to allow optional compression engines
3063 3063 that may not be available in all installations (such as engines relying
3064 3064 on C extensions that may not be present).
3065 3065 """
3066 3066 return True
3067 3067
3068 3068 def bundletype(self):
3069 3069 """Describes bundle identifiers for this engine.
3070 3070
3071 3071 If this compression engine isn't supported for bundles, returns None.
3072 3072
3073 3073 If this engine can be used for bundles, returns a 2-tuple of strings of
3074 3074 the user-facing "bundle spec" compression name and an internal
3075 3075 identifier used to denote the compression format within bundles. To
3076 3076 exclude the name from external usage, set the first element to ``None``.
3077 3077
3078 3078 If bundle compression is supported, the class must also implement
3079 3079 ``compressstream`` and ``decompressorreader``.
3080 3080 """
3081 3081 return None
3082 3082
3083 3083 def compressstream(self, it, opts=None):
3084 3084 """Compress an iterator of chunks.
3085 3085
3086 3086 The method receives an iterator (ideally a generator) of chunks of
3087 3087 bytes to be compressed. It returns an iterator (ideally a generator)
3088 3088 of bytes of chunks representing the compressed output.
3089 3089
3090 3090 Optionally accepts an argument defining how to perform compression.
3091 3091 Each engine treats this argument differently.
3092 3092 """
3093 3093 raise NotImplementedError()
3094 3094
3095 3095 def decompressorreader(self, fh):
3096 3096 """Perform decompression on a file object.
3097 3097
3098 3098 Argument is an object with a ``read(size)`` method that returns
3099 3099 compressed data. Return value is an object with a ``read(size)`` that
3100 3100 returns uncompressed data.
3101 3101 """
3102 3102 raise NotImplementedError()
3103 3103
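To make the interface concrete, a minimal sketch of an engine an extension might register; the 'xz'/'XZ' names, the lzma module, and the reuse of filechunkiter/chunkbuffer (helpers defined earlier in util.py) are assumptions for illustration, not part of Mercurial:

    import lzma

    class _xzengine(compressionengine):
        def name(self):
            return 'xz'

        def bundletype(self):
            return 'xz', 'XZ'

        def compressstream(self, it, opts=None):
            z = lzma.LZMACompressor()
            for chunk in it:
                data = z.compress(chunk)
                if data:        # compress() may buffer and return nothing yet
                    yield data
            yield z.flush()

        def decompressorreader(self, fh):
            def gen():
                d = lzma.LZMADecompressor()
                for chunk in filechunkiter(fh):
                    yield d.decompress(chunk)
            return chunkbuffer(gen())

    compengines.register(_xzengine())
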
3104 3104 class _zlibengine(compressionengine):
3105 3105 def name(self):
3106 3106 return 'zlib'
3107 3107
3108 3108 def bundletype(self):
3109 3109 return 'gzip', 'GZ'
3110 3110
3111 3111 def compressstream(self, it, opts=None):
3112 3112 opts = opts or {}
3113 3113
3114 3114 z = zlib.compressobj(opts.get('level', -1))
3115 3115 for chunk in it:
3116 3116 data = z.compress(chunk)
3117 3117 # Not all calls to compress emit data. It is cheaper to inspect
3119 3119 # here than to feed empty chunks through the generator.
3119 3119 if data:
3120 3120 yield data
3121 3121
3122 3122 yield z.flush()
3123 3123
3124 3124 def decompressorreader(self, fh):
3125 3125 def gen():
3126 3126 d = zlib.decompressobj()
3127 3127 for chunk in filechunkiter(fh):
3128 3128 while chunk:
3129 3129 # Limit output size to limit memory.
3130 3130 yield d.decompress(chunk, 2 ** 18)
3131 3131 chunk = d.unconsumed_tail
3132 3132
3133 3133 return chunkbuffer(gen())
3134 3134
3135 3135 compengines.register(_zlibengine())
3136 3136
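A hedged round-trip through the engine registered above; chunkbuffer's read() (defined earlier in util.py) is assumed to drain the decompressed stream:

    import io
    eng = compengines.forbundletype('GZ')               # the zlib engine
    compressed = b''.join(eng.compressstream([b'hello ', b'world']))
    reader = eng.decompressorreader(io.BytesIO(compressed))
    assert reader.read(1024) == b'hello world'
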
3137 3137 class _bz2engine(compressionengine):
3138 3138 def name(self):
3139 3139 return 'bz2'
3140 3140
3141 3141 def bundletype(self):
3142 3142 return 'bzip2', 'BZ'
3143 3143
3144 3144 def compressstream(self, it, opts=None):
3145 3145 opts = opts or {}
3146 3146 z = bz2.BZ2Compressor(opts.get('level', 9))
3147 3147 for chunk in it:
3148 3148 data = z.compress(chunk)
3149 3149 if data:
3150 3150 yield data
3151 3151
3152 3152 yield z.flush()
3153 3153
3154 3154 def decompressorreader(self, fh):
3155 3155 def gen():
3156 3156 d = bz2.BZ2Decompressor()
3157 3157 for chunk in filechunkiter(fh):
3158 3158 yield d.decompress(chunk)
3159 3159
3160 3160 return chunkbuffer(gen())
3161 3161
3162 3162 compengines.register(_bz2engine())
3163 3163
3164 3164 class _truncatedbz2engine(compressionengine):
3165 3165 def name(self):
3166 3166 return 'bz2truncated'
3167 3167
3168 3168 def bundletype(self):
3169 3169 return None, '_truncatedBZ'
3170 3170
3171 3171 # We don't implement compressstream because it is hackily handled elsewhere.
3172 3172
3173 3173 def decompressorreader(self, fh):
3174 3174 def gen():
3175 3175 # The input stream doesn't have the 'BZ' header. So add it back.
3176 3176 d = bz2.BZ2Decompressor()
3177 3177 d.decompress('BZ')
3178 3178 for chunk in filechunkiter(fh):
3179 3179 yield d.decompress(chunk)
3180 3180
3181 3181 return chunkbuffer(gen())
3182 3182
3183 3183 compengines.register(_truncatedbz2engine())
3184 3184
3185 3185 class _noopengine(compressionengine):
3186 3186 def name(self):
3187 3187 return 'none'
3188 3188
3189 3189 def bundletype(self):
3190 3190 return 'none', 'UN'
3191 3191
3192 3192 def compressstream(self, it, opts=None):
3193 3193 return it
3194 3194
3195 3195 def decompressorreader(self, fh):
3196 3196 return fh
3197 3197
3198 3198 compengines.register(_noopengine())
3199 3199
3200 3200 class _zstdengine(compressionengine):
3201 3201 def name(self):
3202 3202 return 'zstd'
3203 3203
3204 3204 @propertycache
3205 3205 def _module(self):
3206 3206 # Not all installs have the zstd module available. So defer importing
3207 3207 # until first access.
3208 3208 try:
3209 3209 from . import zstd
3210 3210 # Force delayed import.
3211 3211 zstd.__version__
3212 3212 return zstd
3213 3213 except ImportError:
3214 3214 return None
3215 3215
3216 3216 def available(self):
3217 3217 return bool(self._module)
3218 3218
3219 3219 def bundletype(self):
3220 3220 return 'zstd', 'ZS'
3221 3221
3222 3222 def compressstream(self, it, opts=None):
3223 3223 opts = opts or {}
3224 3224 # zstd level 3 is almost always significantly faster than zlib
3225 3225 # while providing no worse compression. It strikes a good balance
3226 3226 # between speed and compression.
3227 3227 level = opts.get('level', 3)
3228 3228
3229 3229 zstd = self._module
3230 3230 z = zstd.ZstdCompressor(level=level).compressobj()
3231 3231 for chunk in it:
3232 3232 data = z.compress(chunk)
3233 3233 if data:
3234 3234 yield data
3235 3235
3236 3236 yield z.flush()
3237 3237
3238 3238 def decompressorreader(self, fh):
3239 3239 zstd = self._module
3240 3240 dctx = zstd.ZstdDecompressor()
3241 3241 return chunkbuffer(dctx.read_from(fh))
3242 3242
3243 3243 compengines.register(_zstdengine())
3244 3244
3245 3245 # convenient shortcut
3246 3246 dst = debugstacktrace
@@ -1,503 +1,505 b''
1 1 # win32.py - utility functions that use win32 API
2 2 #
3 3 # Copyright 2005-2009 Matt Mackall <mpm@selenic.com> and others
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 from __future__ import absolute_import
9 9
10 10 import ctypes
11 11 import errno
12 12 import msvcrt
13 13 import os
14 14 import random
15 15 import subprocess
16 16
17 from . import encoding
18
17 19 _kernel32 = ctypes.windll.kernel32
18 20 _advapi32 = ctypes.windll.advapi32
19 21 _user32 = ctypes.windll.user32
20 22
21 23 _BOOL = ctypes.c_long
22 24 _WORD = ctypes.c_ushort
23 25 _DWORD = ctypes.c_ulong
24 26 _UINT = ctypes.c_uint
25 27 _LONG = ctypes.c_long
26 28 _LPCSTR = _LPSTR = ctypes.c_char_p
27 29 _HANDLE = ctypes.c_void_p
28 30 _HWND = _HANDLE
29 31
30 32 _INVALID_HANDLE_VALUE = _HANDLE(-1).value
31 33
32 34 # GetLastError
33 35 _ERROR_SUCCESS = 0
34 36 _ERROR_NO_MORE_FILES = 18
35 37 _ERROR_INVALID_PARAMETER = 87
36 38 _ERROR_BROKEN_PIPE = 109
37 39 _ERROR_INSUFFICIENT_BUFFER = 122
38 40
39 41 # WPARAM is defined as UINT_PTR (unsigned type)
40 42 # LPARAM is defined as LONG_PTR (signed type)
41 43 if ctypes.sizeof(ctypes.c_long) == ctypes.sizeof(ctypes.c_void_p):
42 44 _WPARAM = ctypes.c_ulong
43 45 _LPARAM = ctypes.c_long
44 46 elif ctypes.sizeof(ctypes.c_longlong) == ctypes.sizeof(ctypes.c_void_p):
45 47 _WPARAM = ctypes.c_ulonglong
46 48 _LPARAM = ctypes.c_longlong
47 49
48 50 class _FILETIME(ctypes.Structure):
49 51 _fields_ = [('dwLowDateTime', _DWORD),
50 52 ('dwHighDateTime', _DWORD)]
51 53
52 54 class _BY_HANDLE_FILE_INFORMATION(ctypes.Structure):
53 55 _fields_ = [('dwFileAttributes', _DWORD),
54 56 ('ftCreationTime', _FILETIME),
55 57 ('ftLastAccessTime', _FILETIME),
56 58 ('ftLastWriteTime', _FILETIME),
57 59 ('dwVolumeSerialNumber', _DWORD),
58 60 ('nFileSizeHigh', _DWORD),
59 61 ('nFileSizeLow', _DWORD),
60 62 ('nNumberOfLinks', _DWORD),
61 63 ('nFileIndexHigh', _DWORD),
62 64 ('nFileIndexLow', _DWORD)]
63 65
64 66 # CreateFile
65 67 _FILE_SHARE_READ = 0x00000001
66 68 _FILE_SHARE_WRITE = 0x00000002
67 69 _FILE_SHARE_DELETE = 0x00000004
68 70
69 71 _OPEN_EXISTING = 3
70 72
71 73 _FILE_FLAG_BACKUP_SEMANTICS = 0x02000000
72 74
73 75 # SetFileAttributes
74 76 _FILE_ATTRIBUTE_NORMAL = 0x80
75 77 _FILE_ATTRIBUTE_NOT_CONTENT_INDEXED = 0x2000
76 78
77 79 # Process Security and Access Rights
78 80 _PROCESS_QUERY_INFORMATION = 0x0400
79 81
80 82 # GetExitCodeProcess
81 83 _STILL_ACTIVE = 259
82 84
83 85 class _STARTUPINFO(ctypes.Structure):
84 86 _fields_ = [('cb', _DWORD),
85 87 ('lpReserved', _LPSTR),
86 88 ('lpDesktop', _LPSTR),
87 89 ('lpTitle', _LPSTR),
88 90 ('dwX', _DWORD),
89 91 ('dwY', _DWORD),
90 92 ('dwXSize', _DWORD),
91 93 ('dwYSize', _DWORD),
92 94 ('dwXCountChars', _DWORD),
93 95 ('dwYCountChars', _DWORD),
94 96 ('dwFillAttribute', _DWORD),
95 97 ('dwFlags', _DWORD),
96 98 ('wShowWindow', _WORD),
97 99 ('cbReserved2', _WORD),
98 100 ('lpReserved2', ctypes.c_char_p),
99 101 ('hStdInput', _HANDLE),
100 102 ('hStdOutput', _HANDLE),
101 103 ('hStdError', _HANDLE)]
102 104
103 105 class _PROCESS_INFORMATION(ctypes.Structure):
104 106 _fields_ = [('hProcess', _HANDLE),
105 107 ('hThread', _HANDLE),
106 108 ('dwProcessId', _DWORD),
107 109 ('dwThreadId', _DWORD)]
108 110
109 111 _CREATE_NO_WINDOW = 0x08000000
110 112 _SW_HIDE = 0
111 113
112 114 class _COORD(ctypes.Structure):
113 115 _fields_ = [('X', ctypes.c_short),
114 116 ('Y', ctypes.c_short)]
115 117
116 118 class _SMALL_RECT(ctypes.Structure):
117 119 _fields_ = [('Left', ctypes.c_short),
118 120 ('Top', ctypes.c_short),
119 121 ('Right', ctypes.c_short),
120 122 ('Bottom', ctypes.c_short)]
121 123
122 124 class _CONSOLE_SCREEN_BUFFER_INFO(ctypes.Structure):
123 125 _fields_ = [('dwSize', _COORD),
124 126 ('dwCursorPosition', _COORD),
125 127 ('wAttributes', _WORD),
126 128 ('srWindow', _SMALL_RECT),
127 129 ('dwMaximumWindowSize', _COORD)]
128 130
129 131 _STD_ERROR_HANDLE = _DWORD(-12).value
130 132
131 133 # CreateToolhelp32Snapshot, Process32First, Process32Next
132 134 _TH32CS_SNAPPROCESS = 0x00000002
133 135 _MAX_PATH = 260
134 136
135 137 class _tagPROCESSENTRY32(ctypes.Structure):
136 138 _fields_ = [('dwsize', _DWORD),
137 139 ('cntUsage', _DWORD),
138 140 ('th32ProcessID', _DWORD),
139 141 ('th32DefaultHeapID', ctypes.c_void_p),
140 142 ('th32ModuleID', _DWORD),
141 143 ('cntThreads', _DWORD),
142 144 ('th32ParentProcessID', _DWORD),
143 145 ('pcPriClassBase', _LONG),
144 146 ('dwFlags', _DWORD),
145 147 ('szExeFile', ctypes.c_char * _MAX_PATH)]
146 148
147 149 def __init__(self):
148 150 super(_tagPROCESSENTRY32, self).__init__()
149 151 self.dwsize = ctypes.sizeof(self)
150 152
151 153
152 154 # types of parameters of C functions used (required by pypy)
153 155
154 156 _kernel32.CreateFileA.argtypes = [_LPCSTR, _DWORD, _DWORD, ctypes.c_void_p,
155 157 _DWORD, _DWORD, _HANDLE]
156 158 _kernel32.CreateFileA.restype = _HANDLE
157 159
158 160 _kernel32.GetFileInformationByHandle.argtypes = [_HANDLE, ctypes.c_void_p]
159 161 _kernel32.GetFileInformationByHandle.restype = _BOOL
160 162
161 163 _kernel32.CloseHandle.argtypes = [_HANDLE]
162 164 _kernel32.CloseHandle.restype = _BOOL
163 165
164 166 try:
165 167 _kernel32.CreateHardLinkA.argtypes = [_LPCSTR, _LPCSTR, ctypes.c_void_p]
166 168 _kernel32.CreateHardLinkA.restype = _BOOL
167 169 except AttributeError:
168 170 pass
169 171
170 172 _kernel32.SetFileAttributesA.argtypes = [_LPCSTR, _DWORD]
171 173 _kernel32.SetFileAttributesA.restype = _BOOL
172 174
173 175 _kernel32.OpenProcess.argtypes = [_DWORD, _BOOL, _DWORD]
174 176 _kernel32.OpenProcess.restype = _HANDLE
175 177
176 178 _kernel32.GetExitCodeProcess.argtypes = [_HANDLE, ctypes.c_void_p]
177 179 _kernel32.GetExitCodeProcess.restype = _BOOL
178 180
179 181 _kernel32.GetLastError.argtypes = []
180 182 _kernel32.GetLastError.restype = _DWORD
181 183
182 184 _kernel32.GetModuleFileNameA.argtypes = [_HANDLE, ctypes.c_void_p, _DWORD]
183 185 _kernel32.GetModuleFileNameA.restype = _DWORD
184 186
185 187 _kernel32.CreateProcessA.argtypes = [_LPCSTR, _LPCSTR, ctypes.c_void_p,
186 188 ctypes.c_void_p, _BOOL, _DWORD, ctypes.c_void_p, _LPCSTR, ctypes.c_void_p,
187 189 ctypes.c_void_p]
188 190 _kernel32.CreateProcessA.restype = _BOOL
189 191
190 192 _kernel32.ExitProcess.argtypes = [_UINT]
191 193 _kernel32.ExitProcess.restype = None
192 194
193 195 _kernel32.GetCurrentProcessId.argtypes = []
194 196 _kernel32.GetCurrentProcessId.restype = _DWORD
195 197
196 198 _SIGNAL_HANDLER = ctypes.WINFUNCTYPE(_BOOL, _DWORD)
197 199 _kernel32.SetConsoleCtrlHandler.argtypes = [_SIGNAL_HANDLER, _BOOL]
198 200 _kernel32.SetConsoleCtrlHandler.restype = _BOOL
199 201
200 202 _kernel32.GetStdHandle.argtypes = [_DWORD]
201 203 _kernel32.GetStdHandle.restype = _HANDLE
202 204
203 205 _kernel32.GetConsoleScreenBufferInfo.argtypes = [_HANDLE, ctypes.c_void_p]
204 206 _kernel32.GetConsoleScreenBufferInfo.restype = _BOOL
205 207
206 208 _advapi32.GetUserNameA.argtypes = [ctypes.c_void_p, ctypes.c_void_p]
207 209 _advapi32.GetUserNameA.restype = _BOOL
208 210
209 211 _user32.GetWindowThreadProcessId.argtypes = [_HANDLE, ctypes.c_void_p]
210 212 _user32.GetWindowThreadProcessId.restype = _DWORD
211 213
212 214 _user32.ShowWindow.argtypes = [_HANDLE, ctypes.c_int]
213 215 _user32.ShowWindow.restype = _BOOL
214 216
215 217 _WNDENUMPROC = ctypes.WINFUNCTYPE(_BOOL, _HWND, _LPARAM)
216 218 _user32.EnumWindows.argtypes = [_WNDENUMPROC, _LPARAM]
217 219 _user32.EnumWindows.restype = _BOOL
218 220
219 221 _kernel32.CreateToolhelp32Snapshot.argtypes = [_DWORD, _DWORD]
220 222 _kernel32.CreateToolhelp32Snapshot.restype = _BOOL
221 223
222 224 _kernel32.PeekNamedPipe.argtypes = [_HANDLE, ctypes.c_void_p, _DWORD,
223 225 ctypes.c_void_p, ctypes.c_void_p, ctypes.c_void_p]
224 226 _kernel32.PeekNamedPipe.restype = _BOOL
225 227
226 228 _kernel32.Process32First.argtypes = [_HANDLE, ctypes.c_void_p]
227 229 _kernel32.Process32First.restype = _BOOL
228 230
229 231 _kernel32.Process32Next.argtypes = [_HANDLE, ctypes.c_void_p]
230 232 _kernel32.Process32Next.restype = _BOOL
231 233
232 234 def _raiseoserror(name):
233 235 err = ctypes.WinError()
234 236 raise OSError(err.errno, '%s: %s' % (name, err.strerror))
235 237
236 238 def _getfileinfo(name):
237 239 fh = _kernel32.CreateFileA(name, 0,
238 240 _FILE_SHARE_READ | _FILE_SHARE_WRITE | _FILE_SHARE_DELETE,
239 241 None, _OPEN_EXISTING, _FILE_FLAG_BACKUP_SEMANTICS, None)
240 242 if fh == _INVALID_HANDLE_VALUE:
241 243 _raiseoserror(name)
242 244 try:
243 245 fi = _BY_HANDLE_FILE_INFORMATION()
244 246 if not _kernel32.GetFileInformationByHandle(fh, ctypes.byref(fi)):
245 247 _raiseoserror(name)
246 248 return fi
247 249 finally:
248 250 _kernel32.CloseHandle(fh)
249 251
250 252 def oslink(src, dst):
251 253 try:
252 254 if not _kernel32.CreateHardLinkA(dst, src, None):
253 255 _raiseoserror(src)
254 256 except AttributeError: # Wine doesn't support this function
255 257 _raiseoserror(src)
256 258
257 259 def nlinks(name):
258 260 '''return number of hardlinks for the given file'''
259 261 return _getfileinfo(name).nNumberOfLinks
260 262
261 263 def samefile(path1, path2):
262 264 '''Returns whether path1 and path2 refer to the same file or directory.'''
263 265 res1 = _getfileinfo(path1)
264 266 res2 = _getfileinfo(path2)
265 267 return (res1.dwVolumeSerialNumber == res2.dwVolumeSerialNumber
266 268 and res1.nFileIndexHigh == res2.nFileIndexHigh
267 269 and res1.nFileIndexLow == res2.nFileIndexLow)
268 270
269 271 def samedevice(path1, path2):
270 272 '''Returns whether path1 and path2 are on the same device.'''
271 273 res1 = _getfileinfo(path1)
272 274 res2 = _getfileinfo(path2)
273 275 return res1.dwVolumeSerialNumber == res2.dwVolumeSerialNumber
274 276
275 277 def peekpipe(pipe):
276 278 handle = msvcrt.get_osfhandle(pipe.fileno())
277 279 avail = _DWORD()
278 280
279 281 if not _kernel32.PeekNamedPipe(handle, None, 0, None, ctypes.byref(avail),
280 282 None):
281 283 err = _kernel32.GetLastError()
282 284 if err == _ERROR_BROKEN_PIPE:
283 285 return 0
284 286 raise ctypes.WinError(err)
285 287
286 288 return avail.value
287 289
288 290 def testpid(pid):
289 291 '''return True if pid is still running or unable to
290 292 determine, False otherwise'''
291 293 h = _kernel32.OpenProcess(_PROCESS_QUERY_INFORMATION, False, pid)
292 294 if h:
293 295 try:
294 296 status = _DWORD()
295 297 if _kernel32.GetExitCodeProcess(h, ctypes.byref(status)):
296 298 return status.value == _STILL_ACTIVE
297 299 finally:
298 300 _kernel32.CloseHandle(h)
299 301 return _kernel32.GetLastError() != _ERROR_INVALID_PARAMETER
300 302
301 303 def executablepath():
302 304 '''return full path of hg.exe'''
303 305 size = 600
304 306 buf = ctypes.create_string_buffer(size + 1)
305 307 len = _kernel32.GetModuleFileNameA(None, ctypes.byref(buf), size)
306 308 if len == 0:
307 309 raise ctypes.WinError() # Note: WinError is a function
308 310 elif len == size:
309 311 raise ctypes.WinError(_ERROR_INSUFFICIENT_BUFFER)
310 312 return buf.value
311 313
312 314 def getuser():
313 315 '''return name of current user'''
314 316 size = _DWORD(300)
315 317 buf = ctypes.create_string_buffer(size.value + 1)
316 318 if not _advapi32.GetUserNameA(ctypes.byref(buf), ctypes.byref(size)):
317 319 raise ctypes.WinError()
318 320 return buf.value
319 321
320 322 _signalhandler = []
321 323
322 324 def setsignalhandler():
323 325 '''Register a termination handler for console events including
324 326 CTRL+C. python signal handlers do not work well with socket
325 327 operations.
326 328 '''
327 329 def handler(event):
328 330 _kernel32.ExitProcess(1)
329 331
330 332 if _signalhandler:
331 333 return # already registered
332 334 h = _SIGNAL_HANDLER(handler)
333 335 _signalhandler.append(h) # needed to prevent garbage collection
334 336 if not _kernel32.SetConsoleCtrlHandler(h, True):
335 337 raise ctypes.WinError()
336 338
337 339 def hidewindow():
338 340
339 341 def callback(hwnd, pid):
340 342 wpid = _DWORD()
341 343 _user32.GetWindowThreadProcessId(hwnd, ctypes.byref(wpid))
342 344 if pid == wpid.value:
343 345 _user32.ShowWindow(hwnd, _SW_HIDE)
344 346 return False # stop enumerating windows
345 347 return True
346 348
347 349 pid = _kernel32.GetCurrentProcessId()
348 350 _user32.EnumWindows(_WNDENUMPROC(callback), pid)
349 351
350 352 def termsize():
351 353 # cmd.exe does not handle CR like a unix console; the CR is
352 354 # counted in the line length. On 80-column consoles, if 80
353 355 # characters are written, the following CR won't apply on the
354 356 # current line but on the new one. Keep room for it.
355 357 width = 80 - 1
356 358 height = 25
357 359 # Query stderr to avoid problems with redirections
358 360 screenbuf = _kernel32.GetStdHandle(
359 361 _STD_ERROR_HANDLE) # don't close the handle returned
360 362 if screenbuf is None or screenbuf == _INVALID_HANDLE_VALUE:
361 363 return width, height
362 364 csbi = _CONSOLE_SCREEN_BUFFER_INFO()
363 365 if not _kernel32.GetConsoleScreenBufferInfo(
364 366 screenbuf, ctypes.byref(csbi)):
365 367 return width, height
366 368 width = csbi.srWindow.Right - csbi.srWindow.Left # don't '+ 1'
367 369 height = csbi.srWindow.Bottom - csbi.srWindow.Top + 1
368 370 return width, height
369 371
370 372 def _1stchild(pid):
371 373 '''return the 1st found child of the given pid
372 374
373 375 None is returned when no child is found'''
374 376 pe = _tagPROCESSENTRY32()
375 377
376 378 # create handle to list all processes
377 379 ph = _kernel32.CreateToolhelp32Snapshot(_TH32CS_SNAPPROCESS, 0)
378 380 if ph == _INVALID_HANDLE_VALUE:
379 381 raise ctypes.WinError()
380 382 try:
381 383 r = _kernel32.Process32First(ph, ctypes.byref(pe))
382 384 # loop over all processes
383 385 while r:
384 386 if pe.th32ParentProcessID == pid:
385 387 # return first child found
386 388 return pe.th32ProcessID
387 389 r = _kernel32.Process32Next(ph, ctypes.byref(pe))
388 390 finally:
389 391 _kernel32.CloseHandle(ph)
390 392 if _kernel32.GetLastError() != _ERROR_NO_MORE_FILES:
391 393 raise ctypes.WinError()
392 394 return None # no child found
393 395
394 396 class _tochildpid(int): # pid is _DWORD, which always matches in an int
395 397 '''helper for spawndetached, returns the child pid on conversion to string
396 398
397 399 Does not resolve the child pid immediately because the child may not yet be
398 400 started.
399 401 '''
400 402 def childpid(self):
401 403 '''returns the child pid of the first found child of the process
402 404 with this pid'''
403 405 return _1stchild(self)
404 406 def __str__(self):
405 407 # run when the pid is written to the file
406 408 ppid = self.childpid()
407 409 if ppid is None:
408 410 # race, child has exited since check
409 411 # fall back to this pid. Its process will also have disappeared,
410 412 # raising the same error type later as when the child pid would
411 413 # be returned.
412 414 return " %d" % self
413 415 return str(ppid)
414 416
415 417 def spawndetached(args):
416 418 # No standard library function really spawns a fully detached
417 419 # process under win32 because they allocate pipes or other objects
418 420 # to handle standard streams communications. Passing these objects
419 421 # to the child process requires handle inheritance to be enabled
420 422 # which makes really detached processes impossible.
421 423 si = _STARTUPINFO()
422 424 si.cb = ctypes.sizeof(_STARTUPINFO)
423 425
424 426 pi = _PROCESS_INFORMATION()
425 427
426 428 env = ''
427 for k in os.environ:
428 env += "%s=%s\0" % (k, os.environ[k])
429 for k in encoding.environ:
430 env += "%s=%s\0" % (k, encoding.environ[k])
429 431 if not env:
430 432 env = '\0'
431 433 env += '\0'
432 434
433 435 args = subprocess.list2cmdline(args)
434 436 # Not running the command in shell mode makes Python 2.6 hang when
435 437 # writing to hgweb output socket.
436 comspec = os.environ.get("COMSPEC", "cmd.exe")
438 comspec = encoding.environ.get("COMSPEC", "cmd.exe")
437 439 args = comspec + " /c " + args
438 440
439 441 res = _kernel32.CreateProcessA(
440 442 None, args, None, None, False, _CREATE_NO_WINDOW,
441 443 env, os.getcwd(), ctypes.byref(si), ctypes.byref(pi))
442 444 if not res:
443 445 raise ctypes.WinError()
444 446
445 447 # _tochildpid because the process is the child of COMSPEC
446 448 return _tochildpid(pi.dwProcessId)
447 449
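A side note on the environment block built above: CreateProcessA expects a single string of NUL-terminated "NAME=value" entries closed by one extra NUL, e.g. (values purely illustrative):

    'PATH=C:\\bin\x00COMSPEC=cmd.exe\x00\x00'

encoding.environ, unlike os.environ, provides a bytes view of the environment on Python 3, so the block assembled here stays a byte string.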
448 450 def unlink(f):
449 451 '''try to implement POSIX' unlink semantics on Windows'''
450 452
451 453 if os.path.isdir(f):
452 454 # use EPERM because it is POSIX prescribed value, even though
453 455 # unlink(2) on directories returns EISDIR on Linux
454 456 raise IOError(errno.EPERM,
455 457 "Unlinking directory not permitted: '%s'" % f)
456 458
457 459 # POSIX allows to unlink and rename open files. Windows has serious
458 460 # problems with doing that:
459 461 # - Calling os.unlink (or os.rename) on a file f fails if f or any
460 462 # hardlinked copy of f has been opened with Python's open(). There is no
461 463 # way such a file can be deleted or renamed on Windows (other than
462 464 # scheduling the delete or rename for the next reboot).
463 465 # - Calling os.unlink on a file that has been opened with Mercurial's
464 466 # posixfile (or comparable methods) will delay the actual deletion of
465 467 # the file for as long as the file is held open. The filename is blocked
466 468 # during that time and cannot be used for recreating a new file under
467 469 # that same name ("zombie file"). Directories containing such zombie files
468 470 # cannot be removed or moved.
469 471 # A file that has been opened with posixfile can be renamed, so we rename
470 472 # f to a random temporary name before calling os.unlink on it. This allows
471 473 # callers to recreate f immediately while having other readers do their
472 474 # implicit zombie filename blocking on a temporary name.
473 475
474 476 for tries in xrange(10):
475 477 temp = '%s-%08x' % (f, random.randint(0, 0xffffffff))
476 478 try:
477 479 os.rename(f, temp) # raises OSError EEXIST if temp exists
478 480 break
479 481 except OSError as e:
480 482 if e.errno != errno.EEXIST:
481 483 raise
482 484 else:
483 485 raise IOError(errno.EEXIST, "No usable temporary filename found")
484 486
485 487 try:
486 488 os.unlink(temp)
487 489 except OSError:
488 490 # The unlink might have failed because the READONLY attribute may have
489 491 # been set on the original file. Rename works fine with READONLY set,
490 492 # but not os.unlink. Reset all attributes and try again.
491 493 _kernel32.SetFileAttributesA(temp, _FILE_ATTRIBUTE_NORMAL)
492 494 try:
493 495 os.unlink(temp)
494 496 except OSError:
495 497 # The unlink might have failed due to some very rude AV-Scanners.
496 498 # Leaking a tempfile is the lesser evil than aborting here and
497 499 # leaving some potentially serious inconsistencies.
498 500 pass
499 501
500 502 def makedir(path, notindexed):
501 503 os.mkdir(path)
502 504 if notindexed:
503 505 _kernel32.SetFileAttributesA(path, _FILE_ATTRIBUTE_NOT_CONTENT_INDEXED)