##// END OF EJS Templates
date: fix boundary check of negative integer
Florent Gallaire -
r28864:b0811a9f default
parent child Browse files
Show More
@@ -1,2741 +1,2741
1 1 # util.py - Mercurial utility functions and platform specific implementations
2 2 #
3 3 # Copyright 2005 K. Thananchayan <thananck@yahoo.com>
4 4 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
5 5 # Copyright 2006 Vadim Gelfer <vadim.gelfer@gmail.com>
6 6 #
7 7 # This software may be used and distributed according to the terms of the
8 8 # GNU General Public License version 2 or any later version.
9 9
10 10 """Mercurial utility functions and platform specific implementations.
11 11
12 12 This contains helper routines that are independent of the SCM core and
13 13 hide platform-specific details from the core.
14 14 """
15 15
16 16 from __future__ import absolute_import
17 17
18 18 import bz2
19 19 import calendar
20 20 import collections
21 21 import datetime
22 22 import errno
23 23 import gc
24 24 import hashlib
25 25 import imp
26 26 import os
27 27 import re as remod
28 28 import shutil
29 29 import signal
30 30 import socket
31 31 import subprocess
32 32 import sys
33 33 import tempfile
34 34 import textwrap
35 35 import time
36 36 import traceback
37 37 import urllib
38 38 import zlib
39 39
40 40 from . import (
41 41 encoding,
42 42 error,
43 43 i18n,
44 44 osutil,
45 45 parsers,
46 46 pycompat,
47 47 )
48 48
49 49 for attr in (
50 50 'empty',
51 51 'queue',
52 52 'stringio',
53 53 ):
54 54 globals()[attr] = getattr(pycompat, attr)
55 55
56 56 if os.name == 'nt':
57 57 from . import windows as platform
58 58 else:
59 59 from . import posix as platform
60 60
61 61 md5 = hashlib.md5
62 62 sha1 = hashlib.sha1
63 63 sha512 = hashlib.sha512
64 64 _ = i18n._
65 65
66 66 cachestat = platform.cachestat
67 67 checkexec = platform.checkexec
68 68 checklink = platform.checklink
69 69 copymode = platform.copymode
70 70 executablepath = platform.executablepath
71 71 expandglobs = platform.expandglobs
72 72 explainexit = platform.explainexit
73 73 findexe = platform.findexe
74 74 gethgcmd = platform.gethgcmd
75 75 getuser = platform.getuser
76 76 getpid = os.getpid
77 77 groupmembers = platform.groupmembers
78 78 groupname = platform.groupname
79 79 hidewindow = platform.hidewindow
80 80 isexec = platform.isexec
81 81 isowner = platform.isowner
82 82 localpath = platform.localpath
83 83 lookupreg = platform.lookupreg
84 84 makedir = platform.makedir
85 85 nlinks = platform.nlinks
86 86 normpath = platform.normpath
87 87 normcase = platform.normcase
88 88 normcasespec = platform.normcasespec
89 89 normcasefallback = platform.normcasefallback
90 90 openhardlinks = platform.openhardlinks
91 91 oslink = platform.oslink
92 92 parsepatchoutput = platform.parsepatchoutput
93 93 pconvert = platform.pconvert
94 94 poll = platform.poll
95 95 popen = platform.popen
96 96 posixfile = platform.posixfile
97 97 quotecommand = platform.quotecommand
98 98 readpipe = platform.readpipe
99 99 rename = platform.rename
100 100 removedirs = platform.removedirs
101 101 samedevice = platform.samedevice
102 102 samefile = platform.samefile
103 103 samestat = platform.samestat
104 104 setbinary = platform.setbinary
105 105 setflags = platform.setflags
106 106 setsignalhandler = platform.setsignalhandler
107 107 shellquote = platform.shellquote
108 108 spawndetached = platform.spawndetached
109 109 split = platform.split
110 110 sshargs = platform.sshargs
111 111 statfiles = getattr(osutil, 'statfiles', platform.statfiles)
112 112 statisexec = platform.statisexec
113 113 statislink = platform.statislink
114 114 termwidth = platform.termwidth
115 115 testpid = platform.testpid
116 116 umask = platform.umask
117 117 unlink = platform.unlink
118 118 unlinkpath = platform.unlinkpath
119 119 username = platform.username
120 120
121 121 # Python compatibility
122 122
123 123 _notset = object()
124 124
125 125 # disable Python's problematic floating point timestamps (issue4836)
126 126 # (Python hypocritically says you shouldn't change this behavior in
127 127 # libraries, and sure enough Mercurial is not a library.)
128 128 os.stat_float_times(False)
129 129
def safehasattr(thing, attr):
    """Return True if `thing` has attribute `attr`, even when its value is
    falsy or None (unlike `getattr(thing, attr, None)`-style probes)."""
    sentinel = object()
    return getattr(thing, attr, sentinel) is not sentinel
132 132
# Map of digest name -> hashlib constructor, consumed by `digester`.
DIGESTS = {
    'md5': md5,
    'sha1': sha1,
    'sha512': sha512,
}
# List of digest types from strongest to weakest
DIGESTS_BY_STRENGTH = ['sha512', 'sha1', 'md5']

# Sanity check: every entry in the preference list must be constructible.
for k in DIGESTS_BY_STRENGTH:
    assert k in DIGESTS
143 143
class digester(object):
    """helper to compute digests.

    This helper can be used to compute one or more digests given their name.

    >>> d = digester(['md5', 'sha1'])
    >>> d.update('foo')
    >>> [k for k in sorted(d)]
    ['md5', 'sha1']
    >>> d['md5']
    'acbd18db4cc2f85cedef654fccc4a4d8'
    >>> d['sha1']
    '0beec7b5ea3f0fdbc95d0dd47f3c5bc275da8a33'
    >>> digester.preferred(['md5', 'sha1'])
    'sha1'
    """

    def __init__(self, digests, s=''):
        self._hashes = {}
        for k in digests:
            if k not in DIGESTS:
                raise Abort(_('unknown digest type: %s') % k)
            self._hashes[k] = DIGESTS[k]()
        if s:
            self.update(s)

    def update(self, data):
        """Feed `data` into every tracked digest."""
        for h in self._hashes.values():
            h.update(data)

    def __getitem__(self, key):
        """Return the hex digest for `key`; raise Abort for unknown types."""
        if key not in DIGESTS:
            # Fixed: this previously interpolated `k` — a stale loop variable
            # leaked from module scope — instead of the requested `key`,
            # producing a wrong (or NameError-raising) error message.
            raise Abort(_('unknown digest type: %s') % key)
        return self._hashes[key].hexdigest()

    def __iter__(self):
        return iter(self._hashes)

    @staticmethod
    def preferred(supported):
        """returns the strongest digest type in both supported and DIGESTS."""

        for k in DIGESTS_BY_STRENGTH:
            if k in supported:
                return k
        return None
190 190
class digestchecker(object):
    """file handle wrapper that additionally checks content against a given
    size and digests.

    d = digestchecker(fh, size, {'md5': '...'})

    When multiple digests are given, all of them are validated.
    """

    def __init__(self, fh, size, digests):
        self._fh = fh
        self._size = size
        self._got = 0
        self._digests = dict(digests)
        self._digester = digester(self._digests.keys())

    def read(self, length=-1):
        """Read from the wrapped handle, feeding every digest as we go."""
        data = self._fh.read(length)
        self._digester.update(data)
        self._got += len(data)
        return data

    def validate(self):
        """Raise Abort unless the observed size and every digest match."""
        if self._got != self._size:
            raise Abort(_('size mismatch: expected %d, got %d') %
                        (self._size, self._got))
        for name, expected in self._digests.items():
            actual = self._digester[name]
            if expected != actual:
                # i18n: first parameter is a digest name
                raise Abort(_('%s mismatch: expected %s, got %s') %
                            (name, expected, actual))
222 222
# Python 3 removed the `buffer` builtin; keep the real one when it exists,
# otherwise emulate it with slicing (py2) or a memoryview slice (py3).
try:
    buffer = buffer
except NameError:
    if sys.version_info[0] < 3:
        def buffer(sliceable, offset=0):
            return sliceable[offset:]
    else:
        def buffer(sliceable, offset=0):
            return memoryview(sliceable)[offset:]

# Passing close_fds to subprocess is only safe/cheap on POSIX.
closefds = os.name == 'posix'

# Read size used by bufferedinputpipe._fillbuffer.
_chunksize = 4096
236 236
class bufferedinputpipe(object):
    """a manually buffered input pipe

    Python will not let us use buffered IO and lazy reading with 'polling' at
    the same time. We cannot probe the buffer state and select will not detect
    that data are ready to read if they are already buffered.

    This class let us work around that by implementing its own buffering
    (allowing efficient readline) while offering a way to know if the buffer is
    empty from the output (allowing collaboration of the buffer with polling).

    This class lives in the 'util' module because it makes use of the 'os'
    module from the python stdlib.
    """

    def __init__(self, input):
        self._input = input  # underlying file-like object with a fileno()
        self._buffer = []    # list of buffered string chunks
        self._eof = False    # set once os.read() returns empty
        self._lenbuf = 0     # total length of all chunks in _buffer

    @property
    def hasbuffer(self):
        """True is any data is currently buffered

        This will be used externally a pre-step for polling IO. If there is
        already data then no polling should be set in place."""
        return bool(self._buffer)

    @property
    def closed(self):
        return self._input.closed

    def fileno(self):
        return self._input.fileno()

    def close(self):
        return self._input.close()

    def read(self, size):
        # Fill until we can satisfy `size` bytes or the pipe hits EOF.
        while (not self._eof) and (self._lenbuf < size):
            self._fillbuffer()
        return self._frombuffer(size)

    def readline(self, *args, **kwargs):
        if 1 < len(self._buffer):
            # this should not happen because both read and readline end with a
            # _frombuffer call that collapse it.
            self._buffer = [''.join(self._buffer)]
            self._lenbuf = len(self._buffer[0])
        lfi = -1  # index of the first '\n' in the last chunk, -1 if none
        if self._buffer:
            lfi = self._buffer[-1].find('\n')
        # Keep reading until a newline shows up or we reach EOF.
        while (not self._eof) and lfi < 0:
            self._fillbuffer()
            if self._buffer:
                lfi = self._buffer[-1].find('\n')
        size = lfi + 1
        if lfi < 0: # end of file
            size = self._lenbuf
        elif 1 < len(self._buffer):
            # we need to take previous chunks into account
            size += self._lenbuf - len(self._buffer[-1])
        return self._frombuffer(size)

    def _frombuffer(self, size):
        """return at most 'size' data from the buffer

        The data are removed from the buffer."""
        if size == 0 or not self._buffer:
            return ''
        buf = self._buffer[0]
        if 1 < len(self._buffer):
            buf = ''.join(self._buffer)

        data = buf[:size]
        buf = buf[len(data):]
        if buf:
            self._buffer = [buf]
            self._lenbuf = len(buf)
        else:
            self._buffer = []
            self._lenbuf = 0
        return data

    def _fillbuffer(self):
        """read data to the buffer"""
        data = os.read(self._input.fileno(), _chunksize)
        if not data:
            self._eof = True
        else:
            self._lenbuf += len(data)
            self._buffer.append(data)
330 330
def popen2(cmd, env=None, newlines=False):
    """Run `cmd` via the shell; return its (stdin, stdout) pipe pair."""
    # Setting bufsize to -1 lets the system decide the buffer size.
    # The default for bufsize is 0, meaning unbuffered. This leads to
    # poor performance on Mac OS X: http://bugs.python.org/issue4194
    proc = subprocess.Popen(cmd,
                            shell=True,
                            bufsize=-1,
                            close_fds=closefds,
                            stdin=subprocess.PIPE,
                            stdout=subprocess.PIPE,
                            universal_newlines=newlines,
                            env=env)
    return proc.stdin, proc.stdout
341 341
def popen3(cmd, env=None, newlines=False):
    """Like popen4, but drop the stderr pipe and the Popen object."""
    stdin, stdout, stderr, _proc = popen4(cmd, env, newlines)
    return stdin, stdout, stderr
345 345
def popen4(cmd, env=None, newlines=False, bufsize=-1):
    """Spawn `cmd` in a shell; return (stdin, stdout, stderr, Popen)."""
    proc = subprocess.Popen(cmd,
                            shell=True,
                            bufsize=bufsize,
                            close_fds=closefds,
                            stdin=subprocess.PIPE,
                            stdout=subprocess.PIPE,
                            stderr=subprocess.PIPE,
                            universal_newlines=newlines,
                            env=env)
    return proc.stdin, proc.stdout, proc.stderr, proc
354 354
def version():
    """Return version information if available."""
    try:
        from . import __version__
    except ImportError:
        return 'unknown'
    return __version__.version
362 362
def versiontuple(v=None, n=4):
    """Parses a Mercurial version string into an N-tuple.

    The version string to be parsed is specified with the ``v`` argument.
    If it isn't defined, the current Mercurial version string will be parsed.

    ``n`` can be 2, 3, or 4. Here is how some version strings map to
    returned values:

    >>> v = '3.6.1+190-df9b73d2d444'
    >>> versiontuple(v, 2)
    (3, 6)
    >>> versiontuple(v, 3)
    (3, 6, 1)
    >>> versiontuple(v, 4)
    (3, 6, 1, '190-df9b73d2d444')

    >>> versiontuple('3.6.1+190-df9b73d2d444+20151118')
    (3, 6, 1, '190-df9b73d2d444+20151118')

    >>> v = '3.6'
    >>> versiontuple(v, 2)
    (3, 6)
    >>> versiontuple(v, 3)
    (3, 6, None)
    >>> versiontuple(v, 4)
    (3, 6, None, None)
    """
    if not v:
        v = version()
    # Split off the "+local" suffix, if any; only the first '+' matters.
    if '+' in v:
        vparts, extra = v.split('+', 1)
    else:
        vparts, extra = v, None

    numeric = []
    for piece in vparts.split('.'):
        try:
            numeric.append(int(piece))
        except ValueError:
            break
    # Pad to at least three components: (3, 6) -> (3, 6, None)
    while len(numeric) < 3:
        numeric.append(None)

    if n == 2:
        return (numeric[0], numeric[1])
    if n == 3:
        return (numeric[0], numeric[1], numeric[2])
    if n == 4:
        return (numeric[0], numeric[1], numeric[2], extra)
415 415
# used by parsedate
# Tried in order; first strptime-style format that matches wins.
defaultdateformats = (
    '%Y-%m-%d %H:%M:%S',
    '%Y-%m-%d %I:%M:%S%p',
    '%Y-%m-%d %H:%M',
    '%Y-%m-%d %I:%M%p',
    '%Y-%m-%d',
    '%m-%d',
    '%m/%d',
    '%m/%d/%y',
    '%m/%d/%Y',
    '%a %b %d %H:%M:%S %Y',
    '%a %b %d %I:%M:%S%p %Y',
    '%a, %d %b %Y %H:%M:%S', #  GNU coreutils "/bin/date --rfc-2822"
    '%b %d %H:%M:%S %Y',
    '%b %d %I:%M:%S%p %Y',
    '%b %d %H:%M:%S',
    '%b %d %I:%M:%S%p',
    '%b %d %H:%M',
    '%b %d %I:%M%p',
    '%b %d %Y',
    '%b %d',
    '%H:%M:%S',
    '%I:%M:%S%p',
    '%H:%M',
    '%I:%M%p',
)

# Coarser formats additionally accepted for ranges and user-supplied dates.
extendeddateformats = defaultdateformats + (
    "%Y",
    "%Y-%m",
    "%b",
    "%b %Y",
)
450 450
def cachefunc(func):
    '''cache the result of function calls'''
    # XXX doesn't handle keywords args
    argcount = func.__code__.co_argcount
    if argcount == 0:
        # Zero-argument functions: memoize the single result in a list cell.
        results = []
        def f():
            if not results:
                results.append(func())
            return results[0]
        return f
    memo = {}
    if argcount == 1:
        # we gain a small amount of time because
        # we don't need to pack/unpack the list
        def f(arg):
            if arg not in memo:
                memo[arg] = func(arg)
            return memo[arg]
    else:
        def f(*args):
            if args not in memo:
                memo[args] = func(*args)
            return memo[args]

    return f
476 476
class sortdict(dict):
    '''a simple sorted dictionary

    Iteration follows insertion order; re-assigning an existing key moves it
    to the end of the order.
    '''
    def __init__(self, data=None):
        self._list = []
        if data:
            self.update(data)
    def copy(self):
        return sortdict(self)
    def __setitem__(self, key, val):
        if key in self:
            self._list.remove(key)
        self._list.append(key)
        dict.__setitem__(self, key, val)
    def __iter__(self):
        return self._list.__iter__()
    def update(self, src):
        if isinstance(src, dict):
            src = src.iteritems()
        for k, v in src:
            self[k] = v
    def clear(self):
        dict.clear(self)
        self._list = []
    def items(self):
        return [(k, self[k]) for k in self._list]
    def __delitem__(self, key):
        dict.__delitem__(self, key)
        self._list.remove(key)
    def pop(self, key, *args, **kwargs):
        # Fixed: dict.pop's result used to be silently discarded; return it
        # (or the caller-supplied default) so sortdict honors the dict
        # contract for pop().
        ret = dict.pop(self, key, *args, **kwargs)
        try:
            self._list.remove(key)
        except ValueError:
            # key was absent and a default was supplied by the caller
            pass
        return ret
    def keys(self):
        return self._list
    def iterkeys(self):
        return self._list.__iter__()
    def iteritems(self):
        for k in self._list:
            yield k, self[k]
    def insert(self, index, key, val):
        self._list.insert(index, key)
        dict.__setitem__(self, key, val)
521 521
class _lrucachenode(object):
    """A node in a doubly linked list.

    Holds a reference to nodes on either side as well as a key-value
    pair for the dictionary entry.
    """
    __slots__ = ('next', 'prev', 'key', 'value')

    def __init__(self):
        # Link pointers are patched in by lrucachedict when the node is
        # spliced into the circular list.
        self.prev = None
        self.next = None
        # The shared `_notset` sentinel marks a slot holding no entry.
        self.key = _notset
        self.value = None

    def markempty(self):
        """Mark the node as emptied."""
        self.key = _notset
540 540
class lrucachedict(object):
    """Dict that caches most recent accesses and sets.

    The dict consists of an actual backing dict - indexed by original
    key - and a doubly linked circular list defining the order of entries in
    the cache.

    The head node is the newest entry in the cache. If the cache is full,
    we recycle head.prev and make it the new head. Cache accesses result in
    the node being moved to before the existing head and being marked as the
    new head node.
    """
    def __init__(self, max):
        self._cache = {}

        self._head = head = _lrucachenode()
        head.prev = head
        head.next = head
        self._size = 1
        self._capacity = max

    def __len__(self):
        return len(self._cache)

    def __contains__(self, k):
        return k in self._cache

    def __iter__(self):
        # We don't have to iterate in cache order, but why not.
        n = self._head
        for i in range(len(self._cache)):
            yield n.key
            n = n.next

    def __getitem__(self, k):
        node = self._cache[k]
        self._movetohead(node)
        return node.value

    def __setitem__(self, k, v):
        node = self._cache.get(k)
        # Replace existing value and mark as newest.
        if node is not None:
            node.value = v
            self._movetohead(node)
            return

        if self._size < self._capacity:
            node = self._addcapacity()
        else:
            # Grab the last/oldest item.
            node = self._head.prev

        # At capacity. Kill the old entry.
        if node.key is not _notset:
            del self._cache[node.key]

        node.key = k
        node.value = v
        self._cache[k] = node
        # And mark it as newest entry. No need to adjust order since it
        # is already self._head.prev.
        self._head = node

    def __delitem__(self, k):
        node = self._cache.pop(k)
        node.markempty()

        # Temporarily mark as newest item before re-adjusting head to make
        # this node the oldest item.
        self._movetohead(node)
        self._head = node.next

    # Additional dict methods.

    def get(self, k, default=None):
        """Return the cached value for ``k``, or ``default`` if absent."""
        try:
            # Fixed: this used to return the internal _lrucachenode object
            # instead of the cached value. Route through __getitem__ so the
            # value is returned and the entry is refreshed in LRU order.
            return self[k]
        except KeyError:
            return default

    def clear(self):
        n = self._head
        while n.key is not _notset:
            n.markempty()
            n = n.next

        self._cache.clear()

    def copy(self):
        result = lrucachedict(self._capacity)
        n = self._head.prev
        # Iterate in oldest-to-newest order, so the copy has the right ordering
        for i in range(len(self._cache)):
            result[n.key] = n.value
            n = n.prev
        return result

    def _movetohead(self, node):
        """Mark a node as the newest, making it the new head.

        When a node is accessed, it becomes the freshest entry in the LRU
        list, which is denoted by self._head.

        Visually, let's make ``N`` the new head node (* denotes head):

            previous/oldest <-> head <-> next/next newest

            ----<->--- A* ---<->-----
            |                       |
            E <-> D <-> N <-> C <-> B

        To:

            ----<->--- N* ---<->-----
            |                       |
            E <-> D <-> C <-> B <-> A

        This requires the following moves:

           C.next = D  (node.prev.next = node.next)
           D.prev = C  (node.next.prev = node.prev)
           E.next = N  (head.prev.next = node)
           N.prev = E  (node.prev = head.prev)
           N.next = A  (node.next = head)
           A.prev = N  (head.prev = node)
        """
        head = self._head
        # C.next = D
        node.prev.next = node.next
        # D.prev = C
        node.next.prev = node.prev
        # N.prev = E
        node.prev = head.prev
        # N.next = A
        # It is tempting to do just "head" here, however if node is
        # adjacent to head, this will do bad things.
        node.next = head.prev.next
        # E.next = N
        node.next.prev = node
        # A.prev = N
        node.prev.next = node

        self._head = node

    def _addcapacity(self):
        """Add a node to the circular linked list.

        The new node is inserted before the head node.
        """
        head = self._head
        node = _lrucachenode()
        head.prev.next = node
        node.prev = head.prev
        node.next = head
        head.prev = node
        self._size += 1
        return node
699 699
def lrucachefunc(func):
    '''cache most recent results of function calls'''
    cache = {}
    order = collections.deque()
    # Evict the least recently used entry once more than 20 are cached.
    if func.__code__.co_argcount == 1:
        def f(arg):
            if arg in cache:
                order.remove(arg)
            else:
                if len(cache) > 20:
                    del cache[order.popleft()]
                cache[arg] = func(arg)
            order.append(arg)
            return cache[arg]
    else:
        def f(*args):
            if args in cache:
                order.remove(args)
            else:
                if len(cache) > 20:
                    del cache[order.popleft()]
                cache[args] = func(*args)
            order.append(args)
            return cache[args]

    return f
726 726
class propertycache(object):
    """Descriptor caching the wrapped function's result on the instance.

    The first attribute access runs `func` and stores the result in the
    instance __dict__ under the function's name, shadowing this descriptor
    for all later lookups.
    """
    def __init__(self, func):
        self.func = func
        self.name = func.__name__

    def __get__(self, obj, type=None):
        value = self.func(obj)
        self.cachevalue(obj, value)
        return value

    def cachevalue(self, obj, value):
        # __dict__ assignment required to bypass __setattr__ (eg: repoview)
        obj.__dict__[self.name] = value
739 739
def pipefilter(s, cmd):
    '''filter string S through command CMD, returning its output'''
    proc = subprocess.Popen(cmd, shell=True, close_fds=closefds,
                            stdin=subprocess.PIPE, stdout=subprocess.PIPE)
    stdout, _stderr = proc.communicate(s)
    return stdout
746 746
def tempfilter(s, cmd):
    '''filter string S through a pair of temporary files with CMD.
    CMD is used as a template to create the real command to be run,
    with the strings INFILE and OUTFILE replaced by the real names of
    the temporary files generated.'''
    inname, outname = None, None
    try:
        infd, inname = tempfile.mkstemp(prefix='hg-filter-in-')
        fp = os.fdopen(infd, 'wb')
        fp.write(s)
        fp.close()
        outfd, outname = tempfile.mkstemp(prefix='hg-filter-out-')
        os.close(outfd)
        cmd = cmd.replace('INFILE', inname)
        cmd = cmd.replace('OUTFILE', outname)
        code = os.system(cmd)
        # OpenVMS encodes success in the low bit of the exit status.
        if sys.platform == 'OpenVMS' and code & 1:
            code = 0
        if code:
            raise Abort(_("command '%s' failed: %s") %
                        (cmd, explainexit(code)))
        return readfile(outname)
    finally:
        # Best-effort cleanup of both temp files; ignore removal failures.
        try:
            if inname:
                os.unlink(inname)
        except OSError:
            pass
        try:
            if outname:
                os.unlink(outname)
        except OSError:
            pass
780 780
# Maps a command-prefix scheme to the filter implementation handling it;
# consulted by filter() below.
filtertable = {
    'tempfile:': tempfilter,
    'pipe:': pipefilter,
}
785 785
def filter(s, cmd):
    "filter a string through a command that transforms its input to its output"
    for prefix, fn in filtertable.iteritems():
        if cmd.startswith(prefix):
            return fn(s, cmd[len(prefix):].lstrip())
    # No recognized scheme prefix: default to a plain pipe filter.
    return pipefilter(s, cmd)
792 792
def binary(s):
    """return true if a string is binary data"""
    return bool(s) and '\0' in s
796 796
def increasingchunks(source, min=1024, max=65536):
    '''return no less than min bytes per chunk while data remains,
    doubling min after each chunk until it reaches max'''
    def log2(x):
        if not x:
            return 0
        bits = 0
        while x:
            x >>= 1
            bits += 1
        return bits - 1

    pending = []
    pendinglen = 0
    for chunk in source:
        pending.append(chunk)
        pendinglen += len(chunk)
        if pendinglen >= min:
            if min < max:
                # Double the threshold, but jump straight to the largest
                # power of two not exceeding what we actually collected,
                # capped at max.
                min = min << 1
                nmin = 1 << log2(pendinglen)
                if nmin > min:
                    min = nmin
                if min > max:
                    min = max
            yield ''.join(pending)
            pending = []
            pendinglen = 0
    if pending:
        yield ''.join(pending)
827 827
828 828 Abort = error.Abort
829 829
def always(fn):
    """Matcher predicate that accepts any input."""
    return True
832 832
def never(fn):
    """Matcher predicate that rejects any input."""
    return False
835 835
def nogc(func):
    """disable garbage collector

    Python's garbage collector triggers a GC each time a certain number of
    container objects (the number being defined by gc.get_threshold()) are
    allocated even when marked not to be tracked by the collector. Tracking has
    no effect on when GCs are triggered, only on what objects the GC looks
    into. As a workaround, disable GC while building complex (huge)
    containers.

    This garbage collector issue have been fixed in 2.7.
    """
    def wrapper(*args, **kwargs):
        wasenabled = gc.isenabled()
        gc.disable()
        try:
            return func(*args, **kwargs)
        finally:
            # Only re-enable if the caller had GC on to begin with.
            if wasenabled:
                gc.enable()
    return wrapper
857 857
def pathto(root, n1, n2):
    '''return the relative path from one place to another.
    root should use os.sep to separate directories
    n1 should use os.sep to separate directories
    n2 should use "/" to separate directories
    returns an os.sep-separated path.

    If n1 is a relative path, it's assumed it's
    relative to root.
    n2 should always be relative to root.
    '''
    if not n1:
        return localpath(n2)
    if os.path.isabs(n1):
        # A path on a different drive (Windows) cannot be made relative;
        # fall back to an absolute path under root.
        if os.path.splitdrive(root)[0] != os.path.splitdrive(n1)[0]:
            return os.path.join(root, localpath(n2))
        n2 = '/'.join((pconvert(root), n2))
    a, b = splitpath(n1), n2.split('/')
    a.reverse()
    b.reverse()
    # Drop the shared prefix, then climb out of what remains of n1.
    while a and b and a[-1] == b[-1]:
        a.pop()
        b.pop()
    b.reverse()
    return os.sep.join((['..'] * len(a)) + b) or '.'
883 883
def mainfrozen():
    """return True if we are a frozen executable.

    The code supports py2exe (most common, Windows only) and tools/freeze
    (portable, not much used).
    """
    if safehasattr(sys, "frozen"): # new py2exe
        return True
    if safehasattr(sys, "importers"): # old py2exe
        return True
    return imp.is_frozen("__main__") # tools/freeze
893 893
# the location of data files matching the source code
if mainfrozen() and getattr(sys, 'frozen', None) != 'macosx_app':
    # executable version (py2exe) doesn't support __file__
    datapath = os.path.dirname(sys.executable)
else:
    datapath = os.path.dirname(__file__)

# Point the i18n machinery at the data directory so it can find translations.
i18n.setdatapath(datapath)
902 902
# Cached path of the 'hg' executable; filled lazily by hgexecutable().
_hgexecutable = None

def hgexecutable():
    """return location of the 'hg' executable.

    Defaults to $HG or 'hg' in the search path.
    """
    if _hgexecutable is None:
        hg = os.environ.get('HG')
        mainmod = sys.modules['__main__']
        if hg:
            _sethgexecutable(hg)
        elif mainfrozen():
            if getattr(sys, 'frozen', None) == 'macosx_app':
                # Env variable set by py2app
                _sethgexecutable(os.environ['EXECUTABLEPATH'])
            else:
                _sethgexecutable(sys.executable)
        elif os.path.basename(getattr(mainmod, '__file__', '')) == 'hg':
            # We were launched from an 'hg' script: use that very file.
            _sethgexecutable(mainmod.__file__)
        else:
            # Last resort: search PATH, falling back to argv[0]'s basename.
            exe = findexe('hg') or os.path.basename(sys.argv[0])
            _sethgexecutable(exe)
    return _hgexecutable
927 927
def _sethgexecutable(path):
    """set location of the 'hg' executable"""
    # Stored in a module global; read back via hgexecutable().
    global _hgexecutable
    _hgexecutable = path
932 932
933 933 def _isstdout(f):
934 934 fileno = getattr(f, 'fileno', None)
935 935 return fileno and fileno() == sys.__stdout__.fileno()
936 936
def system(cmd, environ=None, cwd=None, onerr=None, errprefix=None, out=None):
    '''enhanced shell command execution.
    run with environment maybe modified, maybe in different dir.

    if command fails and onerr is None, return status, else raise onerr
    object as exception.

    if out is specified, it is assumed to be a file-like object that has a
    write() method. stdout and stderr will be redirected to out.'''
    if environ is None:
        environ = {}
    try:
        sys.stdout.flush()
    except Exception:
        pass
    def py2shell(val):
        'convert python object into string that is useful to shell'
        if val is None or val is False:
            return '0'
        if val is True:
            return '1'
        return str(val)
    origcmd = cmd
    cmd = quotecommand(cmd)
    if sys.platform == 'plan9' and (sys.version_info[0] == 2
                                    and sys.version_info[1] < 7):
        # subprocess kludge to work around issues in half-baked Python
        # ports, notably bichued/python:
        if not cwd is None:
            os.chdir(cwd)
        rc = os.system(cmd)
    else:
        env = dict(os.environ)
        env.update((k, py2shell(v)) for k, v in environ.iteritems())
        env['HG'] = hgexecutable()
        if out is None or _isstdout(out):
            # Nothing to capture: let the child write to our own stdout.
            rc = subprocess.call(cmd, shell=True, close_fds=closefds,
                                 env=env, cwd=cwd)
        else:
            # Forward the child's combined stdout/stderr into `out`
            # line by line.
            proc = subprocess.Popen(cmd, shell=True, close_fds=closefds,
                                    env=env, cwd=cwd, stdout=subprocess.PIPE,
                                    stderr=subprocess.STDOUT)
            while True:
                line = proc.stdout.readline()
                if not line:
                    break
                out.write(line)
            proc.wait()
            rc = proc.returncode
    # OpenVMS encodes success in the low bit of the exit status.
    if sys.platform == 'OpenVMS' and rc & 1:
        rc = 0
    if rc and onerr:
        errmsg = '%s %s' % (os.path.basename(origcmd.split(None, 1)[0]),
                            explainexit(rc)[0])
        if errprefix:
            errmsg = '%s: %s' % (errprefix, errmsg)
        raise onerr(errmsg)
    return rc
995 995
def checksignature(func):
    '''wrap a function with code to check for calling errors'''
    def check(*args, **kwargs):
        try:
            return func(*args, **kwargs)
        except TypeError:
            # A single-frame traceback means the TypeError came from the
            # call itself (bad argument count/names), not from func's body.
            if len(traceback.extract_tb(sys.exc_info()[2])) == 1:
                raise error.SignatureError
            raise

    return check
1007 1007
def copyfile(src, dest, hardlink=False, copystat=False):
    '''copy a file, preserving mode and optionally other stat info like
    atime/mtime'''
    if os.path.lexists(dest):
        unlink(dest)
    # hardlinks are problematic on CIFS, quietly ignore this flag
    # until we find a way to work around it cleanly (issue4546)
    if False and hardlink:
        try:
            oslink(src, dest)
            return
        except (IOError, OSError):
            pass # fall back to normal copy
    if os.path.islink(src):
        os.symlink(os.readlink(src), dest)
        # copytime is ignored for symlinks, but in general copytime isn't needed
        # for them anyway
    else:
        try:
            shutil.copyfile(src, dest)
            if copystat:
                # copystat also copies mode
                shutil.copystat(src, dest)
            else:
                shutil.copymode(src, dest)
        except shutil.Error as inst:
            raise Abort(str(inst))
1035 1035
def copyfiles(src, dst, hardlink=None, progress=lambda t, pos: None):
    """Copy a directory tree using hardlinks if possible.

    Returns a (hardlink, num) tuple: whether hardlinking was still in
    effect at the end, and how many files were copied/linked.
    """
    num = 0

    if hardlink is None:
        # only attempt hardlinking when src and dst are on the same device
        hardlink = (os.stat(src).st_dev ==
                    os.stat(os.path.dirname(dst)).st_dev)
    if hardlink:
        topic = _('linking')
    else:
        topic = _('copying')

    if os.path.isdir(src):
        os.mkdir(dst)
        for name, kind in osutil.listdir(src):
            srcname = os.path.join(src, name)
            dstname = os.path.join(dst, name)
            def nprog(t, pos):
                # shift nested progress positions by files already handled
                if pos is not None:
                    return progress(t, pos + num)
            hardlink, n = copyfiles(srcname, dstname, hardlink, progress=nprog)
            num += n
    else:
        if hardlink:
            try:
                oslink(src, dst)
            except (IOError, OSError):
                # linking failed (e.g. cross-device); fall back to copying
                # for the remainder of the tree
                hardlink = False
                shutil.copy(src, dst)
        else:
            shutil.copy(src, dst)
        num += 1
        progress(topic, num)
    progress(topic, None)

    return hardlink, num
1072 1072
# basenames that are reserved device names on Windows: files with these
# names (with any extension) cannot be created there
_winreservednames = '''con prn aux nul
com1 com2 com3 com4 com5 com6 com7 com8 com9
lpt1 lpt2 lpt3 lpt4 lpt5 lpt6 lpt7 lpt8 lpt9'''.split()
# characters that may never appear in a Windows filename
_winreservedchars = ':*?"<>|'
def checkwinfilename(path):
    r'''Check that the base-relative path is a valid filename on Windows.
    Returns None if the path is ok, or a UI string describing the problem.

    >>> checkwinfilename("just/a/normal/path")
    >>> checkwinfilename("foo/bar/con.xml")
    "filename contains 'con', which is reserved on Windows"
    >>> checkwinfilename("foo/con.xml/bar")
    "filename contains 'con', which is reserved on Windows"
    >>> checkwinfilename("foo/bar/xml.con")
    >>> checkwinfilename("foo/bar/AUX/bla.txt")
    "filename contains 'AUX', which is reserved on Windows"
    >>> checkwinfilename("foo/bar/bla:.txt")
    "filename contains ':', which is reserved on Windows"
    >>> checkwinfilename("foo/bar/b\07la.txt")
    "filename contains '\\x07', which is invalid on Windows"
    >>> checkwinfilename("foo/bar/bla ")
    "filename ends with ' ', which is not allowed on Windows"
    >>> checkwinfilename("../bar")
    >>> checkwinfilename("foo\\")
    "filename ends with '\\', which is invalid on Windows"
    >>> checkwinfilename("foo\\/bar")
    "directory name ends with '\\', which is invalid on Windows"
    '''
    if path.endswith('\\'):
        return _("filename ends with '\\', which is invalid on Windows")
    if '\\/' in path:
        return _("directory name ends with '\\', which is invalid on Windows")
    # validate each path component separately (both separators accepted)
    for n in path.replace('\\', '/').split('/'):
        if not n:
            continue
        for c in n:
            if c in _winreservedchars:
                return _("filename contains '%s', which is reserved "
                         "on Windows") % c
            # control characters (0x00-0x1f) are invalid in filenames
            if ord(c) <= 31:
                return _("filename contains %r, which is invalid "
                         "on Windows") % c
        # reserved device names are invalid regardless of extension
        base = n.split('.')[0]
        if base and base.lower() in _winreservednames:
            return _("filename contains '%s', which is reserved "
                     "on Windows") % base
        # trailing dot/space is disallowed; the substring test against
        # '..' deliberately lets the '.' and '..' components through
        t = n[-1]
        if t in '. ' and n not in '..':
            return _("filename ends with '%s', which is not allowed "
                     "on Windows") % t
1123 1123
# pick the filename validator for the current OS: native Windows enforces
# the Windows rules above, other platforms delegate to their module
if os.name == 'nt':
    checkosfilename = checkwinfilename
else:
    checkosfilename = platform.checkosfilename
1128 1128
def makelock(info, pathname):
    """Create a lock at *pathname* whose content/target is *info*.

    Preferred form is a symlink (atomic and readable without opening);
    platforms lacking symlinks fall back to an exclusively-created file.
    """
    try:
        return os.symlink(info, pathname)
    except OSError as why:
        # a pre-existing lock must be reported to the caller
        if why.errno == errno.EEXIST:
            raise
    except AttributeError: # no symlink in os
        pass

    # fallback: O_EXCL guarantees we do not clobber an existing lock
    fd = os.open(pathname, os.O_CREAT | os.O_WRONLY | os.O_EXCL)
    os.write(fd, info)
    os.close(fd)
1141 1141
def readlock(pathname):
    """Return the content of the lock at *pathname*.

    Mirrors makelock(): read the symlink target when possible, otherwise
    read the plain lock file.
    """
    try:
        return os.readlink(pathname)
    except OSError as why:
        # EINVAL: not a symlink; ENOSYS: symlinks unsupported — both mean
        # we should try the file-based fallback
        if why.errno not in (errno.EINVAL, errno.ENOSYS):
            raise
    except AttributeError: # no symlink in os
        pass
    fp = posixfile(pathname)
    data = fp.read()
    fp.close()
    return data
1154 1154
def fstat(fp):
    '''stat file object that may not have fileno method.'''
    try:
        return os.fstat(fp.fileno())
    except AttributeError:
        # file-like object without a real descriptor (e.g. StringIO):
        # fall back to stat'ing it by name
        return os.stat(fp.name)
1161 1161
1162 1162 # File system features
1163 1163
def checkcase(path):
    """
    Return true if the given path is on a case-sensitive filesystem

    Requires a path (like /foo/.hg) ending with a foldable final
    directory component.
    """
    st = os.lstat(path)
    dirname, base = os.path.split(path)
    # fold the final component the other way; if nothing changes there is
    # nothing to test against
    folded = base.upper()
    if folded == base:
        folded = base.lower()
    if folded == base:
        return True # no evidence against case sensitivity
    try:
        st2 = os.lstat(os.path.join(dirname, folded))
    except OSError:
        # folded variant does not exist: filesystem distinguishes case
        return True
    # same inode under the folded name means case-insensitive
    return st2 != st
1186 1186
# optional google-re2 support; _re2 is a tri-state flag:
# None = re2 importable but not yet validated, False = unavailable
try:
    import re2
    _re2 = None
except ImportError:
    _re2 = False
1192 1192
class _re(object):
    """Regex facade that prefers the faster re2 engine when usable."""
    def _checkre2(self):
        # validate the re2 binding once and cache the verdict globally
        global _re2
        try:
            # check if match works, see issue3964
            _re2 = bool(re2.match(r'\[([^\[]+)\]', '[ui]'))
        except ImportError:
            _re2 = False

    def compile(self, pat, flags=0):
        '''Compile a regular expression, using re2 if possible

        For best performance, use only re2-compatible regexp features. The
        only flags from the re module that are re2-compatible are
        IGNORECASE and MULTILINE.'''
        if _re2 is None:
            self._checkre2()
        if _re2 and (flags & ~(remod.IGNORECASE | remod.MULTILINE)) == 0:
            # re2 takes its flags inline in the pattern rather than as an
            # argument
            if flags & remod.IGNORECASE:
                pat = '(?i)' + pat
            if flags & remod.MULTILINE:
                pat = '(?m)' + pat
            try:
                return re2.compile(pat)
            except re2.error:
                # pattern uses a feature re2 lacks: fall back to stdlib re
                pass
        return remod.compile(pat, flags)

    @propertycache
    def escape(self):
        '''Return the version of escape corresponding to self.compile.

        This is imperfect because whether re2 or re is used for a particular
        function depends on the flags, etc, but it's the best we can do.
        '''
        global _re2
        if _re2 is None:
            self._checkre2()
        if _re2:
            return re2.escape
        else:
            return remod.escape

# module-level singleton used as a drop-in for the re module
re = _re()
1237 1237
# cache of directory -> {normcased name: on-disk name} listings
_fspathcache = {}
def fspath(name, root):
    '''Get name in the case stored in the filesystem

    The name should be relative to root, and be normcase-ed for efficiency.

    Note that this function is unnecessary, and should not be
    called, for case-sensitive filesystems (simply because it's expensive).

    The root should be normcase-ed, too.
    '''
    def _makefspathcacheentry(dir):
        return dict((normcase(n), n) for n in os.listdir(dir))

    seps = os.sep
    if os.altsep:
        seps = seps + os.altsep
    # Protect backslashes. This gets silly very quickly.
    # NOTE(review): str.replace returns a new string and this result is
    # discarded, so nothing is actually escaped here; the pattern still
    # works because a backslash inside [...] happens to be harmless.
    seps.replace('\\','\\\\')
    # alternate between runs of non-separator and separator characters
    pattern = remod.compile(r'([^%s]+)|([%s]+)' % (seps, seps))
    dir = os.path.normpath(root)
    result = []
    for part, sep in pattern.findall(name):
        if sep:
            # separators are passed through verbatim
            result.append(sep)
            continue

        if dir not in _fspathcache:
            _fspathcache[dir] = _makefspathcacheentry(dir)
        contents = _fspathcache[dir]

        found = contents.get(part)
        if not found:
            # retry "once per directory" per "dirstate.walk" which
            # may take place for each patches of "hg qpush", for example
            _fspathcache[dir] = contents = _makefspathcacheentry(dir)
            found = contents.get(part)

        result.append(found or part)
        dir = os.path.join(dir, part)

    return ''.join(result)
1280 1280
def checknlink(testfile):
    '''check whether hardlink count reporting works properly'''

    # testfile may be open, so we need a separate file for checking to
    # work around issue2543 (or testfile may get lost on Samba shares)
    f1 = testfile + ".hgtmp1"
    if os.path.lexists(f1):
        return False
    try:
        posixfile(f1, 'w').close()
    except IOError:
        return False

    f2 = testfile + ".hgtmp2"
    fd = None
    try:
        oslink(f1, f2)
        # nlinks() may behave differently for files on Windows shares if
        # the file is open.
        fd = posixfile(f2)
        # a working link count reports 2 here (f1 and f2)
        return nlinks(f2) > 1
    except OSError:
        return False
    finally:
        # always clean up the probe files, even on failure
        if fd is not None:
            fd.close()
        for f in (f1, f2):
            try:
                os.unlink(f)
            except OSError:
                pass
1312 1312
def endswithsep(path):
    '''Check path ends with os.sep or os.altsep.'''
    if path.endswith(os.sep):
        return True
    # os.altsep is None on POSIX; keep the short-circuit result so the
    # returned value is identical to the original expression
    return os.altsep and path.endswith(os.altsep)
1316 1316
def splitpath(path):
    '''Split path by os.sep.
    Note that this function does not use os.altsep because this is
    an alternative of simple "xxx.split(os.sep)".
    It is recommended to use os.path.normpath() before using this
    function if needed.'''
    return path.split(os.sep)
1324 1324
def gui():
    '''Are we running in a GUI?'''
    if sys.platform != 'darwin':
        # Windows always has a GUI; elsewhere rely on an X display
        return os.name == "nt" or os.environ.get("DISPLAY")
    if 'SSH_CONNECTION' in os.environ:
        # handle SSH access to a box where the user is logged in
        return False
    if getattr(osutil, 'isgui', None):
        # check if a CoreGraphics session is available
        return osutil.isgui()
    # pure build; use a safe default
    return True
1339 1339
def mktempcopy(name, emptyok=False, createmode=None):
    """Create a temporary file with the same contents from name

    The permission bits are copied from the original file.

    If the temporary file is going to be truncated immediately, you
    can use emptyok=True as an optimization.

    Returns the name of the temporary file.
    """
    d, fn = os.path.split(name)
    # create the temp file next to the original so a later rename is
    # within one directory (and filesystem)
    fd, temp = tempfile.mkstemp(prefix='.%s-' % fn, dir=d)
    os.close(fd)
    # Temporary files are created with mode 0600, which is usually not
    # what we want. If the original file already exists, just copy
    # its mode. Otherwise, manually obey umask.
    copymode(name, temp, createmode)
    if emptyok:
        return temp
    try:
        try:
            ifp = posixfile(name, "rb")
        except IOError as inst:
            if inst.errno == errno.ENOENT:
                # original does not exist: hand back the empty temp file
                return temp
            if not getattr(inst, 'filename', None):
                inst.filename = name
            raise
        ofp = posixfile(temp, "wb")
        for chunk in filechunkiter(ifp):
            ofp.write(chunk)
        ifp.close()
        ofp.close()
    except: # re-raises
        # don't leave a half-written temp file behind
        try: os.unlink(temp)
        except OSError: pass
        raise
    return temp
1378 1378
class atomictempfile(object):
    '''writable file object that atomically updates a file

    All writes will go to a temporary copy of the original file. Call
    close() when you are done writing, and atomictempfile will rename
    the temporary copy to the original name, making the changes
    visible. If the object is destroyed without being closed, all your
    writes are discarded.
    '''
    def __init__(self, name, mode='w+b', createmode=None):
        self.__name = name # permanent name
        self._tempname = mktempcopy(name, emptyok=('w' in mode),
                                    createmode=createmode)
        self._fp = posixfile(self._tempname, mode)

        # delegated methods
        self.write = self._fp.write
        self.seek = self._fp.seek
        self.tell = self._fp.tell
        self.fileno = self._fp.fileno

    def close(self):
        # publish: the rename makes all buffered writes visible atomically
        if not self._fp.closed:
            self._fp.close()
            rename(self._tempname, localpath(self.__name))

    def discard(self):
        # abandon all writes; the original file is left untouched
        if not self._fp.closed:
            try:
                os.unlink(self._tempname)
            except OSError:
                pass
            self._fp.close()

    def __del__(self):
        if safehasattr(self, '_fp'): # constructor actually did something
            self.discard()
1416 1416
def makedirs(name, mode=None, notindexed=False):
    """recursive directory creation with parent mode inheritance"""
    try:
        makedir(name, notindexed)
    except OSError as err:
        if err.errno == errno.EEXIST:
            # NOTE(review): also returns if the existing path is a file,
            # not a directory — callers appear to rely on this leniency
            return
        if err.errno != errno.ENOENT or not name:
            raise
        parent = os.path.dirname(os.path.abspath(name))
        if parent == name:
            # reached the filesystem root without success
            raise
        makedirs(parent, mode, notindexed)
        makedir(name, notindexed)
    if mode is not None:
        os.chmod(name, mode)
1433 1433
def ensuredirs(name, mode=None, notindexed=False):
    """race-safe recursive directory creation

    Newly created directories are marked as "not to be indexed by
    the content indexing service", if ``notindexed`` is specified
    for "write" mode access.
    """
    if os.path.isdir(name):
        return
    # create parents bottom-up before attempting this directory
    parent = os.path.dirname(os.path.abspath(name))
    if parent != name:
        ensuredirs(parent, mode, notindexed)
    try:
        makedir(name, notindexed)
    except OSError as err:
        if err.errno == errno.EEXIST and os.path.isdir(name):
            # someone else seems to have won a directory creation race
            return
        raise
    if mode is not None:
        os.chmod(name, mode)
1455 1455
def readfile(path):
    """Return the entire binary content of *path*."""
    fp = open(path, 'rb')
    try:
        return fp.read()
    finally:
        fp.close()
1459 1459
def writefile(path, text):
    """Replace the content of *path* with the bytes in *text*."""
    fp = open(path, 'wb')
    try:
        fp.write(text)
    finally:
        fp.close()
1463 1463
def appendfile(path, text):
    """Append the bytes in *text* to *path*, creating it if missing."""
    fp = open(path, 'ab')
    try:
        fp.write(text)
    finally:
        fp.close()
1467 1467
class chunkbuffer(object):
    """Allow arbitrary sized chunks of data to be efficiently read from an
    iterator over chunks of arbitrary size."""

    def __init__(self, in_iter):
        """in_iter is the iterator that's iterating over the input chunks.
        targetsize is how big a buffer to try to maintain."""
        def splitbig(chunks):
            # cap chunk size: anything over 1 MB is re-yielded in 256 KB
            # slices so no single read holds a huge string
            for chunk in chunks:
                if len(chunk) > 2**20:
                    pos = 0
                    while pos < len(chunk):
                        end = pos + 2 ** 18
                        yield chunk[pos:end]
                        pos = end
                else:
                    yield chunk
        self.iter = splitbig(in_iter)
        self._queue = collections.deque()
        # read offset into the chunk at the head of the queue
        self._chunkoffset = 0

    def read(self, l=None):
        """Read L bytes of data from the iterator of chunks of data.
        Returns less than L bytes if the iterator runs dry.

        If size parameter is omitted, read everything"""
        if l is None:
            return ''.join(self.iter)

        left = l
        buf = []
        queue = self._queue
        while left > 0:
            # refill the queue
            if not queue:
                target = 2**18
                for chunk in self.iter:
                    queue.append(chunk)
                    target -= len(chunk)
                    if target <= 0:
                        break
                if not queue:
                    # source iterator exhausted: short read
                    break

            # The easy way to do this would be to queue.popleft(), modify the
            # chunk (if necessary), then queue.appendleft(). However, for cases
            # where we read partial chunk content, this incurs 2 dequeue
            # mutations and creates a new str for the remaining chunk in the
            # queue. Our code below avoids this overhead.

            chunk = queue[0]
            chunkl = len(chunk)
            offset = self._chunkoffset

            # Use full chunk.
            if offset == 0 and left >= chunkl:
                left -= chunkl
                queue.popleft()
                buf.append(chunk)
                # self._chunkoffset remains at 0.
                continue

            chunkremaining = chunkl - offset

            # Use all of unconsumed part of chunk.
            if left >= chunkremaining:
                left -= chunkremaining
                queue.popleft()
                # offset == 0 is enabled by block above, so this won't merely
                # copy via ``chunk[0:]``.
                buf.append(chunk[offset:])
                self._chunkoffset = 0

            # Partial chunk needed.
            else:
                buf.append(chunk[offset:offset + left])
                self._chunkoffset += left
                # drives left negative, terminating the loop
                left -= chunkremaining

        return ''.join(buf)
1548 1548
def filechunkiter(f, size=65536, limit=None):
    """Create a generator that produces the data in the file size
    (default 65536) bytes at a time, up to optional limit (default is
    to read all data). Chunks may be less than size bytes if the
    chunk is the last chunk in the file, or the file is a socket or
    some other type of file that sometimes reads less data than is
    requested."""
    assert size >= 0
    assert limit is None or limit >= 0
    while True:
        nbytes = size if limit is None else min(limit, size)
        if not nbytes:
            # size 0 or limit exhausted: nothing more to read
            break
        s = f.read(nbytes)
        if not s:
            break
        if limit:
            limit -= len(s)
        yield s
1569 1569
def makedate(timestamp=None):
    '''Return a unix timestamp (or the current time) as a (unixtime,
    offset) tuple based off the local timezone.'''
    if timestamp is None:
        timestamp = time.time()
    if timestamp < 0:
        hint = _("check your clock")
        raise Abort(_("negative timestamp: %d") % timestamp, hint=hint)
    # the local-vs-UTC skew at this instant is the zone offset in seconds
    skew = (datetime.datetime.utcfromtimestamp(timestamp) -
            datetime.datetime.fromtimestamp(timestamp))
    return timestamp, skew.days * 86400 + skew.seconds
1582 1582
def datestr(date=None, format='%a %b %d %H:%M:%S %Y %1%2'):
    """represent a (unixtime, offset) tuple as a localized time.
    unixtime is seconds since the epoch, and offset is the time zone's
    number of seconds away from UTC.

    In the format string, %1 expands to the sign and hours of the
    offset, %2 to its minutes, and %z to both.
    """
    t, tz = date or makedate()
    if "%1" in format or "%2" in format or "%z" in format:
        sign = (tz > 0) and "-" or "+"
        minutes = abs(tz) // 60
        q, r = divmod(minutes, 60)
        format = format.replace("%z", "%1%2")
        format = format.replace("%1", "%c%02d" % (sign, q))
        format = format.replace("%2", "%02d" % r)
    d = t - tz
    # clamp to the full signed 32-bit range; the previous lower bound of
    # -0x7fffffff wrongly excluded INT32_MIN (-0x80000000)
    if d > 0x7fffffff:
        d = 0x7fffffff
    elif d < -0x80000000:
        d = -0x80000000
    # Never use time.gmtime() and datetime.datetime.fromtimestamp()
    # because they use the gmtime() system call which is buggy on Windows
    # for negative values.
    t = datetime.datetime(1970, 1, 1) + datetime.timedelta(seconds=d)
    s = t.strftime(format)
    return s
1606 1606
def shortdate(date=None):
    """turn (timestamp, tzoff) tuple into ISO 8601 date."""
    return datestr(date, format='%Y-%m-%d')
1610 1610
def parsetimezone(tz):
    """parse a timezone string and return an offset integer"""
    # numeric form +HHMM/-HHMM; the sign is flipped because the offset
    # convention here is seconds *away from* UTC
    if tz[0] in "+-" and len(tz) == 5 and tz[1:].isdigit():
        sign = 1 if tz[0] == "+" else -1
        hours = int(tz[1:3])
        minutes = int(tz[3:5])
        return -sign * (hours * 60 + minutes) * 60
    # only the UTC aliases are understood symbolically
    if tz in ("GMT", "UTC"):
        return 0
    return None
1621 1621
def strdate(string, format, defaults=[]):
    """parse a localized time string and return a (unixtime, offset) tuple.
    if the string cannot be parsed, ValueError is raised."""
    # NOTE: unixtime = localunixtime + offset
    # defaults maps a specificity key (e.g. "d", "mb") to a
    # (biased, today) string pair; callers always pass a dict
    offset, date = parsetimezone(string.split()[-1]), string
    if offset is not None:
        # the last token was a timezone: strip it before strptime
        date = " ".join(string.split()[:-1])

    # add missing elements from defaults
    usenow = False # default to using biased defaults
    for part in ("S", "M", "HI", "d", "mb", "yY"): # decreasing specificity
        found = [True for p in part if ("%"+p) in format]
        if not found:
            date += "@" + defaults[part][usenow]
            format += "@%" + part[0]
        else:
            # We've found a specific time element, less specific time
            # elements are relative to today
            usenow = True

    timetuple = time.strptime(date, format)
    localunixtime = int(calendar.timegm(timetuple))
    if offset is None:
        # local timezone
        unixtime = int(time.mktime(timetuple))
        offset = unixtime - localunixtime
    else:
        unixtime = localunixtime + offset
    return unixtime, offset
1651 1651
def parsedate(date, formats=None, bias=None):
    """parse a localized date/time and return a (unixtime, offset) tuple.

    The date may be a "unixtime offset" string or in one of the specified
    formats. If the date already is a (unixtime, offset) tuple, it is returned.

    >>> parsedate(' today ') == parsedate(\
                                  datetime.date.today().strftime('%b %d'))
    True
    >>> parsedate( 'yesterday ') == parsedate((datetime.date.today() -\
                                               datetime.timedelta(days=1)\
                                              ).strftime('%b %d'))
    True
    >>> now, tz = makedate()
    >>> strnow, strtz = parsedate('now')
    >>> (strnow - now) < 1
    True
    >>> tz == strtz
    True
    """
    if bias is None:
        bias = {}
    if not date:
        return 0, 0
    if isinstance(date, tuple) and len(date) == 2:
        return date
    if not formats:
        formats = defaultdateformats
    date = date.strip()

    if date == 'now' or date == _('now'):
        return makedate()
    if date == 'today' or date == _('today'):
        date = datetime.date.today().strftime('%b %d')
    elif date == 'yesterday' or date == _('yesterday'):
        date = (datetime.date.today() -
                datetime.timedelta(days=1)).strftime('%b %d')

    try:
        when, offset = map(int, date.split(' '))
    except ValueError:
        # fill out defaults
        now = makedate()
        defaults = {}
        for part in ("d", "mb", "yY", "HI", "M", "S"):
            # this piece is for rounding the specific end of unknowns
            b = bias.get(part)
            if b is None:
                if part[0] in "HMS":
                    b = "00"
                else:
                    b = "0"

            # this piece is for matching the generic end to today's date
            n = datestr(now, "%" + part[0])

            defaults[part] = (b, n)

        for format in formats:
            try:
                when, offset = strdate(date, format, defaults)
            except (ValueError, OverflowError):
                pass
            else:
                break
        else:
            raise Abort(_('invalid date: %r') % date)
    # validate explicit (probably user-specified) date and
    # time zone offset. values must fit in signed 32 bits for
    # current 32-bit linux runtimes. timezones go from UTC-12
    # to UTC+14
    # explicit bounds instead of abs(when): the magnitude of INT32_MIN
    # is 0x80000000, which abs() wrongly rejected as out of range
    if when < -0x80000000 or when > 0x7fffffff:
        raise Abort(_('date exceeds 32 bits: %d') % when)
    if offset < -50400 or offset > 43200:
        raise Abort(_('impossible time zone offset: %d') % offset)
    return when, offset
1728 1728
def matchdate(date):
    """Return a function that matches a given date match specifier

    Formats include:

    '{date}' match a given date to the accuracy provided

    '<{date}' on or before a given date

    '>{date}' on or after a given date

    >>> p1 = parsedate("10:29:59")
    >>> p2 = parsedate("10:30:00")
    >>> p3 = parsedate("10:30:59")
    >>> p4 = parsedate("10:31:00")
    >>> p5 = parsedate("Sep 15 10:30:00 1999")
    >>> f = matchdate("10:30")
    >>> f(p1[0])
    False
    >>> f(p2[0])
    True
    >>> f(p3[0])
    True
    >>> f(p4[0])
    False
    >>> f(p5[0])
    False
    """

    def lower(date):
        # round unspecified fields down: earliest instant the spec can mean
        d = {'mb': "1", 'd': "1"}
        return parsedate(date, extendeddateformats, d)[0]

    def upper(date):
        # round unspecified fields up: latest instant the spec can mean;
        # try month lengths 31/30/29 before settling on 28
        d = {'mb': "12", 'HI': "23", 'M': "59", 'S': "59"}
        for days in ("31", "30", "29"):
            try:
                d["d"] = days
                return parsedate(date, extendeddateformats, d)[0]
            except Abort:
                pass
        d["d"] = "28"
        return parsedate(date, extendeddateformats, d)[0]

    date = date.strip()

    if not date:
        raise Abort(_("dates cannot consist entirely of whitespace"))
    elif date[0] == "<":
        if not date[1:]:
            raise Abort(_("invalid day spec, use '<DATE'"))
        when = upper(date[1:])
        return lambda x: x <= when
    elif date[0] == ">":
        if not date[1:]:
            raise Abort(_("invalid day spec, use '>DATE'"))
        when = lower(date[1:])
        return lambda x: x >= when
    elif date[0] == "-":
        # "-N": anything within the last N days
        try:
            days = int(date[1:])
        except ValueError:
            raise Abort(_("invalid day spec: %s") % date[1:])
        if days < 0:
            raise Abort(_('%s must be nonnegative (see "hg help dates")')
                % date[1:])
        when = makedate()[0] - days * 3600 * 24
        return lambda x: x >= when
    elif " to " in date:
        # explicit inclusive range
        a, b = date.split(" to ")
        start, stop = lower(a), upper(b)
        return lambda x: x >= start and x <= stop
    else:
        # bare date: match the whole span it denotes
        start, stop = lower(date), upper(date)
        return lambda x: x >= start and x <= stop
1804 1804
def stringmatcher(pattern):
    """
    accepts a string, possibly starting with 're:' or 'literal:' prefix.
    returns the matcher name, pattern, and matcher function.
    missing or unknown prefixes are treated as literal matches.

    >>> kind, pat, m = stringmatcher('re:a.+b')
    >>> (kind, pat, bool(m('fooadefbar')))
    ('re', 'a.+b', True)
    >>> kind, pat, m = stringmatcher('literal:re:foobar')
    >>> (kind, pat, m('re:foobar'))
    ('literal', 're:foobar', True)
    >>> kind, pat, m = stringmatcher('foo:bar')
    >>> (kind, pat, m('foo:bar'))
    ('literal', 'foo:bar', True)
    """
    if pattern.startswith('re:'):
        regexp = pattern[3:]
        try:
            compiled = remod.compile(regexp)
        except remod.error as e:
            raise error.ParseError(_('invalid regular expression: %s')
                                   % e)
        return 're', regexp, compiled.search
    if pattern.startswith('literal:'):
        pattern = pattern[8:]
    # unknown prefixes fall through and match literally
    return 'literal', pattern, pattern.__eq__
1843 1843
def shortuser(user):
    """Return a short representation of a user name or email address."""
    # drop the domain part, then anything before an opening bracket
    at = user.find('@')
    if at >= 0:
        user = user[:at]
    bracket = user.find('<')
    if bracket >= 0:
        user = user[bracket + 1:]
    # finally truncate at the first space or dot
    for sep in (' ', '.'):
        idx = user.find(sep)
        if idx >= 0:
            user = user[:idx]
    return user
1859 1859
def emailuser(user):
    """Return the user portion of an email address."""
    at = user.find('@')
    if at >= 0:
        user = user[:at]
    bracket = user.find('<')
    # keep only what follows an opening angle bracket, if any
    return user[bracket + 1:] if bracket >= 0 else user
1869 1869
def email(author):
    '''get email of author.'''
    # slice between '<' and '>' when present; otherwise the whole string
    start = author.find('<') + 1
    end = author.find('>')
    if end == -1:
        end = None
    return author[start:end]
1876 1876
def ellipsis(text, maxlength=400):
    """Trim string to at most maxlength (default: 400) columns in display."""
    # encoding-aware: measures display columns, not bytes or characters
    return encoding.trim(text, maxlength, ellipsis='...')
1880 1880
def unitcountfn(*unittable):
    '''return a function that renders a readable count of some quantity'''

    def go(count):
        # scan from largest unit to smallest, using the first whose
        # threshold (multiplier * divisor) the count reaches
        for multiplier, divisor, fmt in unittable:
            if count >= divisor * multiplier:
                return fmt % (count / float(divisor))
        # below every threshold (e.g. zero): fall back to the last entry
        return unittable[-1][2] % count

    return go
1891 1891
# render a byte count with up to three significant digits, scaling
# through binary units (GB/MB/KB) and falling back to plain bytes
bytecount = unitcountfn(
    (100, 1 << 30, _('%.0f GB')),
    (10, 1 << 30, _('%.1f GB')),
    (1, 1 << 30, _('%.2f GB')),
    (100, 1 << 20, _('%.0f MB')),
    (10, 1 << 20, _('%.1f MB')),
    (1, 1 << 20, _('%.2f MB')),
    (100, 1 << 10, _('%.0f KB')),
    (10, 1 << 10, _('%.1f KB')),
    (1, 1 << 10, _('%.2f KB')),
    (1, 1, _('%.0f bytes')),
    )
1904 1904
def uirepr(s):
    # Avoid double backslash in Windows path repr()
    return repr(s).replace('\\\\', '\\')
1908 1908
# delay import of textwrap
def MBTextWrapper(**kwargs):
    """Return a column-width-aware TextWrapper instance.

    On the first call the subclass replaces this function in the module
    namespace (see the trailing ``global`` assignment), so the class
    body is only evaluated once.
    """
    class tw(textwrap.TextWrapper):
        """
        Extend TextWrapper for width-awareness.

        Neither number of 'bytes' in any encoding nor 'characters' is
        appropriate to calculate terminal columns for specified string.

        Original TextWrapper implementation uses built-in 'len()' directly,
        so overriding is needed to use width information of each characters.

        In addition, characters classified into 'ambiguous' width are
        treated as wide in East Asian area, but as narrow in other.

        This requires use decision to determine width of such characters.
        """
        def _cutdown(self, ucstr, space_left):
            # split ucstr at the first index whose cumulative display
            # width exceeds space_left; returns (head, tail)
            l = 0
            colwidth = encoding.ucolwidth
            for i in xrange(len(ucstr)):
                l += colwidth(ucstr[i])
                if space_left < l:
                    return (ucstr[:i], ucstr[i:])
            return ucstr, ''

        # overriding of base class
        def _handle_long_word(self, reversed_chunks, cur_line, cur_len, width):
            space_left = max(width - cur_len, 1)

            if self.break_long_words:
                cut, res = self._cutdown(reversed_chunks[-1], space_left)
                cur_line.append(cut)
                reversed_chunks[-1] = res
            elif not cur_line:
                cur_line.append(reversed_chunks.pop())

        # this overriding code is imported from TextWrapper of Python 2.6
        # to calculate columns of string by 'encoding.ucolwidth()'
        def _wrap_chunks(self, chunks):
            colwidth = encoding.ucolwidth

            lines = []
            if self.width <= 0:
                raise ValueError("invalid width %r (must be > 0)" % self.width)

            # Arrange in reverse order so items can be efficiently popped
            # from a stack of chucks.
            chunks.reverse()

            while chunks:

                # Start the list of chunks that will make up the current line.
                # cur_len is just the length of all the chunks in cur_line.
                cur_line = []
                cur_len = 0

                # Figure out which static string will prefix this line.
                if lines:
                    indent = self.subsequent_indent
                else:
                    indent = self.initial_indent

                # Maximum width for this line.
                width = self.width - len(indent)

                # First chunk on line is whitespace -- drop it, unless this
                # is the very beginning of the text (i.e. no lines started yet).
                if self.drop_whitespace and chunks[-1].strip() == '' and lines:
                    del chunks[-1]

                while chunks:
                    l = colwidth(chunks[-1])

                    # Can at least squeeze this chunk onto the current line.
                    if cur_len + l <= width:
                        cur_line.append(chunks.pop())
                        cur_len += l

                    # Nope, this line is full.
                    else:
                        break

                # The current line is full, and the next chunk is too big to
                # fit on *any* line (not just this one).
                if chunks and colwidth(chunks[-1]) > width:
                    self._handle_long_word(chunks, cur_line, cur_len, width)

                # If the last chunk on this line is all whitespace, drop it.
                if (self.drop_whitespace and
                    cur_line and cur_line[-1].strip() == ''):
                    del cur_line[-1]

                # Convert current line back to a string and store it in list
                # of all lines (return value).
                if cur_line:
                    lines.append(indent + ''.join(cur_line))

            return lines

    # cache the class: subsequent calls skip the class definition
    global MBTextWrapper
    MBTextWrapper = tw
    return tw(**kwargs)
2012 2012
def wrap(line, width, initindent='', hangindent=''):
    """Wrap a byte-string line to at most width display columns.

    initindent prefixes the first output line, hangindent all subsequent
    ones. Input is decoded to unicode for column-aware wrapping and the
    result re-encoded in the local encoding (Python 2 semantics).
    """
    maxindent = max(len(hangindent), len(initindent))
    if width <= maxindent:
        # adjust for weird terminal size
        width = max(78, maxindent + 1)
    line = line.decode(encoding.encoding, encoding.encodingmode)
    initindent = initindent.decode(encoding.encoding, encoding.encodingmode)
    hangindent = hangindent.decode(encoding.encoding, encoding.encodingmode)
    wrapper = MBTextWrapper(width=width,
                            initial_indent=initindent,
                            subsequent_indent=hangindent)
    return wrapper.fill(line).encode(encoding.encoding)
2025 2025
def iterlines(iterator):
    """Yield individual text lines from an iterable of multi-line chunks."""
    for block in iterator:
        for textline in block.splitlines():
            yield textline
2030 2030
def expandpath(path):
    """Expand environment variables, then a leading ~, in path."""
    withvars = os.path.expandvars(path)
    return os.path.expanduser(withvars)
2033 2033
def hgcmd():
    """Return the command used to execute current hg

    This is different from hgexecutable() because on Windows we want
    to avoid things opening new shell windows like batch files, so we
    get either the python call or current executable.
    """
    if not mainfrozen():
        return gethgcmd()
    if getattr(sys, 'frozen', None) == 'macosx_app':
        # Env variable set by py2app
        return [os.environ['EXECUTABLEPATH']]
    return [sys.executable]
2048 2048
def rundetached(args, condfn):
    """Execute the argument list in a detached process.

    condfn is a callable which is called repeatedly and should return
    True once the child process is known to have started successfully.
    At this point, the child process PID is returned. If the child
    process fails to start or finishes before condfn() evaluates to
    True, return -1.
    """
    # Windows case is easier because the child process is either
    # successfully starting and validating the condition or exiting
    # on failure. We just poll on its PID. On Unix, if the child
    # process fails to start, it will be left in a zombie state until
    # the parent wait on it, which we cannot do since we expect a long
    # running process on success. Instead we listen for SIGCHLD telling
    # us our child process terminated.
    terminated = set()
    def handler(signum, frame):
        terminated.add(os.wait())
    prevhandler = None
    # SIGCHLD is absent on Windows, hence the getattr guard
    SIGCHLD = getattr(signal, 'SIGCHLD', None)
    if SIGCHLD is not None:
        prevhandler = signal.signal(SIGCHLD, handler)
    try:
        pid = spawndetached(args)
        while not condfn():
            # re-test condfn() after the liveness check to avoid
            # declaring failure when the child exits right after
            # having satisfied the condition
            if ((pid in terminated or not testpid(pid))
                and not condfn()):
                return -1
            time.sleep(0.1)
        return pid
    finally:
        # always restore the previous SIGCHLD handler
        if prevhandler is not None:
            signal.signal(signal.SIGCHLD, prevhandler)
2083 2083
def interpolate(prefix, mapping, s, fn=None, escape_prefix=False):
    """Return the result of interpolating items in the mapping into string s.

    prefix is a single character string, or a two character string with
    a backslash as the first character if the prefix needs to be escaped in
    a regular expression.

    fn is an optional function that will be applied to the replacement text
    just before replacement.

    escape_prefix is an optional flag that allows using doubled prefix for
    its escaping.
    """
    fn = fn or (lambda s: s)
    patterns = '|'.join(mapping.keys())
    if escape_prefix:
        patterns += '|' + prefix
        if len(prefix) > 1:
            prefix_char = prefix[1:]
        else:
            prefix_char = prefix
        # map the doubled prefix to a literal prefix character; copy the
        # mapping first so the caller's dict is not mutated as a side
        # effect of this call
        mapping = mapping.copy()
        mapping[prefix_char] = prefix_char
    r = remod.compile(r'%s(%s)' % (prefix, patterns))
    return r.sub(lambda x: fn(mapping[x.group()[1:]]), s)
2108 2108
def getport(port):
    """Return the port for a given network service.

    If port is an integer, it's returned as is. If it's a string, it's
    looked up using socket.getservbyname(). If there's no matching
    service, error.Abort is raised.
    """
    try:
        return int(port)
    except ValueError:
        # not numeric: fall through to a service-name lookup
        try:
            return socket.getservbyname(port)
        except socket.error:
            raise Abort(_("no port number associated with service '%s'")
                        % port)
2125 2125
# accepted boolean spellings for parsebool(); keys must be lower-case
# because parsebool() lowercases its input before the lookup
_booleans = {'1': True, 'yes': True, 'true': True, 'on': True, 'always': True,
             '0': False, 'no': False, 'false': False, 'off': False,
             'never': False}
2129 2129
def parsebool(s):
    """Parse s into a boolean.

    If s is not a valid boolean, returns None.
    """
    # case-insensitive lookup; unknown spellings map to None
    return _booleans.get(s.lower())
2136 2136
_hexdig = '0123456789ABCDEFabcdef'
# precomputed table mapping every two-digit hex escape (in any case mix)
# to its character; used by _urlunquote()
_hextochr = dict((a + b, chr(int(a + b, 16)))
                 for a in _hexdig for b in _hexdig)
2140 2140
def _urlunquote(s):
    """Decode HTTP/HTML % encoding.

    >>> _urlunquote('abc%20def')
    'abc def'
    """
    res = s.split('%')
    # fastpath
    if len(res) == 1:
        return s
    s = res[0]
    for item in res[1:]:
        try:
            s += _hextochr[item[:2]] + item[2:]
        except KeyError:
            # not a valid two-digit hex escape: keep the literal '%'
            s += '%' + item
        except UnicodeDecodeError:
            # Python 2 only: concatenating the decoded byte to a unicode
            # string failed, so append the code point instead
            s += unichr(int(item[:2], 16)) + item[2:]
    return s
2160 2160
class url(object):
    r"""Reliable URL parser.

    This parses URLs and provides attributes for the following
    components:

    <scheme>://<user>:<passwd>@<host>:<port>/<path>?<query>#<fragment>

    Missing components are set to None. The only exception is
    fragment, which is set to '' if present but empty.

    If parsefragment is False, fragment is included in query. If
    parsequery is False, query is included in path. If both are
    False, both fragment and query are included in path.

    See http://www.ietf.org/rfc/rfc2396.txt for more information.

    Note that for backward compatibility reasons, bundle URLs do not
    take host names. That means 'bundle://../' has a path of '../'.

    Examples:

    >>> url('http://www.ietf.org/rfc/rfc2396.txt')
    <url scheme: 'http', host: 'www.ietf.org', path: 'rfc/rfc2396.txt'>
    >>> url('ssh://[::1]:2200//home/joe/repo')
    <url scheme: 'ssh', host: '[::1]', port: '2200', path: '/home/joe/repo'>
    >>> url('file:///home/joe/repo')
    <url scheme: 'file', path: '/home/joe/repo'>
    >>> url('file:///c:/temp/foo/')
    <url scheme: 'file', path: 'c:/temp/foo/'>
    >>> url('bundle:foo')
    <url scheme: 'bundle', path: 'foo'>
    >>> url('bundle://../foo')
    <url scheme: 'bundle', path: '../foo'>
    >>> url(r'c:\foo\bar')
    <url path: 'c:\\foo\\bar'>
    >>> url(r'\\blah\blah\blah')
    <url path: '\\\\blah\\blah\\blah'>
    >>> url(r'\\blah\blah\blah#baz')
    <url path: '\\\\blah\\blah\\blah', fragment: 'baz'>
    >>> url(r'file:///C:\users\me')
    <url scheme: 'file', path: 'C:\\users\\me'>

    Authentication credentials:

    >>> url('ssh://joe:xyz@x/repo')
    <url scheme: 'ssh', user: 'joe', passwd: 'xyz', host: 'x', path: 'repo'>
    >>> url('ssh://joe@x/repo')
    <url scheme: 'ssh', user: 'joe', host: 'x', path: 'repo'>

    Query strings and fragments:

    >>> url('http://host/a?b#c')
    <url scheme: 'http', host: 'host', path: 'a', query: 'b', fragment: 'c'>
    >>> url('http://host/a?b#c', parsequery=False, parsefragment=False)
    <url scheme: 'http', host: 'host', path: 'a?b#c'>
    """

    # characters urllib.quote may leave unescaped in credentials / paths
    _safechars = "!~*'()+"
    _safepchars = "/!~*'()+:\\"
    _matchscheme = remod.compile(r'^[a-zA-Z0-9+.\-]+:').match

    def __init__(self, path, parsequery=True, parsefragment=True):
        # We slowly chomp away at path until we have only the path left
        self.scheme = self.user = self.passwd = self.host = None
        self.port = self.path = self.query = self.fragment = None
        self._localpath = True
        self._hostport = ''
        self._origpath = path

        if parsefragment and '#' in path:
            path, self.fragment = path.split('#', 1)
            if not path:
                path = None

        # special case for Windows drive letters and UNC paths
        if hasdriveletter(path) or path.startswith(r'\\'):
            self.path = path
            return

        # For compatibility reasons, we can't handle bundle paths as
        # normal URLS
        if path.startswith('bundle:'):
            self.scheme = 'bundle'
            path = path[7:]
            if path.startswith('//'):
                path = path[2:]
            self.path = path
            return

        if self._matchscheme(path):
            parts = path.split(':', 1)
            if parts[0]:
                self.scheme, path = parts
                self._localpath = False

        if not path:
            path = None
            if self._localpath:
                self.path = ''
                return
        else:
            if self._localpath:
                self.path = path
                return

            if parsequery and '?' in path:
                path, self.query = path.split('?', 1)
                if not path:
                    path = None
                if not self.query:
                    self.query = None

            # // is required to specify a host/authority
            if path and path.startswith('//'):
                parts = path[2:].split('/', 1)
                if len(parts) > 1:
                    self.host, path = parts
                else:
                    self.host = parts[0]
                    path = None
                if not self.host:
                    self.host = None
                    # path of file:///d is /d
                    # path of file:///d:/ is d:/, not /d:/
                    if path and not hasdriveletter(path):
                        path = '/' + path

            if self.host and '@' in self.host:
                self.user, self.host = self.host.rsplit('@', 1)
                if ':' in self.user:
                    self.user, self.passwd = self.user.split(':', 1)
                if not self.host:
                    self.host = None

            # Don't split on colons in IPv6 addresses without ports
            if (self.host and ':' in self.host and
                not (self.host.startswith('[') and self.host.endswith(']'))):
                self._hostport = self.host
                self.host, self.port = self.host.rsplit(':', 1)
                if not self.host:
                    self.host = None

            if (self.host and self.scheme == 'file' and
                self.host not in ('localhost', '127.0.0.1', '[::1]')):
                raise Abort(_('file:// URLs can only refer to localhost'))

        self.path = path

        # leave the query string escaped
        for a in ('user', 'passwd', 'host', 'port',
                  'path', 'fragment'):
            v = getattr(self, a)
            if v is not None:
                setattr(self, a, _urlunquote(v))

    def __repr__(self):
        attrs = []
        for a in ('scheme', 'user', 'passwd', 'host', 'port', 'path',
                  'query', 'fragment'):
            v = getattr(self, a)
            if v is not None:
                attrs.append('%s: %r' % (a, v))
        return '<url %s>' % ', '.join(attrs)

    def __str__(self):
        r"""Join the URL's components back into a URL string.

        Examples:

        >>> str(url('http://user:pw@host:80/c:/bob?fo:oo#ba:ar'))
        'http://user:pw@host:80/c:/bob?fo:oo#ba:ar'
        >>> str(url('http://user:pw@host:80/?foo=bar&baz=42'))
        'http://user:pw@host:80/?foo=bar&baz=42'
        >>> str(url('http://user:pw@host:80/?foo=bar%3dbaz'))
        'http://user:pw@host:80/?foo=bar%3dbaz'
        >>> str(url('ssh://user:pw@[::1]:2200//home/joe#'))
        'ssh://user:pw@[::1]:2200//home/joe#'
        >>> str(url('http://localhost:80//'))
        'http://localhost:80//'
        >>> str(url('http://localhost:80/'))
        'http://localhost:80/'
        >>> str(url('http://localhost:80'))
        'http://localhost:80/'
        >>> str(url('bundle:foo'))
        'bundle:foo'
        >>> str(url('bundle://../foo'))
        'bundle:../foo'
        >>> str(url('path'))
        'path'
        >>> str(url('file:///tmp/foo/bar'))
        'file:///tmp/foo/bar'
        >>> str(url('file:///c:/tmp/foo/bar'))
        'file:///c:/tmp/foo/bar'
        >>> print url(r'bundle:foo\bar')
        bundle:foo\bar
        >>> print url(r'file:///D:\data\hg')
        file:///D:\data\hg
        """
        if self._localpath:
            s = self.path
            if self.scheme == 'bundle':
                s = 'bundle:' + s
            if self.fragment:
                s += '#' + self.fragment
            return s

        s = self.scheme + ':'
        if self.user or self.passwd or self.host:
            s += '//'
        elif self.scheme and (not self.path or self.path.startswith('/')
                              or hasdriveletter(self.path)):
            s += '//'
            if hasdriveletter(self.path):
                s += '/'
        if self.user:
            s += urllib.quote(self.user, safe=self._safechars)
        if self.passwd:
            s += ':' + urllib.quote(self.passwd, safe=self._safechars)
        if self.user or self.passwd:
            s += '@'
        if self.host:
            if not (self.host.startswith('[') and self.host.endswith(']')):
                s += urllib.quote(self.host)
            else:
                s += self.host
        if self.port:
            s += ':' + urllib.quote(self.port)
        if self.host:
            s += '/'
        if self.path:
            # TODO: similar to the query string, we should not unescape the
            # path when we store it, the path might contain '%2f' = '/',
            # which we should *not* escape.
            s += urllib.quote(self.path, safe=self._safepchars)
        if self.query:
            # we store the query in escaped form.
            s += '?' + self.query
        if self.fragment is not None:
            s += '#' + urllib.quote(self.fragment, safe=self._safepchars)
        return s

    def authinfo(self):
        # render the URL without credentials by temporarily clearing them
        user, passwd = self.user, self.passwd
        try:
            self.user, self.passwd = None, None
            s = str(self)
        finally:
            self.user, self.passwd = user, passwd
        if not self.user:
            return (s, None)
        # authinfo[1] is passed to urllib2 password manager, and its
        # URIs must not contain credentials. The host is passed in the
        # URIs list because Python < 2.4.3 uses only that to search for
        # a password.
        return (s, (None, (s, self.host),
                    self.user, self.passwd or ''))

    def isabs(self):
        if self.scheme and self.scheme != 'file':
            return True # remote URL
        if hasdriveletter(self.path):
            return True # absolute for our purposes - can't be joined()
        if self.path.startswith(r'\\'):
            return True # Windows UNC path
        if self.path.startswith('/'):
            return True # POSIX-style
        return False

    def localpath(self):
        if self.scheme == 'file' or self.scheme == 'bundle':
            path = self.path or '/'
            # For Windows, we need to promote hosts containing drive
            # letters to paths with drive letters.
            if hasdriveletter(self._hostport):
                path = self._hostport + '/' + self.path
            elif (self.host is not None and self.path
                  and not hasdriveletter(path)):
                path = '/' + path
            return path
        return self._origpath

    def islocal(self):
        '''whether localpath will return something that posixfile can open'''
        return (not self.scheme or self.scheme == 'file'
                or self.scheme == 'bundle')
2447 2447
def hasscheme(path):
    """Return True when path parses as a URL carrying a scheme."""
    scheme = url(path).scheme
    return bool(scheme)
2450 2450
def hasdriveletter(path):
    """Whether path starts with a Windows drive letter ('x:...').

    Note: returns the falsy path itself (e.g. '' or None) when path is
    empty, mirroring short-circuit `and` semantics.
    """
    return path and path[0:1].isalpha() and path[1:2] == ':'
2453 2453
def urllocalpath(path):
    """Return the local filesystem path equivalent of a URL-ish path."""
    u = url(path, parsequery=False, parsefragment=False)
    return u.localpath()
2456 2456
def hidepassword(u):
    '''hide user credential in a url string'''
    parsed = url(u)
    if parsed.passwd:
        # mask the password rather than dropping it entirely
        parsed.passwd = '***'
    return str(parsed)
2463 2463
def removeauth(u):
    '''remove all authentication information from a url string'''
    parsed = url(u)
    parsed.user = parsed.passwd = None
    return str(parsed)
2469 2469
def isatty(fp):
    """Return fp.isatty(), or False when fp provides no isatty()."""
    isattyfn = getattr(fp, 'isatty', None)
    if isattyfn is None:
        return False
    return isattyfn()
2475 2475
# render an elapsed time with an appropriate unit, from seconds down to
# nanoseconds; see unitcountfn for the (multiplier, divisor, format)
# table semantics
timecount = unitcountfn(
    (1, 1e3, _('%.0f s')),
    (100, 1, _('%.1f s')),
    (10, 1, _('%.2f s')),
    (1, 1, _('%.3f s')),
    (100, 0.001, _('%.1f ms')),
    (10, 0.001, _('%.2f ms')),
    (1, 0.001, _('%.3f ms')),
    (100, 0.000001, _('%.1f us')),
    (10, 0.000001, _('%.2f us')),
    (1, 0.000001, _('%.3f us')),
    (100, 0.000000001, _('%.1f ns')),
    (10, 0.000000001, _('%.2f ns')),
    (1, 0.000000001, _('%.3f ns')),
    )
2491 2491
2492 2492 _timenesting = [0]
2493 2493
def timed(func):
    '''Report the execution time of a function call to stderr.

    During development, use as a decorator when you need to measure
    the cost of a function, e.g. as follows:

    @util.timed
    def foo(a, b, c):
        pass
    '''

    def wrapper(*args, **kwargs):
        start = time.time()
        # indent nested @timed calls by two spaces per level
        _timenesting[0] += 2
        try:
            return func(*args, **kwargs)
        finally:
            elapsed = time.time() - start
            _timenesting[0] -= 2
            prefix = ' ' * _timenesting[0]
            sys.stderr.write('%s%s: %s\n'
                             % (prefix, func.__name__, timecount(elapsed)))
    return wrapper
2518 2518
# size-suffix -> byte multiplier; 'b' must come after 'kb'/'mb'/'gb'
# because sizetoint() matches with endswith() in table order
_sizeunits = (('m', 2**20), ('k', 2**10), ('g', 2**30),
              ('kb', 2**10), ('mb', 2**20), ('gb', 2**30), ('b', 1))
2521 2521
def sizetoint(s):
    '''Convert a space specifier to a byte count.

    >>> sizetoint('30')
    30
    >>> sizetoint('2.2kb')
    2252
    >>> sizetoint('6M')
    6291456
    '''
    spec = s.strip().lower()
    try:
        for suffix, scale in _sizeunits:
            if spec.endswith(suffix):
                return int(float(spec[:-len(suffix)]) * scale)
        # no recognized suffix: plain byte count
        return int(spec)
    except ValueError:
        raise error.ParseError(_("couldn't parse size: %s") % s)
2540 2540
class hooks(object):
    '''A collection of hook functions that can be used to extend a
    function's behavior. Hooks are called in lexicographic order,
    based on the names of their sources.'''

    def __init__(self):
        # list of (source, hookfn) pairs, sorted lazily at call time
        self._hooks = []

    def add(self, source, hook):
        self._hooks.append((source, hook))

    def __call__(self, *args):
        # sort by source name so hook order is deterministic
        self._hooks.sort(key=lambda entry: entry[0])
        return [hookfn(*args) for source, hookfn in self._hooks]
2558 2558
def getstackframes(skip=0, line=' %-*s in %s\n', fileline='%s:%s'):
    '''Yields lines for a nicely formatted stacktrace.
    Skips the 'skip' last entries.
    Each file+linenumber is formatted according to fileline.
    Each line is formatted according to line.
    If line is None, it yields:
      length of longest filepath+line number,
      filepath+linenumber,
      function

    Not be used in production code but very convenient while developing.
    '''
    # [:-skip - 1] drops the requested trailing frames plus this one
    entries = [(fileline % (fn, ln), func)
               for fn, ln, func, _text in traceback.extract_stack()[:-skip - 1]]
    if entries:
        # pad every location to the widest so function names line up
        fnmax = max(len(entry[0]) for entry in entries)
        for fnln, func in entries:
            if line is None:
                yield (fnmax, fnln, func)
            else:
                yield line % (fnmax, fnln, func)
2580 2580
def debugstacktrace(msg='stacktrace', skip=0, f=sys.stderr, otherf=sys.stdout):
    '''Writes a message to f (stderr) with a nicely formatted stacktrace.
    Skips the 'skip' last entries. By default it will flush stdout first.
    It can be used everywhere and intentionally does not require an ui object.
    Not be used in production code but very convenient while developing.
    '''
    if otherf:
        # flush the other stream first so the two outputs interleave sanely
        otherf.flush()
    f.write('%s at:\n' % msg)
    # skip + 1 additionally hides this helper's own frame
    for entry in getstackframes(skip + 1):
        f.write(entry)
    f.flush()
2593 2593
class dirs(object):
    '''a multiset of directory names from a dirstate or manifest'''

    def __init__(self, map, skip=None):
        # directory name -> number of tracked entries referencing it
        self._dirs = {}
        addpath = self.addpath
        if safehasattr(map, 'iteritems') and skip is not None:
            # dirstate-style mapping: skip entries whose state equals skip
            for f, s in map.iteritems():
                if s[0] != skip:
                    addpath(f)
        else:
            for f in map:
                addpath(f)

    def addpath(self, path):
        dirs = self._dirs
        for base in finddirs(path):
            if base in dirs:
                # this ancestor (and therefore all shallower ones) is
                # already counted; bump it and stop
                dirs[base] += 1
                return
            dirs[base] = 1

    def delpath(self, path):
        dirs = self._dirs
        for base in finddirs(path):
            if dirs[base] > 1:
                # still referenced by other entries; decrement and stop
                dirs[base] -= 1
                return
            del dirs[base]

    def __iter__(self):
        return self._dirs.iterkeys()

    def __contains__(self, d):
        return d in self._dirs
2629 2629
# prefer the C implementation of dirs when the parsers module provides one
if safehasattr(parsers, 'dirs'):
    dirs = parsers.dirs
2632 2632
def finddirs(path):
    """Yield every ancestor directory of a '/'-separated path, deepest first."""
    sep = path.rfind('/')
    while sep != -1:
        yield path[:sep]
        sep = path.rfind('/', 0, sep)
2638 2638
2639 2639 # compression utility
2640 2640
class nocompress(object):
    """Identity 'compressor' used for uncompressed streams."""
    def compress(self, x):
        return x
    def flush(self):
        # nothing is buffered, so flushing emits no trailing data
        return ""
2646 2646
# stream-compressor factories keyed by the two-letter bundle type
compressors = {
    None: nocompress,
    # lambda to prevent early import
    'BZ': lambda: bz2.BZ2Compressor(),
    'GZ': lambda: zlib.compressobj(),
    }
# also support the old form by courtesies
compressors['UN'] = compressors[None]
2655 2655
def _makedecompressor(decompcls):
    """Return a function wrapping a file-like object into a chunkbuffer
    of data decompressed with a fresh decompcls() instance."""
    def generator(f):
        decompressor = decompcls()
        for chunk in filechunkiter(f):
            yield decompressor.decompress(chunk)
    def func(fh):
        return chunkbuffer(generator(fh))
    return func
2664 2664
class ctxmanager(object):
    '''A context manager for use in 'with' blocks to allow multiple
    contexts to be entered at once. This is both safer and more
    flexible than contextlib.nested.

    Once Mercurial supports Python 2.7+, this will become mostly
    unnecessary.
    '''

    def __init__(self, *args):
        '''Accepts a list of no-argument functions that return context
        managers. These will be invoked at __call__ time.'''
        self._pending = args
        self._atexit = []

    def __enter__(self):
        return self

    def enter(self):
        '''Create and enter context managers in the order in which they were
        passed to the constructor.'''
        values = []
        for func in self._pending:
            obj = func()
            values.append(obj.__enter__())
            self._atexit.append(obj.__exit__)
        del self._pending
        return values

    def atexit(self, func, *args, **kwargs):
        '''Add a function to call when this context manager exits. The
        ordering of multiple atexit calls is unspecified, save that
        they will happen before any __exit__ functions.'''
        def wrapper(exc_type, exc_val, exc_tb):
            func(*args, **kwargs)
        self._atexit.append(wrapper)
        return func

    def __exit__(self, exc_type, exc_val, exc_tb):
        '''Context managers are exited in the reverse order from which
        they were created.'''
        received = exc_type is not None
        suppressed = False
        pending = None
        self._atexit.reverse()
        for exitfunc in self._atexit:
            try:
                if exitfunc(exc_type, exc_val, exc_tb):
                    # an inner manager swallowed the exception: stop
                    # propagating it to the remaining handlers
                    suppressed = True
                    exc_type = None
                    exc_val = None
                    exc_tb = None
            except BaseException:
                # remember the most recent exception raised by an exit
                # function; it is re-raised once all handlers have run.
                # (removed a redundant duplicate sys.exc_info() call
                # that assigned `pending` twice in a row)
                exc_type, exc_val, exc_tb = pending = sys.exc_info()
        del self._atexit
        if pending:
            raise exc_val
        return received and suppressed
2724 2724
def _bz2():
    """Return a BZ2 decompressor primed for streams whose 'BZ' magic
    header has been stripped (the '_truncatedBZ' bundle type)."""
    d = bz2.BZ2Decompressor()
    # Bzip2 stream start with BZ, but we stripped it.
    # we put it back for good measure.
    d.decompress('BZ')
    return d
2731 2731
# decompressor factories matching `compressors`; each maps a file-like
# object to a chunkbuffer of decompressed data
decompressors = {None: lambda fh: fh,
                 '_truncatedBZ': _makedecompressor(_bz2),
                 'BZ': _makedecompressor(lambda: bz2.BZ2Decompressor()),
                 'GZ': _makedecompressor(lambda: zlib.decompressobj()),
                 }
# also support the old form by courtesies
decompressors['UN'] = decompressors[None]
2739 2739
# convenient shortcut for interactive debugging: util.dst('message')
dst = debugstacktrace
@@ -1,692 +1,692
1 1 commit date test
2 2
3 3 $ hg init test
4 4 $ cd test
5 5 $ echo foo > foo
6 6 $ hg add foo
7 7 $ cat > $TESTTMP/checkeditform.sh <<EOF
8 8 > env | grep HGEDITFORM
9 9 > true
10 10 > EOF
11 11 $ HGEDITOR="sh $TESTTMP/checkeditform.sh" hg commit -m ""
12 12 HGEDITFORM=commit.normal.normal
13 13 abort: empty commit message
14 14 [255]
15 15 $ hg commit -d '0 0' -m commit-1
16 16 $ echo foo >> foo
17 17 $ hg commit -d '1 4444444' -m commit-3
18 18 abort: impossible time zone offset: 4444444
19 19 [255]
20 20 $ hg commit -d '1 15.1' -m commit-4
21 21 abort: invalid date: '1\t15.1'
22 22 [255]
23 23 $ hg commit -d 'foo bar' -m commit-5
24 24 abort: invalid date: 'foo bar'
25 25 [255]
26 26 $ hg commit -d ' 1 4444' -m commit-6
27 27 $ hg commit -d '111111111111 0' -m commit-7
28 28 abort: date exceeds 32 bits: 111111111111
29 29 [255]
30 30 $ hg commit -d '-111111111111 0' -m commit-7
31 31 abort: date exceeds 32 bits: -111111111111
32 32 [255]
33 33 $ echo foo >> foo
34 $ hg commit -d '1901-12-13 20:45:53 +0000' -m commit-7-2
34 $ hg commit -d '1901-12-13 20:45:52 +0000' -m commit-7-2
35 35 $ echo foo >> foo
36 $ hg commit -d '-2147483647 0' -m commit-7-3
36 $ hg commit -d '-2147483648 0' -m commit-7-3
37 37 $ hg log -T '{rev} {date|isodatesec}\n' -l2
38 3 1901-12-13 20:45:53 +0000
39 2 1901-12-13 20:45:53 +0000
40 $ hg commit -d '1901-12-13 20:45:52 +0000' -m commit-7
41 abort: date exceeds 32 bits: -2147483648
38 3 1901-12-13 20:45:52 +0000
39 2 1901-12-13 20:45:52 +0000
40 $ hg commit -d '1901-12-13 20:45:51 +0000' -m commit-7
41 abort: date exceeds 32 bits: -2147483649
42 42 [255]
43 $ hg commit -d '-2147483648 0' -m commit-7
44 abort: date exceeds 32 bits: -2147483648
43 $ hg commit -d '-2147483649 0' -m commit-7
44 abort: date exceeds 32 bits: -2147483649
45 45 [255]
46 46
47 47 commit added file that has been deleted
48 48
49 49 $ echo bar > bar
50 50 $ hg add bar
51 51 $ rm bar
52 52 $ hg commit -m commit-8
53 53 nothing changed (1 missing files, see 'hg status')
54 54 [1]
55 55 $ hg commit -m commit-8-2 bar
56 56 abort: bar: file not found!
57 57 [255]
58 58
59 59 $ hg -q revert -a --no-backup
60 60
61 61 $ mkdir dir
62 62 $ echo boo > dir/file
63 63 $ hg add
64 64 adding dir/file (glob)
65 65 $ hg -v commit -m commit-9 dir
66 66 committing files:
67 67 dir/file
68 68 committing manifest
69 69 committing changelog
70 committed changeset 4:76aab26859d7
70 committed changeset 4:1957363f1ced
71 71
72 72 $ echo > dir.file
73 73 $ hg add
74 74 adding dir.file
75 75 $ hg commit -m commit-10 dir dir.file
76 76 abort: dir: no match under directory!
77 77 [255]
78 78
79 79 $ echo >> dir/file
80 80 $ mkdir bleh
81 81 $ mkdir dir2
82 82 $ cd bleh
83 83 $ hg commit -m commit-11 .
84 84 abort: bleh: no match under directory!
85 85 [255]
86 86 $ hg commit -m commit-12 ../dir ../dir2
87 87 abort: dir2: no match under directory!
88 88 [255]
89 89 $ hg -v commit -m commit-13 ../dir
90 90 committing files:
91 91 dir/file
92 92 committing manifest
93 93 committing changelog
94 committed changeset 5:9a50557f1baf
94 committed changeset 5:a31d8f87544a
95 95 $ cd ..
96 96
97 97 $ hg commit -m commit-14 does-not-exist
98 98 abort: does-not-exist: * (glob)
99 99 [255]
100 100
101 101 #if symlink
102 102 $ ln -s foo baz
103 103 $ hg commit -m commit-15 baz
104 104 abort: baz: file not tracked!
105 105 [255]
106 106 #endif
107 107
108 108 $ touch quux
109 109 $ hg commit -m commit-16 quux
110 110 abort: quux: file not tracked!
111 111 [255]
112 112 $ echo >> dir/file
113 113 $ hg -v commit -m commit-17 dir/file
114 114 committing files:
115 115 dir/file
116 116 committing manifest
117 117 committing changelog
118 committed changeset 6:4b4c75bf422d
118 committed changeset 6:32d054c9d085
119 119
120 120 An empty date was interpreted as epoch origin
121 121
122 122 $ echo foo >> foo
123 123 $ hg commit -d '' -m commit-no-date
124 124 $ hg tip --template '{date|isodate}\n' | grep '1970'
125 125 [1]
126 126
127 127 Make sure we do not obscure unknown requires file entries (issue2649)
128 128
129 129 $ echo foo >> foo
130 130 $ echo fake >> .hg/requires
131 131 $ hg commit -m bla
132 132 abort: repository requires features unknown to this Mercurial: fake!
133 133 (see https://mercurial-scm.org/wiki/MissingRequirement for more information)
134 134 [255]
135 135
136 136 $ cd ..
137 137
138 138
139 139 partial subdir commit test
140 140
141 141 $ hg init test2
142 142 $ cd test2
143 143 $ mkdir foo
144 144 $ echo foo > foo/foo
145 145 $ mkdir bar
146 146 $ echo bar > bar/bar
147 147 $ hg add
148 148 adding bar/bar (glob)
149 149 adding foo/foo (glob)
150 150 $ HGEDITOR=cat hg ci -e -m commit-subdir-1 foo
151 151 commit-subdir-1
152 152
153 153
154 154 HG: Enter commit message. Lines beginning with 'HG:' are removed.
155 155 HG: Leave message empty to abort commit.
156 156 HG: --
157 157 HG: user: test
158 158 HG: branch 'default'
159 159 HG: added foo/foo
160 160
161 161
162 162 $ hg ci -m commit-subdir-2 bar
163 163
164 164 subdir log 1
165 165
166 166 $ hg log -v foo
167 167 changeset: 0:f97e73a25882
168 168 user: test
169 169 date: Thu Jan 01 00:00:00 1970 +0000
170 170 files: foo/foo
171 171 description:
172 172 commit-subdir-1
173 173
174 174
175 175
176 176 subdir log 2
177 177
178 178 $ hg log -v bar
179 179 changeset: 1:aa809156d50d
180 180 tag: tip
181 181 user: test
182 182 date: Thu Jan 01 00:00:00 1970 +0000
183 183 files: bar/bar
184 184 description:
185 185 commit-subdir-2
186 186
187 187
188 188
189 189 full log
190 190
191 191 $ hg log -v
192 192 changeset: 1:aa809156d50d
193 193 tag: tip
194 194 user: test
195 195 date: Thu Jan 01 00:00:00 1970 +0000
196 196 files: bar/bar
197 197 description:
198 198 commit-subdir-2
199 199
200 200
201 201 changeset: 0:f97e73a25882
202 202 user: test
203 203 date: Thu Jan 01 00:00:00 1970 +0000
204 204 files: foo/foo
205 205 description:
206 206 commit-subdir-1
207 207
208 208
209 209 $ cd ..
210 210
211 211
212 212 dot and subdir commit test
213 213
214 214 $ hg init test3
215 215 $ echo commit-foo-subdir > commit-log-test
216 216 $ cd test3
217 217 $ mkdir foo
218 218 $ echo foo content > foo/plain-file
219 219 $ hg add foo/plain-file
220 220 $ HGEDITOR=cat hg ci --edit -l ../commit-log-test foo
221 221 commit-foo-subdir
222 222
223 223
224 224 HG: Enter commit message. Lines beginning with 'HG:' are removed.
225 225 HG: Leave message empty to abort commit.
226 226 HG: --
227 227 HG: user: test
228 228 HG: branch 'default'
229 229 HG: added foo/plain-file
230 230
231 231
232 232 $ echo modified foo content > foo/plain-file
233 233 $ hg ci -m commit-foo-dot .
234 234
235 235 full log
236 236
237 237 $ hg log -v
238 238 changeset: 1:95b38e3a5b2e
239 239 tag: tip
240 240 user: test
241 241 date: Thu Jan 01 00:00:00 1970 +0000
242 242 files: foo/plain-file
243 243 description:
244 244 commit-foo-dot
245 245
246 246
247 247 changeset: 0:65d4e9386227
248 248 user: test
249 249 date: Thu Jan 01 00:00:00 1970 +0000
250 250 files: foo/plain-file
251 251 description:
252 252 commit-foo-subdir
253 253
254 254
255 255
256 256 subdir log
257 257
258 258 $ cd foo
259 259 $ hg log .
260 260 changeset: 1:95b38e3a5b2e
261 261 tag: tip
262 262 user: test
263 263 date: Thu Jan 01 00:00:00 1970 +0000
264 264 summary: commit-foo-dot
265 265
266 266 changeset: 0:65d4e9386227
267 267 user: test
268 268 date: Thu Jan 01 00:00:00 1970 +0000
269 269 summary: commit-foo-subdir
270 270
271 271 $ cd ..
272 272 $ cd ..
273 273
274 274 Issue1049: Hg permits partial commit of merge without warning
275 275
276 276 $ hg init issue1049
277 277 $ cd issue1049
278 278 $ echo a > a
279 279 $ hg ci -Ama
280 280 adding a
281 281 $ echo a >> a
282 282 $ hg ci -mb
283 283 $ hg up 0
284 284 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
285 285 $ echo b >> a
286 286 $ hg ci -mc
287 287 created new head
288 288 $ HGMERGE=true hg merge
289 289 merging a
290 290 0 files updated, 1 files merged, 0 files removed, 0 files unresolved
291 291 (branch merge, don't forget to commit)
292 292
293 293 should fail because we are specifying a file name
294 294
295 295 $ hg ci -mmerge a
296 296 abort: cannot partially commit a merge (do not specify files or patterns)
297 297 [255]
298 298
299 299 should fail because we are specifying a pattern
300 300
301 301 $ hg ci -mmerge -I a
302 302 abort: cannot partially commit a merge (do not specify files or patterns)
303 303 [255]
304 304
305 305 should succeed
306 306
307 307 $ HGEDITOR="sh $TESTTMP/checkeditform.sh" hg ci -mmerge --edit
308 308 HGEDITFORM=commit.normal.merge
309 309 $ cd ..
310 310
311 311
312 312 test commit message content
313 313
314 314 $ hg init commitmsg
315 315 $ cd commitmsg
316 316 $ echo changed > changed
317 317 $ echo removed > removed
318 318 $ hg book activebookmark
319 319 $ hg ci -qAm init
320 320
321 321 $ hg rm removed
322 322 $ echo changed >> changed
323 323 $ echo added > added
324 324 $ hg add added
325 325 $ HGEDITOR=cat hg ci -A
326 326
327 327
328 328 HG: Enter commit message. Lines beginning with 'HG:' are removed.
329 329 HG: Leave message empty to abort commit.
330 330 HG: --
331 331 HG: user: test
332 332 HG: branch 'default'
333 333 HG: bookmark 'activebookmark'
334 334 HG: added added
335 335 HG: changed changed
336 336 HG: removed removed
337 337 abort: empty commit message
338 338 [255]
339 339
340 340 test saving last-message.txt
341 341
342 342 $ hg init sub
343 343 $ echo a > sub/a
344 344 $ hg -R sub add sub/a
345 345 $ cat > sub/.hg/hgrc <<EOF
346 346 > [hooks]
347 347 > precommit.test-saving-last-message = false
348 348 > EOF
349 349
350 350 $ echo 'sub = sub' > .hgsub
351 351 $ hg add .hgsub
352 352
353 353 $ cat > $TESTTMP/editor.sh <<EOF
354 354 > echo "==== before editing:"
355 355 > cat \$1
356 356 > echo "===="
357 357 > echo "test saving last-message.txt" >> \$1
358 358 > EOF
359 359
360 360 $ rm -f .hg/last-message.txt
361 361 $ HGEDITOR="sh $TESTTMP/editor.sh" hg commit -S -q
362 362 ==== before editing:
363 363
364 364
365 365 HG: Enter commit message. Lines beginning with 'HG:' are removed.
366 366 HG: Leave message empty to abort commit.
367 367 HG: --
368 368 HG: user: test
369 369 HG: branch 'default'
370 370 HG: bookmark 'activebookmark'
371 371 HG: subrepo sub
372 372 HG: added .hgsub
373 373 HG: added added
374 374 HG: changed .hgsubstate
375 375 HG: changed changed
376 376 HG: removed removed
377 377 ====
378 378 abort: precommit.test-saving-last-message hook exited with status 1 (in subrepo sub)
379 379 [255]
380 380 $ cat .hg/last-message.txt
381 381
382 382
383 383 test saving last-message.txt
384 384
385 385 test that '[committemplate] changeset' definition and commit log
386 386 specific template keywords work well
387 387
388 388 $ cat >> .hg/hgrc <<EOF
389 389 > [committemplate]
390 390 > changeset.commit.normal = HG: this is "commit.normal" template
391 391 > HG: {extramsg}
392 392 > {if(activebookmark,
393 393 > "HG: bookmark '{activebookmark}' is activated\n",
394 394 > "HG: no bookmark is activated\n")}{subrepos %
395 395 > "HG: subrepo '{subrepo}' is changed\n"}
396 396 >
397 397 > changeset.commit = HG: this is "commit" template
398 398 > HG: {extramsg}
399 399 > {if(activebookmark,
400 400 > "HG: bookmark '{activebookmark}' is activated\n",
401 401 > "HG: no bookmark is activated\n")}{subrepos %
402 402 > "HG: subrepo '{subrepo}' is changed\n"}
403 403 >
404 404 > changeset = HG: this is customized commit template
405 405 > HG: {extramsg}
406 406 > {if(activebookmark,
407 407 > "HG: bookmark '{activebookmark}' is activated\n",
408 408 > "HG: no bookmark is activated\n")}{subrepos %
409 409 > "HG: subrepo '{subrepo}' is changed\n"}
410 410 > EOF
411 411
412 412 $ hg init sub2
413 413 $ echo a > sub2/a
414 414 $ hg -R sub2 add sub2/a
415 415 $ echo 'sub2 = sub2' >> .hgsub
416 416
417 417 $ HGEDITOR=cat hg commit -S -q
418 418 HG: this is "commit.normal" template
419 419 HG: Leave message empty to abort commit.
420 420 HG: bookmark 'activebookmark' is activated
421 421 HG: subrepo 'sub' is changed
422 422 HG: subrepo 'sub2' is changed
423 423 abort: empty commit message
424 424 [255]
425 425
426 426 $ cat >> .hg/hgrc <<EOF
427 427 > [committemplate]
428 428 > changeset.commit.normal =
429 429 > # now, "changeset.commit" should be chosen for "hg commit"
430 430 > EOF
431 431
432 432 $ hg bookmark --inactive activebookmark
433 433 $ hg forget .hgsub
434 434 $ HGEDITOR=cat hg commit -q
435 435 HG: this is "commit" template
436 436 HG: Leave message empty to abort commit.
437 437 HG: no bookmark is activated
438 438 abort: empty commit message
439 439 [255]
440 440
441 441 $ cat >> .hg/hgrc <<EOF
442 442 > [committemplate]
443 443 > changeset.commit =
444 444 > # now, "changeset" should be chosen for "hg commit"
445 445 > EOF
446 446
447 447 $ HGEDITOR=cat hg commit -q
448 448 HG: this is customized commit template
449 449 HG: Leave message empty to abort commit.
450 450 HG: no bookmark is activated
451 451 abort: empty commit message
452 452 [255]
453 453
454 454 $ cat >> .hg/hgrc <<EOF
455 455 > [committemplate]
456 456 > changeset = {desc}
457 457 > HG: mods={file_mods}
458 458 > HG: adds={file_adds}
459 459 > HG: dels={file_dels}
460 460 > HG: files={files}
461 461 > HG:
462 462 > {splitlines(diff()) % 'HG: {line}\n'
463 463 > }HG:
464 464 > HG: mods={file_mods}
465 465 > HG: adds={file_adds}
466 466 > HG: dels={file_dels}
467 467 > HG: files={files}\n
468 468 > EOF
469 469 $ hg status -amr
470 470 M changed
471 471 A added
472 472 R removed
473 473 $ HGEDITOR=cat hg commit -q -e -m "foo bar" changed
474 474 foo bar
475 475 HG: mods=changed
476 476 HG: adds=
477 477 HG: dels=
478 478 HG: files=changed
479 479 HG:
480 480 HG: --- a/changed Thu Jan 01 00:00:00 1970 +0000
481 481 HG: +++ b/changed Thu Jan 01 00:00:00 1970 +0000
482 482 HG: @@ -1,1 +1,2 @@
483 483 HG: changed
484 484 HG: +changed
485 485 HG:
486 486 HG: mods=changed
487 487 HG: adds=
488 488 HG: dels=
489 489 HG: files=changed
490 490 $ hg status -amr
491 491 A added
492 492 R removed
493 493 $ hg parents --template "M {file_mods}\nA {file_adds}\nR {file_dels}\n"
494 494 M changed
495 495 A
496 496 R
497 497 $ hg rollback -q
498 498
499 499 $ cat >> .hg/hgrc <<EOF
500 500 > [committemplate]
501 501 > changeset = {desc}
502 502 > HG: mods={file_mods}
503 503 > HG: adds={file_adds}
504 504 > HG: dels={file_dels}
505 505 > HG: files={files}
506 506 > HG:
507 507 > {splitlines(diff("changed")) % 'HG: {line}\n'
508 508 > }HG:
509 509 > HG: mods={file_mods}
510 510 > HG: adds={file_adds}
511 511 > HG: dels={file_dels}
512 512 > HG: files={files}
513 513 > HG:
514 514 > {splitlines(diff("added")) % 'HG: {line}\n'
515 515 > }HG:
516 516 > HG: mods={file_mods}
517 517 > HG: adds={file_adds}
518 518 > HG: dels={file_dels}
519 519 > HG: files={files}
520 520 > HG:
521 521 > {splitlines(diff("removed")) % 'HG: {line}\n'
522 522 > }HG:
523 523 > HG: mods={file_mods}
524 524 > HG: adds={file_adds}
525 525 > HG: dels={file_dels}
526 526 > HG: files={files}\n
527 527 > EOF
528 528 $ HGEDITOR=cat hg commit -q -e -m "foo bar" added removed
529 529 foo bar
530 530 HG: mods=
531 531 HG: adds=added
532 532 HG: dels=removed
533 533 HG: files=added removed
534 534 HG:
535 535 HG:
536 536 HG: mods=
537 537 HG: adds=added
538 538 HG: dels=removed
539 539 HG: files=added removed
540 540 HG:
541 541 HG: --- /dev/null Thu Jan 01 00:00:00 1970 +0000
542 542 HG: +++ b/added Thu Jan 01 00:00:00 1970 +0000
543 543 HG: @@ -0,0 +1,1 @@
544 544 HG: +added
545 545 HG:
546 546 HG: mods=
547 547 HG: adds=added
548 548 HG: dels=removed
549 549 HG: files=added removed
550 550 HG:
551 551 HG: --- a/removed Thu Jan 01 00:00:00 1970 +0000
552 552 HG: +++ /dev/null Thu Jan 01 00:00:00 1970 +0000
553 553 HG: @@ -1,1 +0,0 @@
554 554 HG: -removed
555 555 HG:
556 556 HG: mods=
557 557 HG: adds=added
558 558 HG: dels=removed
559 559 HG: files=added removed
560 560 $ hg status -amr
561 561 M changed
562 562 $ hg parents --template "M {file_mods}\nA {file_adds}\nR {file_dels}\n"
563 563 M
564 564 A added
565 565 R removed
566 566 $ hg rollback -q
567 567
568 568 $ cat >> .hg/hgrc <<EOF
569 569 > # disable customizing for subsequent tests
570 570 > [committemplate]
571 571 > changeset =
572 572 > EOF
573 573
574 574 $ cd ..
575 575
576 576
577 577 commit copy
578 578
579 579 $ hg init dir2
580 580 $ cd dir2
581 581 $ echo bleh > bar
582 582 $ hg add bar
583 583 $ hg ci -m 'add bar'
584 584
585 585 $ hg cp bar foo
586 586 $ echo >> bar
587 587 $ hg ci -m 'cp bar foo; change bar'
588 588
589 589 $ hg debugrename foo
590 590 foo renamed from bar:26d3ca0dfd18e44d796b564e38dd173c9668d3a9
591 591 $ hg debugindex bar
592 592 rev offset length ..... linkrev nodeid p1 p2 (re)
593 593 0 0 6 ..... 0 26d3ca0dfd18 000000000000 000000000000 (re)
594 594 1 6 7 ..... 1 d267bddd54f7 26d3ca0dfd18 000000000000 (re)
595 595
596 596 Test making empty commits
597 597 $ hg commit --config ui.allowemptycommit=True -m "empty commit"
598 598 $ hg log -r . -v --stat
599 599 changeset: 2:d809f3644287
600 600 tag: tip
601 601 user: test
602 602 date: Thu Jan 01 00:00:00 1970 +0000
603 603 description:
604 604 empty commit
605 605
606 606
607 607
608 608 verify pathauditor blocks evil filepaths
609 609 $ cat > evil-commit.py <<EOF
610 610 > from mercurial import ui, hg, context, node
611 611 > notrc = u".h\u200cg".encode('utf-8') + '/hgrc'
612 612 > u = ui.ui()
613 613 > r = hg.repository(u, '.')
614 614 > def filectxfn(repo, memctx, path):
615 615 > return context.memfilectx(repo, path, '[hooks]\nupdate = echo owned')
616 616 > c = context.memctx(r, [r['tip'].node(), node.nullid],
617 617 > 'evil', [notrc], filectxfn, 0)
618 618 > r.commitctx(c)
619 619 > EOF
620 620 $ $PYTHON evil-commit.py
621 621 #if windows
622 622 $ hg co --clean tip
623 623 abort: path contains illegal component: .h\xe2\x80\x8cg\\hgrc (esc)
624 624 [255]
625 625 #else
626 626 $ hg co --clean tip
627 627 abort: path contains illegal component: .h\xe2\x80\x8cg/hgrc (esc)
628 628 [255]
629 629 #endif
630 630
631 631 $ hg rollback -f
632 632 repository tip rolled back to revision 2 (undo commit)
633 633 $ cat > evil-commit.py <<EOF
634 634 > from mercurial import ui, hg, context, node
635 635 > notrc = "HG~1/hgrc"
636 636 > u = ui.ui()
637 637 > r = hg.repository(u, '.')
638 638 > def filectxfn(repo, memctx, path):
639 639 > return context.memfilectx(repo, path, '[hooks]\nupdate = echo owned')
640 640 > c = context.memctx(r, [r['tip'].node(), node.nullid],
641 641 > 'evil', [notrc], filectxfn, 0)
642 642 > r.commitctx(c)
643 643 > EOF
644 644 $ $PYTHON evil-commit.py
645 645 $ hg co --clean tip
646 646 abort: path contains illegal component: HG~1/hgrc (glob)
647 647 [255]
648 648
649 649 $ hg rollback -f
650 650 repository tip rolled back to revision 2 (undo commit)
651 651 $ cat > evil-commit.py <<EOF
652 652 > from mercurial import ui, hg, context, node
653 653 > notrc = "HG8B6C~2/hgrc"
654 654 > u = ui.ui()
655 655 > r = hg.repository(u, '.')
656 656 > def filectxfn(repo, memctx, path):
657 657 > return context.memfilectx(repo, path, '[hooks]\nupdate = echo owned')
658 658 > c = context.memctx(r, [r['tip'].node(), node.nullid],
659 659 > 'evil', [notrc], filectxfn, 0)
660 660 > r.commitctx(c)
661 661 > EOF
662 662 $ $PYTHON evil-commit.py
663 663 $ hg co --clean tip
664 664 abort: path contains illegal component: HG8B6C~2/hgrc (glob)
665 665 [255]
666 666
667 667 # test that an unmodified commit template message aborts
668 668
669 669 $ hg init unmodified_commit_template
670 670 $ cd unmodified_commit_template
671 671 $ echo foo > foo
672 672 $ hg add foo
673 673 $ hg commit -m "foo"
674 674 $ cat >> .hg/hgrc <<EOF
675 675 > [committemplate]
676 676 > changeset.commit = HI THIS IS NOT STRIPPED
677 677 > HG: this is customized commit template
678 678 > HG: {extramsg}
679 679 > {if(activebookmark,
680 680 > "HG: bookmark '{activebookmark}' is activated\n",
681 681 > "HG: no bookmark is activated\n")}{subrepos %
682 682 > "HG: subrepo '{subrepo}' is changed\n"}
683 683 > EOF
684 684 $ cat > $TESTTMP/notouching.sh <<EOF
685 685 > true
686 686 > EOF
687 687 $ echo foo2 > foo2
688 688 $ hg add foo2
689 689 $ HGEDITOR="sh $TESTTMP/notouching.sh" hg commit
690 690 abort: commit message unchanged
691 691 [255]
692 692 $ cd ..
General Comments 0
You need to be logged in to leave comments. Login now