date: accept broader range of ISO 8601 time specs...
Matt Mackall
r29638:491ee264 stable
@@ -1,2894 +1,2901 @@
1 1 # util.py - Mercurial utility functions and platform specific implementations
2 2 #
3 3 # Copyright 2005 K. Thananchayan <thananck@yahoo.com>
4 4 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
5 5 # Copyright 2006 Vadim Gelfer <vadim.gelfer@gmail.com>
6 6 #
7 7 # This software may be used and distributed according to the terms of the
8 8 # GNU General Public License version 2 or any later version.
9 9
10 10 """Mercurial utility functions and platform specific implementations.
11 11
12 12 This contains helper routines that are independent of the SCM core and
13 13 hide platform-specific details from the core.
14 14 """
15 15
16 16 from __future__ import absolute_import
17 17
18 18 import bz2
19 19 import calendar
20 20 import collections
21 21 import datetime
22 22 import errno
23 23 import gc
24 24 import hashlib
25 25 import imp
26 26 import os
27 27 import re as remod
28 28 import shutil
29 29 import signal
30 30 import socket
31 31 import subprocess
32 32 import sys
33 33 import tempfile
34 34 import textwrap
35 35 import time
36 36 import traceback
37 37 import zlib
38 38
39 39 from . import (
40 40 encoding,
41 41 error,
42 42 i18n,
43 43 osutil,
44 44 parsers,
45 45 pycompat,
46 46 )
47 47
48 48 for attr in (
49 49 'empty',
50 50 'httplib',
51 51 'httpserver',
52 52 'pickle',
53 53 'queue',
54 54 'urlerr',
55 55 'urlparse',
56 56 # we do import urlreq, but we do it outside the loop
57 57 #'urlreq',
58 58 'stringio',
59 59 'socketserver',
60 60 'xmlrpclib',
61 61 ):
62 62 globals()[attr] = getattr(pycompat, attr)
63 63
64 64 # This line is to make pyflakes happy:
65 65 urlreq = pycompat.urlreq
66 66
67 67 if os.name == 'nt':
68 68 from . import windows as platform
69 69 else:
70 70 from . import posix as platform
71 71
72 72 _ = i18n._
73 73
74 74 bindunixsocket = platform.bindunixsocket
75 75 cachestat = platform.cachestat
76 76 checkexec = platform.checkexec
77 77 checklink = platform.checklink
78 78 copymode = platform.copymode
79 79 executablepath = platform.executablepath
80 80 expandglobs = platform.expandglobs
81 81 explainexit = platform.explainexit
82 82 findexe = platform.findexe
83 83 gethgcmd = platform.gethgcmd
84 84 getuser = platform.getuser
85 85 getpid = os.getpid
86 86 groupmembers = platform.groupmembers
87 87 groupname = platform.groupname
88 88 hidewindow = platform.hidewindow
89 89 isexec = platform.isexec
90 90 isowner = platform.isowner
91 91 localpath = platform.localpath
92 92 lookupreg = platform.lookupreg
93 93 makedir = platform.makedir
94 94 nlinks = platform.nlinks
95 95 normpath = platform.normpath
96 96 normcase = platform.normcase
97 97 normcasespec = platform.normcasespec
98 98 normcasefallback = platform.normcasefallback
99 99 openhardlinks = platform.openhardlinks
100 100 oslink = platform.oslink
101 101 parsepatchoutput = platform.parsepatchoutput
102 102 pconvert = platform.pconvert
103 103 poll = platform.poll
104 104 popen = platform.popen
105 105 posixfile = platform.posixfile
106 106 quotecommand = platform.quotecommand
107 107 readpipe = platform.readpipe
108 108 rename = platform.rename
109 109 removedirs = platform.removedirs
110 110 samedevice = platform.samedevice
111 111 samefile = platform.samefile
112 112 samestat = platform.samestat
113 113 setbinary = platform.setbinary
114 114 setflags = platform.setflags
115 115 setsignalhandler = platform.setsignalhandler
116 116 shellquote = platform.shellquote
117 117 spawndetached = platform.spawndetached
118 118 split = platform.split
119 119 sshargs = platform.sshargs
120 120 statfiles = getattr(osutil, 'statfiles', platform.statfiles)
121 121 statisexec = platform.statisexec
122 122 statislink = platform.statislink
123 123 termwidth = platform.termwidth
124 124 testpid = platform.testpid
125 125 umask = platform.umask
126 126 unlink = platform.unlink
127 127 unlinkpath = platform.unlinkpath
128 128 username = platform.username
129 129
130 130 # Python compatibility
131 131
132 132 _notset = object()
133 133
134 134 # disable Python's problematic floating point timestamps (issue4836)
135 135 # (Python hypocritically says you shouldn't change this behavior in
136 136 # libraries, and sure enough Mercurial is not a library.)
137 137 os.stat_float_times(False)
138 138
139 139 def safehasattr(thing, attr):
140 140 return getattr(thing, attr, _notset) is not _notset
141 141
142 142 DIGESTS = {
143 143 'md5': hashlib.md5,
144 144 'sha1': hashlib.sha1,
145 145 'sha512': hashlib.sha512,
146 146 }
147 147 # List of digest types from strongest to weakest
148 148 DIGESTS_BY_STRENGTH = ['sha512', 'sha1', 'md5']
149 149
150 150 for k in DIGESTS_BY_STRENGTH:
151 151 assert k in DIGESTS
152 152
153 153 class digester(object):
154 154 """helper to compute digests.
155 155
156 156 This helper can be used to compute one or more digests given their name.
157 157
158 158 >>> d = digester(['md5', 'sha1'])
159 159 >>> d.update('foo')
160 160 >>> [k for k in sorted(d)]
161 161 ['md5', 'sha1']
162 162 >>> d['md5']
163 163 'acbd18db4cc2f85cedef654fccc4a4d8'
164 164 >>> d['sha1']
165 165 '0beec7b5ea3f0fdbc95d0dd47f3c5bc275da8a33'
166 166 >>> digester.preferred(['md5', 'sha1'])
167 167 'sha1'
168 168 """
169 169
170 170 def __init__(self, digests, s=''):
171 171 self._hashes = {}
172 172 for k in digests:
173 173 if k not in DIGESTS:
174 174 raise Abort(_('unknown digest type: %s') % k)
175 175 self._hashes[k] = DIGESTS[k]()
176 176 if s:
177 177 self.update(s)
178 178
179 179 def update(self, data):
180 180 for h in self._hashes.values():
181 181 h.update(data)
182 182
183 183 def __getitem__(self, key):
184 184 if key not in DIGESTS:
185 185 raise Abort(_('unknown digest type: %s') % key)
186 186 return self._hashes[key].hexdigest()
187 187
188 188 def __iter__(self):
189 189 return iter(self._hashes)
190 190
191 191 @staticmethod
192 192 def preferred(supported):
193 193 """returns the strongest digest type in both supported and DIGESTS."""
194 194
195 195 for k in DIGESTS_BY_STRENGTH:
196 196 if k in supported:
197 197 return k
198 198 return None
199 199
200 200 class digestchecker(object):
201 201 """file handle wrapper that additionally checks content against a given
202 202 size and digests.
203 203
204 204 d = digestchecker(fh, size, {'md5': '...'})
205 205
206 206 When multiple digests are given, all of them are validated.
207 207 """
208 208
209 209 def __init__(self, fh, size, digests):
210 210 self._fh = fh
211 211 self._size = size
212 212 self._got = 0
213 213 self._digests = dict(digests)
214 214 self._digester = digester(self._digests.keys())
215 215
216 216 def read(self, length=-1):
217 217 content = self._fh.read(length)
218 218 self._digester.update(content)
219 219 self._got += len(content)
220 220 return content
221 221
222 222 def validate(self):
223 223 if self._size != self._got:
224 224 raise Abort(_('size mismatch: expected %d, got %d') %
225 225 (self._size, self._got))
226 226 for k, v in self._digests.items():
227 227 if v != self._digester[k]:
228 228 # i18n: first parameter is a digest name
229 229 raise Abort(_('%s mismatch: expected %s, got %s') %
230 230 (k, v, self._digester[k]))
231 231
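Note that digestchecker only hashes what is actually read, so validate() belongs after the stream is exhausted. A minimal sketch, not part of this change; 'payload.bin' is a hypothetical file containing 'foo', and the sha1 value is taken from the digester doctest above:

from mercurial import util

fh = open('payload.bin', 'rb')
checked = util.digestchecker(fh, 3, {
    'sha1': '0beec7b5ea3f0fdbc95d0dd47f3c5bc275da8a33'})
while checked.read(4096):
    pass
checked.validate()  # raises util.Abort on a size or digest mismatch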
232 232 try:
233 233 buffer = buffer
234 234 except NameError:
235 235 if sys.version_info[0] < 3:
236 236 def buffer(sliceable, offset=0):
237 237 return sliceable[offset:]
238 238 else:
239 239 def buffer(sliceable, offset=0):
240 240 return memoryview(sliceable)[offset:]
241 241
242 242 closefds = os.name == 'posix'
243 243
244 244 _chunksize = 4096
245 245
246 246 class bufferedinputpipe(object):
247 247 """a manually buffered input pipe
248 248
249 249 Python will not let us use buffered IO and lazy reading with 'polling' at
250 250 the same time. We cannot probe the buffer state and select will not detect
251 251 that data are ready to read if they are already buffered.
252 252
253 253 This class lets us work around that by implementing its own buffering
254 254 (allowing efficient readline) while offering a way to know if the buffer is
255 255 empty from the output (allowing collaboration of the buffer with polling).
256 256
257 257 This class lives in the 'util' module because it makes use of the 'os'
258 258 module from the python stdlib.
259 259 """
260 260
261 261 def __init__(self, input):
262 262 self._input = input
263 263 self._buffer = []
264 264 self._eof = False
265 265 self._lenbuf = 0
266 266
267 267 @property
268 268 def hasbuffer(self):
269 269 """True is any data is currently buffered
270 270
271 271 This will be used externally a pre-step for polling IO. If there is
272 272 already data then no polling should be set in place."""
273 273 return bool(self._buffer)
274 274
275 275 @property
276 276 def closed(self):
277 277 return self._input.closed
278 278
279 279 def fileno(self):
280 280 return self._input.fileno()
281 281
282 282 def close(self):
283 283 return self._input.close()
284 284
285 285 def read(self, size):
286 286 while (not self._eof) and (self._lenbuf < size):
287 287 self._fillbuffer()
288 288 return self._frombuffer(size)
289 289
290 290 def readline(self, *args, **kwargs):
291 291 if 1 < len(self._buffer):
292 292 # this should not happen because both read and readline end with a
293 293 # _frombuffer call that collapses it.
294 294 self._buffer = [''.join(self._buffer)]
295 295 self._lenbuf = len(self._buffer[0])
296 296 lfi = -1
297 297 if self._buffer:
298 298 lfi = self._buffer[-1].find('\n')
299 299 while (not self._eof) and lfi < 0:
300 300 self._fillbuffer()
301 301 if self._buffer:
302 302 lfi = self._buffer[-1].find('\n')
303 303 size = lfi + 1
304 304 if lfi < 0: # end of file
305 305 size = self._lenbuf
306 306 elif 1 < len(self._buffer):
307 307 # we need to take previous chunks into account
308 308 size += self._lenbuf - len(self._buffer[-1])
309 309 return self._frombuffer(size)
310 310
311 311 def _frombuffer(self, size):
312 312 """return at most 'size' data from the buffer
313 313
314 314 The data are removed from the buffer."""
315 315 if size == 0 or not self._buffer:
316 316 return ''
317 317 buf = self._buffer[0]
318 318 if 1 < len(self._buffer):
319 319 buf = ''.join(self._buffer)
320 320
321 321 data = buf[:size]
322 322 buf = buf[len(data):]
323 323 if buf:
324 324 self._buffer = [buf]
325 325 self._lenbuf = len(buf)
326 326 else:
327 327 self._buffer = []
328 328 self._lenbuf = 0
329 329 return data
330 330
331 331 def _fillbuffer(self):
332 332 """read data to the buffer"""
333 333 data = os.read(self._input.fileno(), _chunksize)
334 334 if not data:
335 335 self._eof = True
336 336 else:
337 337 self._lenbuf += len(data)
338 338 self._buffer.append(data)
339 339
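A sketch of the intended collaboration with polling (not part of this change; assumes Python 2 on POSIX and that this module is importable as mercurial.util): consult hasbuffer before select(), since already-buffered data would never wake the selector:

import select
import subprocess
from mercurial import util

proc = subprocess.Popen(['cat'], stdin=subprocess.PIPE,
                        stdout=subprocess.PIPE)
pipe = util.bufferedinputpipe(proc.stdout)
proc.stdin.write('one\ntwo\n')
proc.stdin.close()
while True:
    if not pipe.hasbuffer:
        select.select([pipe], [], [])  # fileno() makes the pipe selectable
    line = pipe.readline()
    if not line:  # EOF
        break
    print(line)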
340 340 def popen2(cmd, env=None, newlines=False):
341 341 # Setting bufsize to -1 lets the system decide the buffer size.
342 342 # The default for bufsize is 0, meaning unbuffered. This leads to
343 343 # poor performance on Mac OS X: http://bugs.python.org/issue4194
344 344 p = subprocess.Popen(cmd, shell=True, bufsize=-1,
345 345 close_fds=closefds,
346 346 stdin=subprocess.PIPE, stdout=subprocess.PIPE,
347 347 universal_newlines=newlines,
348 348 env=env)
349 349 return p.stdin, p.stdout
350 350
351 351 def popen3(cmd, env=None, newlines=False):
352 352 stdin, stdout, stderr, p = popen4(cmd, env, newlines)
353 353 return stdin, stdout, stderr
354 354
355 355 def popen4(cmd, env=None, newlines=False, bufsize=-1):
356 356 p = subprocess.Popen(cmd, shell=True, bufsize=bufsize,
357 357 close_fds=closefds,
358 358 stdin=subprocess.PIPE, stdout=subprocess.PIPE,
359 359 stderr=subprocess.PIPE,
360 360 universal_newlines=newlines,
361 361 env=env)
362 362 return p.stdin, p.stdout, p.stderr, p
363 363
364 364 def version():
365 365 """Return version information if available."""
366 366 try:
367 367 from . import __version__
368 368 return __version__.version
369 369 except ImportError:
370 370 return 'unknown'
371 371
372 372 def versiontuple(v=None, n=4):
373 373 """Parses a Mercurial version string into an N-tuple.
374 374
375 375 The version string to be parsed is specified with the ``v`` argument.
376 376 If it isn't defined, the current Mercurial version string will be parsed.
377 377
378 378 ``n`` can be 2, 3, or 4. Here is how some version strings map to
379 379 returned values:
380 380
381 381 >>> v = '3.6.1+190-df9b73d2d444'
382 382 >>> versiontuple(v, 2)
383 383 (3, 6)
384 384 >>> versiontuple(v, 3)
385 385 (3, 6, 1)
386 386 >>> versiontuple(v, 4)
387 387 (3, 6, 1, '190-df9b73d2d444')
388 388
389 389 >>> versiontuple('3.6.1+190-df9b73d2d444+20151118')
390 390 (3, 6, 1, '190-df9b73d2d444+20151118')
391 391
392 392 >>> v = '3.6'
393 393 >>> versiontuple(v, 2)
394 394 (3, 6)
395 395 >>> versiontuple(v, 3)
396 396 (3, 6, None)
397 397 >>> versiontuple(v, 4)
398 398 (3, 6, None, None)
399 399
400 400 >>> v = '3.9-rc'
401 401 >>> versiontuple(v, 2)
402 402 (3, 9)
403 403 >>> versiontuple(v, 3)
404 404 (3, 9, None)
405 405 >>> versiontuple(v, 4)
406 406 (3, 9, None, 'rc')
407 407
408 408 >>> v = '3.9-rc+2-02a8fea4289b'
409 409 >>> versiontuple(v, 2)
410 410 (3, 9)
411 411 >>> versiontuple(v, 3)
412 412 (3, 9, None)
413 413 >>> versiontuple(v, 4)
414 414 (3, 9, None, 'rc+2-02a8fea4289b')
415 415 """
416 416 if not v:
417 417 v = version()
418 418 parts = remod.split(r'[\+-]', v, 1)
419 419 if len(parts) == 1:
420 420 vparts, extra = parts[0], None
421 421 else:
422 422 vparts, extra = parts
423 423
424 424 vints = []
425 425 for i in vparts.split('.'):
426 426 try:
427 427 vints.append(int(i))
428 428 except ValueError:
429 429 break
430 430 # (3, 6) -> (3, 6, None)
431 431 while len(vints) < 3:
432 432 vints.append(None)
433 433
434 434 if n == 2:
435 435 return (vints[0], vints[1])
436 436 if n == 3:
437 437 return (vints[0], vints[1], vints[2])
438 438 if n == 4:
439 439 return (vints[0], vints[1], vints[2], extra)
440 440
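With n=2 the result is a plain (int, int) pair, which makes version gating a simple tuple comparison. A hedged usage sketch, not part of this change:

from mercurial import util

if util.versiontuple(n=2) >= (3, 6):
    pass  # safe to rely on 3.6-era behavior here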
441 441 # used by parsedate
442 442 defaultdateformats = (
443 '%Y-%m-%d %H:%M:%S',
443 '%Y-%m-%dT%H:%M:%S', # the 'real' ISO8601
444 '%Y-%m-%dT%H:%M', # without seconds
445 '%Y-%m-%dT%H%M%S', # another awful but legal variant without :
446 '%Y-%m-%dT%H%M', # without seconds
447 '%Y-%m-%d %H:%M:%S', # our common legal variant
448 '%Y-%m-%d %H:%M', # without seconds
449 '%Y-%m-%d %H%M%S', # without :
450 '%Y-%m-%d %H%M', # without seconds
444 451 '%Y-%m-%d %I:%M:%S%p',
445 452 '%Y-%m-%d %H:%M',
446 453 '%Y-%m-%d %I:%M%p',
447 454 '%Y-%m-%d',
448 455 '%m-%d',
449 456 '%m/%d',
450 457 '%m/%d/%y',
451 458 '%m/%d/%Y',
452 459 '%a %b %d %H:%M:%S %Y',
453 460 '%a %b %d %I:%M:%S%p %Y',
454 461 '%a, %d %b %Y %H:%M:%S', # GNU coreutils "/bin/date --rfc-2822"
455 462 '%b %d %H:%M:%S %Y',
456 463 '%b %d %I:%M:%S%p %Y',
457 464 '%b %d %H:%M:%S',
458 465 '%b %d %I:%M:%S%p',
459 466 '%b %d %H:%M',
460 467 '%b %d %I:%M%p',
461 468 '%b %d %Y',
462 469 '%b %d',
463 470 '%H:%M:%S',
464 471 '%I:%M:%S%p',
465 472 '%H:%M',
466 473 '%I:%M%p',
467 474 )
468 475
469 476 extendeddateformats = defaultdateformats + (
470 477 "%Y",
471 478 "%Y-%m",
472 479 "%b",
473 480 "%b %Y",
474 481 )
475 482
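A quick self-check (not part of this change) that the newly accepted ISO 8601 variants really are parseable by time.strptime, which is what these format strings are eventually handed to during date parsing:

import time

for fmt, spec in [
    ('%Y-%m-%dT%H:%M:%S', '2016-07-26T12:10:30'),
    ('%Y-%m-%dT%H%M', '2016-07-26T1210'),
    ('%Y-%m-%d %H%M%S', '2016-07-26 121030'),
]:
    time.strptime(spec, fmt)  # raises ValueError on a mismatch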
476 483 def cachefunc(func):
477 484 '''cache the result of function calls'''
478 485 # XXX doesn't handle keywords args
479 486 if func.__code__.co_argcount == 0:
480 487 cache = []
481 488 def f():
482 489 if len(cache) == 0:
483 490 cache.append(func())
484 491 return cache[0]
485 492 return f
486 493 cache = {}
487 494 if func.__code__.co_argcount == 1:
488 495 # we gain a small amount of time because
489 496 # we don't need to pack/unpack the list
490 497 def f(arg):
491 498 if arg not in cache:
492 499 cache[arg] = func(arg)
493 500 return cache[arg]
494 501 else:
495 502 def f(*args):
496 503 if args not in cache:
497 504 cache[args] = func(*args)
498 505 return cache[args]
499 506
500 507 return f
501 508
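Since cachefunc returns a wrapper taking the same positional arguments, it also works as a decorator. A sketch, not part of this change; arguments must be hashable and, per the XXX above, keyword arguments are unsupported:

from mercurial import util

@util.cachefunc
def square(x):
    return x * x

square(4)  # computed
square(4)  # served from the cache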
502 509 class sortdict(dict):
503 510 '''a simple sorted dictionary'''
504 511 def __init__(self, data=None):
505 512 self._list = []
506 513 if data:
507 514 self.update(data)
508 515 def copy(self):
509 516 return sortdict(self)
510 517 def __setitem__(self, key, val):
511 518 if key in self:
512 519 self._list.remove(key)
513 520 self._list.append(key)
514 521 dict.__setitem__(self, key, val)
515 522 def __iter__(self):
516 523 return self._list.__iter__()
517 524 def update(self, src):
518 525 if isinstance(src, dict):
519 526 src = src.iteritems()
520 527 for k, v in src:
521 528 self[k] = v
522 529 def clear(self):
523 530 dict.clear(self)
524 531 self._list = []
525 532 def items(self):
526 533 return [(k, self[k]) for k in self._list]
527 534 def __delitem__(self, key):
528 535 dict.__delitem__(self, key)
529 536 self._list.remove(key)
530 537 def pop(self, key, *args, **kwargs):
531 538 try:
532 539 self._list.remove(key)
533 540 except ValueError:
534 541 pass
535 542 return dict.pop(self, key, *args, **kwargs)
536 543 def keys(self):
537 544 return self._list
538 545 def iterkeys(self):
539 546 return self._list.__iter__()
540 547 def iteritems(self):
541 548 for k in self._list:
542 549 yield k, self[k]
543 550 def insert(self, index, key, val):
544 551 self._list.insert(index, key)
545 552 dict.__setitem__(self, key, val)
546 553 def __repr__(self):
547 554 if not self:
548 555 return '%s()' % self.__class__.__name__
549 556 return '%s(%r)' % (self.__class__.__name__, self.items())
550 557
551 558 class _lrucachenode(object):
552 559 """A node in a doubly linked list.
553 560
554 561 Holds a reference to nodes on either side as well as a key-value
555 562 pair for the dictionary entry.
556 563 """
557 564 __slots__ = ('next', 'prev', 'key', 'value')
558 565
559 566 def __init__(self):
560 567 self.next = None
561 568 self.prev = None
562 569
563 570 self.key = _notset
564 571 self.value = None
565 572
566 573 def markempty(self):
567 574 """Mark the node as emptied."""
568 575 self.key = _notset
569 576
570 577 class lrucachedict(object):
571 578 """Dict that caches most recent accesses and sets.
572 579
573 580 The dict consists of an actual backing dict - indexed by original
574 581 key - and a doubly linked circular list defining the order of entries in
575 582 the cache.
576 583
577 584 The head node is the newest entry in the cache. If the cache is full,
578 585 we recycle head.prev and make it the new head. Cache accesses result in
579 586 the node being moved to before the existing head and being marked as the
580 587 new head node.
581 588 """
582 589 def __init__(self, max):
583 590 self._cache = {}
584 591
585 592 self._head = head = _lrucachenode()
586 593 head.prev = head
587 594 head.next = head
588 595 self._size = 1
589 596 self._capacity = max
590 597
591 598 def __len__(self):
592 599 return len(self._cache)
593 600
594 601 def __contains__(self, k):
595 602 return k in self._cache
596 603
597 604 def __iter__(self):
598 605 # We don't have to iterate in cache order, but why not.
599 606 n = self._head
600 607 for i in range(len(self._cache)):
601 608 yield n.key
602 609 n = n.next
603 610
604 611 def __getitem__(self, k):
605 612 node = self._cache[k]
606 613 self._movetohead(node)
607 614 return node.value
608 615
609 616 def __setitem__(self, k, v):
610 617 node = self._cache.get(k)
611 618 # Replace existing value and mark as newest.
612 619 if node is not None:
613 620 node.value = v
614 621 self._movetohead(node)
615 622 return
616 623
617 624 if self._size < self._capacity:
618 625 node = self._addcapacity()
619 626 else:
620 627 # Grab the last/oldest item.
621 628 node = self._head.prev
622 629
623 630 # At capacity. Kill the old entry.
624 631 if node.key is not _notset:
625 632 del self._cache[node.key]
626 633
627 634 node.key = k
628 635 node.value = v
629 636 self._cache[k] = node
630 637 # And mark it as newest entry. No need to adjust order since it
631 638 # is already self._head.prev.
632 639 self._head = node
633 640
634 641 def __delitem__(self, k):
635 642 node = self._cache.pop(k)
636 643 node.markempty()
637 644
638 645 # Temporarily mark as newest item before re-adjusting head to make
639 646 # this node the oldest item.
640 647 self._movetohead(node)
641 648 self._head = node.next
642 649
643 650 # Additional dict methods.
644 651
645 652 def get(self, k, default=None):
646 653 try:
647 654 return self[k] # the value (refreshing LRU order), not the node
648 655 except KeyError:
649 656 return default
650 657
651 658 def clear(self):
652 659 n = self._head
653 660 while n.key is not _notset:
654 661 n.markempty()
655 662 n = n.next
656 663
657 664 self._cache.clear()
658 665
659 666 def copy(self):
660 667 result = lrucachedict(self._capacity)
661 668 n = self._head.prev
662 669 # Iterate in oldest-to-newest order, so the copy has the right ordering
663 670 for i in range(len(self._cache)):
664 671 result[n.key] = n.value
665 672 n = n.prev
666 673 return result
667 674
668 675 def _movetohead(self, node):
669 676 """Mark a node as the newest, making it the new head.
670 677
671 678 When a node is accessed, it becomes the freshest entry in the LRU
672 679 list, which is denoted by self._head.
673 680
674 681 Visually, let's make ``N`` the new head node (* denotes head):
675 682
676 683 previous/oldest <-> head <-> next/next newest
677 684
678 685 ----<->--- A* ---<->-----
679 686 | |
680 687 E <-> D <-> N <-> C <-> B
681 688
682 689 To:
683 690
684 691 ----<->--- N* ---<->-----
685 692 | |
686 693 E <-> D <-> C <-> B <-> A
687 694
688 695 This requires the following moves:
689 696
690 697 C.next = D (node.prev.next = node.next)
691 698 D.prev = C (node.next.prev = node.prev)
692 699 E.next = N (head.prev.next = node)
693 700 N.prev = E (node.prev = head.prev)
694 701 N.next = A (node.next = head)
695 702 A.prev = N (head.prev = node)
696 703 """
697 704 head = self._head
698 705 # C.next = D
699 706 node.prev.next = node.next
700 707 # D.prev = C
701 708 node.next.prev = node.prev
702 709 # N.prev = E
703 710 node.prev = head.prev
704 711 # N.next = A
705 712 # It is tempting to do just "head" here, however if node is
706 713 # adjacent to head, this will do bad things.
707 714 node.next = head.prev.next
708 715 # E.next = N
709 716 node.next.prev = node
710 717 # A.prev = N
711 718 node.prev.next = node
712 719
713 720 self._head = node
714 721
715 722 def _addcapacity(self):
716 723 """Add a node to the circular linked list.
717 724
718 725 The new node is inserted before the head node.
719 726 """
720 727 head = self._head
721 728 node = _lrucachenode()
722 729 head.prev.next = node
723 730 node.prev = head.prev
724 731 node.next = head
725 732 head.prev = node
726 733 self._size += 1
727 734 return node
728 735
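A usage sketch (not part of this change): with capacity two, touching a key refreshes it, and the least recently used entry is the one recycled:

from mercurial import util

d = util.lrucachedict(2)
d['a'] = 1
d['b'] = 2
d['a']        # refresh 'a'; 'b' is now the oldest entry
d['c'] = 3    # recycles the node holding 'b'
'b' in d      # -> False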
729 736 def lrucachefunc(func):
730 737 '''cache most recent results of function calls'''
731 738 cache = {}
732 739 order = collections.deque()
733 740 if func.__code__.co_argcount == 1:
734 741 def f(arg):
735 742 if arg not in cache:
736 743 if len(cache) > 20:
737 744 del cache[order.popleft()]
738 745 cache[arg] = func(arg)
739 746 else:
740 747 order.remove(arg)
741 748 order.append(arg)
742 749 return cache[arg]
743 750 else:
744 751 def f(*args):
745 752 if args not in cache:
746 753 if len(cache) > 20:
747 754 del cache[order.popleft()]
748 755 cache[args] = func(*args)
749 756 else:
750 757 order.remove(args)
751 758 order.append(args)
752 759 return cache[args]
753 760
754 761 return f
755 762
756 763 class propertycache(object):
757 764 def __init__(self, func):
758 765 self.func = func
759 766 self.name = func.__name__
760 767 def __get__(self, obj, type=None):
761 768 result = self.func(obj)
762 769 self.cachevalue(obj, result)
763 770 return result
764 771
765 772 def cachevalue(self, obj, value):
766 773 # __dict__ assignment required to bypass __setattr__ (eg: repoview)
767 774 obj.__dict__[self.name] = value
768 775
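Because cachevalue() stores the result in the instance __dict__, the descriptor is consulted only on the first access. A sketch with a hypothetical class, not part of this change:

from mercurial import util

class widget(object):
    @util.propertycache
    def expensive(self):
        print('computing')
        return 42

w = widget()
w.expensive  # prints 'computing' once
w.expensive  # found in w.__dict__, descriptor bypassed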
769 776 def pipefilter(s, cmd):
770 777 '''filter string S through command CMD, returning its output'''
771 778 p = subprocess.Popen(cmd, shell=True, close_fds=closefds,
772 779 stdin=subprocess.PIPE, stdout=subprocess.PIPE)
773 780 pout, perr = p.communicate(s)
774 781 return pout
775 782
776 783 def tempfilter(s, cmd):
777 784 '''filter string S through a pair of temporary files with CMD.
778 785 CMD is used as a template to create the real command to be run,
779 786 with the strings INFILE and OUTFILE replaced by the real names of
780 787 the temporary files generated.'''
781 788 inname, outname = None, None
782 789 try:
783 790 infd, inname = tempfile.mkstemp(prefix='hg-filter-in-')
784 791 fp = os.fdopen(infd, 'wb')
785 792 fp.write(s)
786 793 fp.close()
787 794 outfd, outname = tempfile.mkstemp(prefix='hg-filter-out-')
788 795 os.close(outfd)
789 796 cmd = cmd.replace('INFILE', inname)
790 797 cmd = cmd.replace('OUTFILE', outname)
791 798 code = os.system(cmd)
792 799 if sys.platform == 'OpenVMS' and code & 1:
793 800 code = 0
794 801 if code:
795 802 raise Abort(_("command '%s' failed: %s") %
796 803 (cmd, explainexit(code)))
797 804 return readfile(outname)
798 805 finally:
799 806 try:
800 807 if inname:
801 808 os.unlink(inname)
802 809 except OSError:
803 810 pass
804 811 try:
805 812 if outname:
806 813 os.unlink(outname)
807 814 except OSError:
808 815 pass
809 816
810 817 filtertable = {
811 818 'tempfile:': tempfilter,
812 819 'pipe:': pipefilter,
813 820 }
814 821
815 822 def filter(s, cmd):
816 823 "filter a string through a command that transforms its input to its output"
817 824 for name, fn in filtertable.iteritems():
818 825 if cmd.startswith(name):
819 826 return fn(s, cmd[len(name):].lstrip())
820 827 return pipefilter(s, cmd)
821 828
822 829 def binary(s):
823 830 """return true if a string is binary data"""
824 831 return bool(s and '\0' in s)
825 832
826 833 def increasingchunks(source, min=1024, max=65536):
827 834 '''return no less than min bytes per chunk while data remains,
828 835 doubling min after each chunk until it reaches max'''
829 836 def log2(x):
830 837 if not x:
831 838 return 0
832 839 i = 0
833 840 while x:
834 841 x >>= 1
835 842 i += 1
836 843 return i - 1
837 844
838 845 buf = []
839 846 blen = 0
840 847 for chunk in source:
841 848 buf.append(chunk)
842 849 blen += len(chunk)
843 850 if blen >= min:
844 851 if min < max:
845 852 min = min << 1
846 853 nmin = 1 << log2(blen)
847 854 if nmin > min:
848 855 min = nmin
849 856 if min > max:
850 857 min = max
851 858 yield ''.join(buf)
852 859 blen = 0
853 860 buf = []
854 861 if buf:
855 862 yield ''.join(buf)
856 863
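An illustration (not part of this change): many small input chunks coalesce into yields that roughly double in size until max is reached:

from mercurial import util

source = ('x' * 300 for _ in range(100))
for chunk in util.increasingchunks(source, min=1024, max=4096):
    print(len(chunk))  # e.g. 1200, 2100, 4200, 4200, ...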
857 864 Abort = error.Abort
858 865
859 866 def always(fn):
860 867 return True
861 868
862 869 def never(fn):
863 870 return False
864 871
865 872 def nogc(func):
866 873 """disable garbage collector
867 874
868 875 Python's garbage collector triggers a GC each time a certain number of
869 876 container objects (the number being defined by gc.get_threshold()) are
870 877 allocated even when marked not to be tracked by the collector. Tracking has
871 878 no effect on when GCs are triggered, only on what objects the GC looks
872 879 into. As a workaround, disable GC while building complex (huge)
873 880 containers.
874 881
875 882 This garbage collector issue has been fixed in Python 2.7.
876 883 """
877 884 def wrapper(*args, **kwargs):
878 885 gcenabled = gc.isenabled()
879 886 gc.disable()
880 887 try:
881 888 return func(*args, **kwargs)
882 889 finally:
883 890 if gcenabled:
884 891 gc.enable()
885 892 return wrapper
886 893
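A sketch of the intended use (not part of this change): wrap the construction of a huge container so the collector stays out of the way; per the docstring this matters mainly before Python 2.7:

from mercurial import util

@util.nogc
def buildindex(n):
    # large container built without GC pauses
    return dict((i, str(i)) for i in range(n))

index = buildindex(1000000)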
887 894 def pathto(root, n1, n2):
888 895 '''return the relative path from one place to another.
889 896 root should use os.sep to separate directories
890 897 n1 should use os.sep to separate directories
891 898 n2 should use "/" to separate directories
892 899 returns an os.sep-separated path.
893 900
894 901 If n1 is a relative path, it's assumed it's
895 902 relative to root.
896 903 n2 should always be relative to root.
897 904 '''
898 905 if not n1:
899 906 return localpath(n2)
900 907 if os.path.isabs(n1):
901 908 if os.path.splitdrive(root)[0] != os.path.splitdrive(n1)[0]:
902 909 return os.path.join(root, localpath(n2))
903 910 n2 = '/'.join((pconvert(root), n2))
904 911 a, b = splitpath(n1), n2.split('/')
905 912 a.reverse()
906 913 b.reverse()
907 914 while a and b and a[-1] == b[-1]:
908 915 a.pop()
909 916 b.pop()
910 917 b.reverse()
911 918 return os.sep.join((['..'] * len(a)) + b) or '.'
912 919
913 920 def mainfrozen():
914 921 """return True if we are a frozen executable.
915 922
916 923 The code supports py2exe (most common, Windows only) and tools/freeze
917 924 (portable, not much used).
918 925 """
919 926 return (safehasattr(sys, "frozen") or # new py2exe
920 927 safehasattr(sys, "importers") or # old py2exe
921 928 imp.is_frozen("__main__")) # tools/freeze
922 929
923 930 # the location of data files matching the source code
924 931 if mainfrozen() and getattr(sys, 'frozen', None) != 'macosx_app':
925 932 # executable version (py2exe) doesn't support __file__
926 933 datapath = os.path.dirname(sys.executable)
927 934 else:
928 935 datapath = os.path.dirname(__file__)
929 936
930 937 i18n.setdatapath(datapath)
931 938
932 939 _hgexecutable = None
933 940
934 941 def hgexecutable():
935 942 """return location of the 'hg' executable.
936 943
937 944 Defaults to $HG or 'hg' in the search path.
938 945 """
939 946 if _hgexecutable is None:
940 947 hg = os.environ.get('HG')
941 948 mainmod = sys.modules['__main__']
942 949 if hg:
943 950 _sethgexecutable(hg)
944 951 elif mainfrozen():
945 952 if getattr(sys, 'frozen', None) == 'macosx_app':
946 953 # Env variable set by py2app
947 954 _sethgexecutable(os.environ['EXECUTABLEPATH'])
948 955 else:
949 956 _sethgexecutable(sys.executable)
950 957 elif os.path.basename(getattr(mainmod, '__file__', '')) == 'hg':
951 958 _sethgexecutable(mainmod.__file__)
952 959 else:
953 960 exe = findexe('hg') or os.path.basename(sys.argv[0])
954 961 _sethgexecutable(exe)
955 962 return _hgexecutable
956 963
957 964 def _sethgexecutable(path):
958 965 """set location of the 'hg' executable"""
959 966 global _hgexecutable
960 967 _hgexecutable = path
961 968
962 969 def _isstdout(f):
963 970 fileno = getattr(f, 'fileno', None)
964 971 return fileno and fileno() == sys.__stdout__.fileno()
965 972
966 973 def system(cmd, environ=None, cwd=None, onerr=None, errprefix=None, out=None):
967 974 '''enhanced shell command execution.
968 975 run with environment maybe modified, maybe in different dir.
969 976
970 977 if command fails and onerr is None, return status, else raise onerr
971 978 object as exception.
972 979
973 980 if out is specified, it is assumed to be a file-like object that has a
974 981 write() method. stdout and stderr will be redirected to out.'''
975 982 if environ is None:
976 983 environ = {}
977 984 try:
978 985 sys.stdout.flush()
979 986 except Exception:
980 987 pass
981 988 def py2shell(val):
982 989 'convert python object into string that is useful to shell'
983 990 if val is None or val is False:
984 991 return '0'
985 992 if val is True:
986 993 return '1'
987 994 return str(val)
988 995 origcmd = cmd
989 996 cmd = quotecommand(cmd)
990 997 if sys.platform == 'plan9' and (sys.version_info[0] == 2
991 998 and sys.version_info[1] < 7):
992 999 # subprocess kludge to work around issues in half-baked Python
993 1000 # ports, notably bichued/python:
994 1001 if cwd is not None:
995 1002 os.chdir(cwd)
996 1003 rc = os.system(cmd)
997 1004 else:
998 1005 env = dict(os.environ)
999 1006 env.update((k, py2shell(v)) for k, v in environ.iteritems())
1000 1007 env['HG'] = hgexecutable()
1001 1008 if out is None or _isstdout(out):
1002 1009 rc = subprocess.call(cmd, shell=True, close_fds=closefds,
1003 1010 env=env, cwd=cwd)
1004 1011 else:
1005 1012 proc = subprocess.Popen(cmd, shell=True, close_fds=closefds,
1006 1013 env=env, cwd=cwd, stdout=subprocess.PIPE,
1007 1014 stderr=subprocess.STDOUT)
1008 1015 while True:
1009 1016 line = proc.stdout.readline()
1010 1017 if not line:
1011 1018 break
1012 1019 out.write(line)
1013 1020 proc.wait()
1014 1021 rc = proc.returncode
1015 1022 if sys.platform == 'OpenVMS' and rc & 1:
1016 1023 rc = 0
1017 1024 if rc and onerr:
1018 1025 errmsg = '%s %s' % (os.path.basename(origcmd.split(None, 1)[0]),
1019 1026 explainexit(rc)[0])
1020 1027 if errprefix:
1021 1028 errmsg = '%s: %s' % (errprefix, errmsg)
1022 1029 raise onerr(errmsg)
1023 1030 return rc
1024 1031
1025 1032 def checksignature(func):
1026 1033 '''wrap a function with code to check for calling errors'''
1027 1034 def check(*args, **kwargs):
1028 1035 try:
1029 1036 return func(*args, **kwargs)
1030 1037 except TypeError:
1031 1038 if len(traceback.extract_tb(sys.exc_info()[2])) == 1:
1032 1039 raise error.SignatureError
1033 1040 raise
1034 1041
1035 1042 return check
1036 1043
1037 1044 def copyfile(src, dest, hardlink=False, copystat=False, checkambig=False):
1038 1045 '''copy a file, preserving mode and optionally other stat info like
1039 1046 atime/mtime
1040 1047
1041 1048 checkambig argument is used with filestat, and is useful only if
1042 1049 destination file is guarded by any lock (e.g. repo.lock or
1043 1050 repo.wlock).
1044 1051
1045 1052 copystat and checkambig should be exclusive.
1046 1053 '''
1047 1054 assert not (copystat and checkambig)
1048 1055 oldstat = None
1049 1056 if os.path.lexists(dest):
1050 1057 if checkambig:
1051 1058 oldstat = filestat(dest)
1052 1059 unlink(dest)
1053 1060 # hardlinks are problematic on CIFS, quietly ignore this flag
1054 1061 # until we find a way to work around it cleanly (issue4546)
1055 1062 if False and hardlink:
1056 1063 try:
1057 1064 oslink(src, dest)
1058 1065 return
1059 1066 except (IOError, OSError):
1060 1067 pass # fall back to normal copy
1061 1068 if os.path.islink(src):
1062 1069 os.symlink(os.readlink(src), dest)
1063 1070 # copytime is ignored for symlinks, but in general copytime isn't needed
1064 1071 # for them anyway
1065 1072 else:
1066 1073 try:
1067 1074 shutil.copyfile(src, dest)
1068 1075 if copystat:
1069 1076 # copystat also copies mode
1070 1077 shutil.copystat(src, dest)
1071 1078 else:
1072 1079 shutil.copymode(src, dest)
1073 1080 if oldstat and oldstat.stat:
1074 1081 newstat = filestat(dest)
1075 1082 if newstat.isambig(oldstat):
1076 1083 # stat of copied file is ambiguous to original one
1077 1084 advanced = (oldstat.stat.st_mtime + 1) & 0x7fffffff
1078 1085 os.utime(dest, (advanced, advanced))
1079 1086 except shutil.Error as inst:
1080 1087 raise Abort(str(inst))
1081 1088
1082 1089 def copyfiles(src, dst, hardlink=None, progress=lambda t, pos: None):
1083 1090 """Copy a directory tree using hardlinks if possible."""
1084 1091 num = 0
1085 1092
1086 1093 if hardlink is None:
1087 1094 hardlink = (os.stat(src).st_dev ==
1088 1095 os.stat(os.path.dirname(dst)).st_dev)
1089 1096 if hardlink:
1090 1097 topic = _('linking')
1091 1098 else:
1092 1099 topic = _('copying')
1093 1100
1094 1101 if os.path.isdir(src):
1095 1102 os.mkdir(dst)
1096 1103 for name, kind in osutil.listdir(src):
1097 1104 srcname = os.path.join(src, name)
1098 1105 dstname = os.path.join(dst, name)
1099 1106 def nprog(t, pos):
1100 1107 if pos is not None:
1101 1108 return progress(t, pos + num)
1102 1109 hardlink, n = copyfiles(srcname, dstname, hardlink, progress=nprog)
1103 1110 num += n
1104 1111 else:
1105 1112 if hardlink:
1106 1113 try:
1107 1114 oslink(src, dst)
1108 1115 except (IOError, OSError):
1109 1116 hardlink = False
1110 1117 shutil.copy(src, dst)
1111 1118 else:
1112 1119 shutil.copy(src, dst)
1113 1120 num += 1
1114 1121 progress(topic, num)
1115 1122 progress(topic, None)
1116 1123
1117 1124 return hardlink, num
1118 1125
1119 1126 _winreservednames = '''con prn aux nul
1120 1127 com1 com2 com3 com4 com5 com6 com7 com8 com9
1121 1128 lpt1 lpt2 lpt3 lpt4 lpt5 lpt6 lpt7 lpt8 lpt9'''.split()
1122 1129 _winreservedchars = ':*?"<>|'
1123 1130 def checkwinfilename(path):
1124 1131 r'''Check that the base-relative path is a valid filename on Windows.
1125 1132 Returns None if the path is ok, or a UI string describing the problem.
1126 1133
1127 1134 >>> checkwinfilename("just/a/normal/path")
1128 1135 >>> checkwinfilename("foo/bar/con.xml")
1129 1136 "filename contains 'con', which is reserved on Windows"
1130 1137 >>> checkwinfilename("foo/con.xml/bar")
1131 1138 "filename contains 'con', which is reserved on Windows"
1132 1139 >>> checkwinfilename("foo/bar/xml.con")
1133 1140 >>> checkwinfilename("foo/bar/AUX/bla.txt")
1134 1141 "filename contains 'AUX', which is reserved on Windows"
1135 1142 >>> checkwinfilename("foo/bar/bla:.txt")
1136 1143 "filename contains ':', which is reserved on Windows"
1137 1144 >>> checkwinfilename("foo/bar/b\07la.txt")
1138 1145 "filename contains '\\x07', which is invalid on Windows"
1139 1146 >>> checkwinfilename("foo/bar/bla ")
1140 1147 "filename ends with ' ', which is not allowed on Windows"
1141 1148 >>> checkwinfilename("../bar")
1142 1149 >>> checkwinfilename("foo\\")
1143 1150 "filename ends with '\\', which is invalid on Windows"
1144 1151 >>> checkwinfilename("foo\\/bar")
1145 1152 "directory name ends with '\\', which is invalid on Windows"
1146 1153 '''
1147 1154 if path.endswith('\\'):
1148 1155 return _("filename ends with '\\', which is invalid on Windows")
1149 1156 if '\\/' in path:
1150 1157 return _("directory name ends with '\\', which is invalid on Windows")
1151 1158 for n in path.replace('\\', '/').split('/'):
1152 1159 if not n:
1153 1160 continue
1154 1161 for c in n:
1155 1162 if c in _winreservedchars:
1156 1163 return _("filename contains '%s', which is reserved "
1157 1164 "on Windows") % c
1158 1165 if ord(c) <= 31:
1159 1166 return _("filename contains %r, which is invalid "
1160 1167 "on Windows") % c
1161 1168 base = n.split('.')[0]
1162 1169 if base and base.lower() in _winreservednames:
1163 1170 return _("filename contains '%s', which is reserved "
1164 1171 "on Windows") % base
1165 1172 t = n[-1]
1166 1173 if t in '. ' and n not in '..':
1167 1174 return _("filename ends with '%s', which is not allowed "
1168 1175 "on Windows") % t
1169 1176
1170 1177 if os.name == 'nt':
1171 1178 checkosfilename = checkwinfilename
1172 1179 else:
1173 1180 checkosfilename = platform.checkosfilename
1174 1181
1175 1182 def makelock(info, pathname):
1176 1183 try:
1177 1184 return os.symlink(info, pathname)
1178 1185 except OSError as why:
1179 1186 if why.errno == errno.EEXIST:
1180 1187 raise
1181 1188 except AttributeError: # no symlink in os
1182 1189 pass
1183 1190
1184 1191 ld = os.open(pathname, os.O_CREAT | os.O_WRONLY | os.O_EXCL)
1185 1192 os.write(ld, info)
1186 1193 os.close(ld)
1187 1194
1188 1195 def readlock(pathname):
1189 1196 try:
1190 1197 return os.readlink(pathname)
1191 1198 except OSError as why:
1192 1199 if why.errno not in (errno.EINVAL, errno.ENOSYS):
1193 1200 raise
1194 1201 except AttributeError: # no symlink in os
1195 1202 pass
1196 1203 fp = posixfile(pathname)
1197 1204 r = fp.read()
1198 1205 fp.close()
1199 1206 return r
1200 1207
1201 1208 def fstat(fp):
1202 1209 '''stat file object that may not have fileno method.'''
1203 1210 try:
1204 1211 return os.fstat(fp.fileno())
1205 1212 except AttributeError:
1206 1213 return os.stat(fp.name)
1207 1214
1208 1215 # File system features
1209 1216
1210 1217 def checkcase(path):
1211 1218 """
1212 1219 Return true if the given path is on a case-sensitive filesystem
1213 1220
1214 1221 Requires a path (like /foo/.hg) ending with a foldable final
1215 1222 directory component.
1216 1223 """
1217 1224 s1 = os.lstat(path)
1218 1225 d, b = os.path.split(path)
1219 1226 b2 = b.upper()
1220 1227 if b == b2:
1221 1228 b2 = b.lower()
1222 1229 if b == b2:
1223 1230 return True # no evidence against case sensitivity
1224 1231 p2 = os.path.join(d, b2)
1225 1232 try:
1226 1233 s2 = os.lstat(p2)
1227 1234 if s2 == s1:
1228 1235 return False
1229 1236 return True
1230 1237 except OSError:
1231 1238 return True
1232 1239
1233 1240 try:
1234 1241 import re2
1235 1242 _re2 = None
1236 1243 except ImportError:
1237 1244 _re2 = False
1238 1245
1239 1246 class _re(object):
1240 1247 def _checkre2(self):
1241 1248 global _re2
1242 1249 try:
1243 1250 # check if match works, see issue3964
1244 1251 _re2 = bool(re2.match(r'\[([^\[]+)\]', '[ui]'))
1245 1252 except ImportError:
1246 1253 _re2 = False
1247 1254
1248 1255 def compile(self, pat, flags=0):
1249 1256 '''Compile a regular expression, using re2 if possible
1250 1257
1251 1258 For best performance, use only re2-compatible regexp features. The
1252 1259 only flags from the re module that are re2-compatible are
1253 1260 IGNORECASE and MULTILINE.'''
1254 1261 if _re2 is None:
1255 1262 self._checkre2()
1256 1263 if _re2 and (flags & ~(remod.IGNORECASE | remod.MULTILINE)) == 0:
1257 1264 if flags & remod.IGNORECASE:
1258 1265 pat = '(?i)' + pat
1259 1266 if flags & remod.MULTILINE:
1260 1267 pat = '(?m)' + pat
1261 1268 try:
1262 1269 return re2.compile(pat)
1263 1270 except re2.error:
1264 1271 pass
1265 1272 return remod.compile(pat, flags)
1266 1273
1267 1274 @propertycache
1268 1275 def escape(self):
1269 1276 '''Return the version of escape corresponding to self.compile.
1270 1277
1271 1278 This is imperfect because whether re2 or re is used for a particular
1272 1279 function depends on the flags, etc, but it's the best we can do.
1273 1280 '''
1274 1281 global _re2
1275 1282 if _re2 is None:
1276 1283 self._checkre2()
1277 1284 if _re2:
1278 1285 return re2.escape
1279 1286 else:
1280 1287 return remod.escape
1281 1288
1282 1289 re = _re()
1283 1290
1284 1291 _fspathcache = {}
1285 1292 def fspath(name, root):
1286 1293 '''Get name in the case stored in the filesystem
1287 1294
1288 1295 The name should be relative to root, and be normcase-ed for efficiency.
1289 1296
1290 1297 Note that this function is unnecessary, and should not be
1291 1298 called, for case-sensitive filesystems (simply because it's expensive).
1292 1299
1293 1300 The root should be normcase-ed, too.
1294 1301 '''
1295 1302 def _makefspathcacheentry(dir):
1296 1303 return dict((normcase(n), n) for n in os.listdir(dir))
1297 1304
1298 1305 seps = os.sep
1299 1306 if os.altsep:
1300 1307 seps = seps + os.altsep
1301 1308 # Protect backslashes. This gets silly very quickly.
1302 1309 seps = seps.replace('\\', '\\\\')
1303 1310 pattern = remod.compile(r'([^%s]+)|([%s]+)' % (seps, seps))
1304 1311 dir = os.path.normpath(root)
1305 1312 result = []
1306 1313 for part, sep in pattern.findall(name):
1307 1314 if sep:
1308 1315 result.append(sep)
1309 1316 continue
1310 1317
1311 1318 if dir not in _fspathcache:
1312 1319 _fspathcache[dir] = _makefspathcacheentry(dir)
1313 1320 contents = _fspathcache[dir]
1314 1321
1315 1322 found = contents.get(part)
1316 1323 if not found:
1317 1324 # retry "once per directory" per "dirstate.walk" which
1318 1325 # may take place for each patch of "hg qpush", for example
1319 1326 _fspathcache[dir] = contents = _makefspathcacheentry(dir)
1320 1327 found = contents.get(part)
1321 1328
1322 1329 result.append(found or part)
1323 1330 dir = os.path.join(dir, part)
1324 1331
1325 1332 return ''.join(result)
1326 1333
1327 1334 def checknlink(testfile):
1328 1335 '''check whether hardlink count reporting works properly'''
1329 1336
1330 1337 # testfile may be open, so we need a separate file for checking to
1331 1338 # work around issue2543 (or testfile may get lost on Samba shares)
1332 1339 f1 = testfile + ".hgtmp1"
1333 1340 if os.path.lexists(f1):
1334 1341 return False
1335 1342 try:
1336 1343 posixfile(f1, 'w').close()
1337 1344 except IOError:
1338 1345 return False
1339 1346
1340 1347 f2 = testfile + ".hgtmp2"
1341 1348 fd = None
1342 1349 try:
1343 1350 oslink(f1, f2)
1344 1351 # nlinks() may behave differently for files on Windows shares if
1345 1352 # the file is open.
1346 1353 fd = posixfile(f2)
1347 1354 return nlinks(f2) > 1
1348 1355 except OSError:
1349 1356 return False
1350 1357 finally:
1351 1358 if fd is not None:
1352 1359 fd.close()
1353 1360 for f in (f1, f2):
1354 1361 try:
1355 1362 os.unlink(f)
1356 1363 except OSError:
1357 1364 pass
1358 1365
1359 1366 def endswithsep(path):
1360 1367 '''Check path ends with os.sep or os.altsep.'''
1361 1368 return path.endswith(os.sep) or os.altsep and path.endswith(os.altsep)
1362 1369
1363 1370 def splitpath(path):
1364 1371 '''Split path by os.sep.
1365 1372 Note that this function does not use os.altsep because this is
1366 1373 an alternative of simple "xxx.split(os.sep)".
1367 1374 It is recommended to use os.path.normpath() before using this
368 375 function if needed.'''
1369 1376 return path.split(os.sep)
1370 1377
1371 1378 def gui():
1372 1379 '''Are we running in a GUI?'''
1373 1380 if sys.platform == 'darwin':
1374 1381 if 'SSH_CONNECTION' in os.environ:
1375 1382 # handle SSH access to a box where the user is logged in
1376 1383 return False
1377 1384 elif getattr(osutil, 'isgui', None):
1378 1385 # check if a CoreGraphics session is available
1379 1386 return osutil.isgui()
1380 1387 else:
1381 1388 # pure build; use a safe default
1382 1389 return True
1383 1390 else:
1384 1391 return os.name == "nt" or os.environ.get("DISPLAY")
1385 1392
1386 1393 def mktempcopy(name, emptyok=False, createmode=None):
1387 1394 """Create a temporary file with the same contents from name
1388 1395
1389 1396 The permission bits are copied from the original file.
1390 1397
1391 1398 If the temporary file is going to be truncated immediately, you
1392 1399 can use emptyok=True as an optimization.
1393 1400
1394 1401 Returns the name of the temporary file.
1395 1402 """
1396 1403 d, fn = os.path.split(name)
1397 1404 fd, temp = tempfile.mkstemp(prefix='.%s-' % fn, dir=d)
1398 1405 os.close(fd)
1399 1406 # Temporary files are created with mode 0600, which is usually not
1400 1407 # what we want. If the original file already exists, just copy
1401 1408 # its mode. Otherwise, manually obey umask.
1402 1409 copymode(name, temp, createmode)
1403 1410 if emptyok:
1404 1411 return temp
1405 1412 try:
1406 1413 try:
1407 1414 ifp = posixfile(name, "rb")
1408 1415 except IOError as inst:
1409 1416 if inst.errno == errno.ENOENT:
1410 1417 return temp
1411 1418 if not getattr(inst, 'filename', None):
1412 1419 inst.filename = name
1413 1420 raise
1414 1421 ofp = posixfile(temp, "wb")
1415 1422 for chunk in filechunkiter(ifp):
1416 1423 ofp.write(chunk)
1417 1424 ifp.close()
1418 1425 ofp.close()
1419 1426 except: # re-raises
1420 1427 try: os.unlink(temp)
1421 1428 except OSError: pass
1422 1429 raise
1423 1430 return temp
1424 1431
1425 1432 class filestat(object):
1426 1433 """help to exactly detect change of a file
1427 1434
1428 1435 The 'stat' attribute is the result of 'os.stat()' if the specified
1429 1436 'path' exists. Otherwise, it is None. This saves callers a separate
1430 1437 'exists()' check beforehand.
1431 1438 """
1432 1439 def __init__(self, path):
1433 1440 try:
1434 1441 self.stat = os.stat(path)
1435 1442 except OSError as err:
1436 1443 if err.errno != errno.ENOENT:
1437 1444 raise
1438 1445 self.stat = None
1439 1446
1440 1447 __hash__ = object.__hash__
1441 1448
1442 1449 def __eq__(self, old):
1443 1450 try:
1444 1451 # if ambiguity between stat of new and old file is
1445 1452 # avoided, comparison of size, ctime and mtime is enough
1446 1453 # to exactly detect change of a file regardless of platform
1447 1454 return (self.stat.st_size == old.stat.st_size and
1448 1455 self.stat.st_ctime == old.stat.st_ctime and
1449 1456 self.stat.st_mtime == old.stat.st_mtime)
1450 1457 except AttributeError:
1451 1458 return False
1452 1459
1453 1460 def isambig(self, old):
1454 1461 """Examine whether new (= self) stat is ambiguous against old one
1455 1462
1456 1463 "S[N]" below means stat of a file at N-th change:
1457 1464
1458 1465 - S[n-1].ctime < S[n].ctime: can detect change of a file
1459 1466 - S[n-1].ctime == S[n].ctime
1460 1467 - S[n-1].ctime < S[n].mtime: means natural advancing (*1)
1461 1468 - S[n-1].ctime == S[n].mtime: is ambiguous (*2)
1462 1469 - S[n-1].ctime > S[n].mtime: never occurs naturally (don't care)
1463 1470 - S[n-1].ctime > S[n].ctime: never occurs naturally (don't care)
1464 1471
1465 1472 Case (*2) above means that a file was changed twice or more at
1466 1473 same time in sec (= S[n-1].ctime), and comparison of timestamp
1467 1474 is ambiguous.
1468 1475
1469 1476 The basic idea for avoiding such ambiguity is "advance mtime 1 sec,
1470 1477 if timestamp is ambiguous".
1471 1478
1472 1479 But advancing mtime only in case (*2) doesn't work as
1473 1480 expected, because naturally advanced S[n].mtime in case (*1)
1474 1481 might be equal to manually advanced S[n-1 or earlier].mtime.
1475 1482
1476 1483 Therefore, all "S[n-1].ctime == S[n].ctime" cases should be
1477 1484 treated as ambiguous regardless of mtime, to avoid overlooking
1478 1485 a change hidden by a collision between such mtimes.
1479 1486
1480 1487 Advancing mtime "if isambig(oldstat)" ensures "S[n-1].mtime !=
1481 1488 S[n].mtime", even if size of a file isn't changed.
1482 1489 """
1483 1490 try:
1484 1491 return (self.stat.st_ctime == old.stat.st_ctime)
1485 1492 except AttributeError:
1486 1493 return False
1487 1494
1488 1495 def __ne__(self, other):
1489 1496 return not self == other
1490 1497
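A sketch of the ambiguity check in practice (not part of this change; the path is hypothetical and the file is assumed to be guarded by a lock):

from mercurial import util

old = util.filestat('.hg/guarded-file')
# ... the file is rewritten within the same second ...
new = util.filestat('.hg/guarded-file')
if new.isambig(old):
    # same ctime: size/ctime/mtime comparison cannot detect the
    # change, so callers advance mtime by one second (see copyfile)
    pass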
1491 1498 class atomictempfile(object):
1492 1499 '''writable file object that atomically updates a file
1493 1500
1494 1501 All writes will go to a temporary copy of the original file. Call
1495 1502 close() when you are done writing, and atomictempfile will rename
1496 1503 the temporary copy to the original name, making the changes
1497 1504 visible. If the object is destroyed without being closed, all your
1498 1505 writes are discarded.
1499 1506
1500 1507 checkambig argument of constructor is used with filestat, and is
1501 1508 useful only if target file is guarded by any lock (e.g. repo.lock
1502 1509 or repo.wlock).
1503 1510 '''
1504 1511 def __init__(self, name, mode='w+b', createmode=None, checkambig=False):
1505 1512 self.__name = name # permanent name
1506 1513 self._tempname = mktempcopy(name, emptyok=('w' in mode),
1507 1514 createmode=createmode)
1508 1515 self._fp = posixfile(self._tempname, mode)
1509 1516 self._checkambig = checkambig
1510 1517
1511 1518 # delegated methods
1512 1519 self.read = self._fp.read
1513 1520 self.write = self._fp.write
1514 1521 self.seek = self._fp.seek
1515 1522 self.tell = self._fp.tell
1516 1523 self.fileno = self._fp.fileno
1517 1524
1518 1525 def close(self):
1519 1526 if not self._fp.closed:
1520 1527 self._fp.close()
1521 1528 filename = localpath(self.__name)
1522 1529 oldstat = self._checkambig and filestat(filename)
1523 1530 if oldstat and oldstat.stat:
1524 1531 rename(self._tempname, filename)
1525 1532 newstat = filestat(filename)
1526 1533 if newstat.isambig(oldstat):
1527 1534 # stat of changed file is ambiguous to original one
1528 1535 advanced = (oldstat.stat.st_mtime + 1) & 0x7fffffff
1529 1536 os.utime(filename, (advanced, advanced))
1530 1537 else:
1531 1538 rename(self._tempname, filename)
1532 1539
1533 1540 def discard(self):
1534 1541 if not self._fp.closed:
1535 1542 try:
1536 1543 os.unlink(self._tempname)
1537 1544 except OSError:
1538 1545 pass
1539 1546 self._fp.close()
1540 1547
1541 1548 def __del__(self):
1542 1549 if safehasattr(self, '_fp'): # constructor actually did something
1543 1550 self.discard()
1544 1551
1545 1552 def __enter__(self):
1546 1553 return self
1547 1554
1548 1555 def __exit__(self, exctype, excvalue, traceback):
1549 1556 if exctype is not None:
1550 1557 self.discard()
1551 1558 else:
1552 1559 self.close()
1553 1560
1554 1561 def makedirs(name, mode=None, notindexed=False):
1555 1562 """recursive directory creation with parent mode inheritance
1556 1563
1557 1564 Newly created directories are marked as "not to be indexed by
1558 1565 the content indexing service", if ``notindexed`` is specified
1559 1566 for "write" mode access.
1560 1567 """
1561 1568 try:
1562 1569 makedir(name, notindexed)
1563 1570 except OSError as err:
1564 1571 if err.errno == errno.EEXIST:
1565 1572 return
1566 1573 if err.errno != errno.ENOENT or not name:
1567 1574 raise
1568 1575 parent = os.path.dirname(os.path.abspath(name))
1569 1576 if parent == name:
1570 1577 raise
1571 1578 makedirs(parent, mode, notindexed)
1572 1579 try:
1573 1580 makedir(name, notindexed)
1574 1581 except OSError as err:
1575 1582 # Catch EEXIST to handle races
1576 1583 if err.errno == errno.EEXIST:
1577 1584 return
1578 1585 raise
1579 1586 if mode is not None:
1580 1587 os.chmod(name, mode)
1581 1588
1582 1589 def readfile(path):
1583 1590 with open(path, 'rb') as fp:
1584 1591 return fp.read()
1585 1592
1586 1593 def writefile(path, text):
1587 1594 with open(path, 'wb') as fp:
1588 1595 fp.write(text)
1589 1596
1590 1597 def appendfile(path, text):
1591 1598 with open(path, 'ab') as fp:
1592 1599 fp.write(text)
1593 1600
1594 1601 class chunkbuffer(object):
1595 1602 """Allow arbitrary sized chunks of data to be efficiently read from an
1596 1603 iterator over chunks of arbitrary size."""
1597 1604
1598 1605 def __init__(self, in_iter):
1599 1606 """in_iter is the iterator that's iterating over the input chunks.
1600 1607 targetsize is how big a buffer to try to maintain."""
1601 1608 def splitbig(chunks):
1602 1609 for chunk in chunks:
1603 1610 if len(chunk) > 2**20:
1604 1611 pos = 0
1605 1612 while pos < len(chunk):
1606 1613 end = pos + 2 ** 18
1607 1614 yield chunk[pos:end]
1608 1615 pos = end
1609 1616 else:
1610 1617 yield chunk
1611 1618 self.iter = splitbig(in_iter)
1612 1619 self._queue = collections.deque()
1613 1620 self._chunkoffset = 0
1614 1621
1615 1622 def read(self, l=None):
1616 1623 """Read L bytes of data from the iterator of chunks of data.
1617 1624 Returns less than L bytes if the iterator runs dry.
1618 1625
1619 1626 If size parameter is omitted, read everything"""
1620 1627 if l is None:
1621 1628 return ''.join(self.iter)
1622 1629
1623 1630 left = l
1624 1631 buf = []
1625 1632 queue = self._queue
1626 1633 while left > 0:
1627 1634 # refill the queue
1628 1635 if not queue:
1629 1636 target = 2**18
1630 1637 for chunk in self.iter:
1631 1638 queue.append(chunk)
1632 1639 target -= len(chunk)
1633 1640 if target <= 0:
1634 1641 break
1635 1642 if not queue:
1636 1643 break
1637 1644
1638 1645 # The easy way to do this would be to queue.popleft(), modify the
1639 1646 # chunk (if necessary), then queue.appendleft(). However, for cases
1640 1647 # where we read partial chunk content, this incurs 2 dequeue
1641 1648 # mutations and creates a new str for the remaining chunk in the
1642 1649 # queue. Our code below avoids this overhead.
1643 1650
1644 1651 chunk = queue[0]
1645 1652 chunkl = len(chunk)
1646 1653 offset = self._chunkoffset
1647 1654
1648 1655 # Use full chunk.
1649 1656 if offset == 0 and left >= chunkl:
1650 1657 left -= chunkl
1651 1658 queue.popleft()
1652 1659 buf.append(chunk)
1653 1660 # self._chunkoffset remains at 0.
1654 1661 continue
1655 1662
1656 1663 chunkremaining = chunkl - offset
1657 1664
1658 1665 # Use all of unconsumed part of chunk.
1659 1666 if left >= chunkremaining:
1660 1667 left -= chunkremaining
1661 1668 queue.popleft()
1662 1669 # offset == 0 is enabled by block above, so this won't merely
1663 1670 # copy via ``chunk[0:]``.
1664 1671 buf.append(chunk[offset:])
1665 1672 self._chunkoffset = 0
1666 1673
1667 1674 # Partial chunk needed.
1668 1675 else:
1669 1676 buf.append(chunk[offset:offset + left])
1670 1677 self._chunkoffset += left
1671 1678 left -= chunkremaining
1672 1679
1673 1680 return ''.join(buf)
1674 1681
1675 1682 def filechunkiter(f, size=65536, limit=None):
1676 1683 """Create a generator that produces the data in the file size
1677 1684 (default 65536) bytes at a time, up to optional limit (default is
1678 1685 to read all data). Chunks may be less than size bytes if the
1679 1686 chunk is the last chunk in the file, or the file is a socket or
1680 1687 some other type of file that sometimes reads less data than is
1681 1688 requested."""
1682 1689 assert size >= 0
1683 1690 assert limit is None or limit >= 0
1684 1691 while True:
1685 1692 if limit is None:
1686 1693 nbytes = size
1687 1694 else:
1688 1695 nbytes = min(limit, size)
1689 1696 s = nbytes and f.read(nbytes)
1690 1697 if not s:
1691 1698 break
1692 1699 if limit:
1693 1700 limit -= len(s)
1694 1701 yield s
1695 1702
1696 1703 def makedate(timestamp=None):
1697 1704 '''Return a unix timestamp (or the current time) as a (unixtime,
1698 1705 offset) tuple based off the local timezone.'''
1699 1706 if timestamp is None:
1700 1707 timestamp = time.time()
1701 1708 if timestamp < 0:
1702 1709 hint = _("check your clock")
1703 1710 raise Abort(_("negative timestamp: %d") % timestamp, hint=hint)
1704 1711 delta = (datetime.datetime.utcfromtimestamp(timestamp) -
1705 1712 datetime.datetime.fromtimestamp(timestamp))
1706 1713 tz = delta.days * 86400 + delta.seconds
1707 1714 return timestamp, tz
1708 1715
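# Illustration (not part of the original module): the offset is seconds
# west of UTC, so a UTC+2 local zone yields -7200. Hand-computed under
# that assumption:
#
#   >>> makedate(1469621421.0)   # assuming local time is UTC+2
#   (1469621421.0, -7200)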
1709 1716 def datestr(date=None, format='%a %b %d %H:%M:%S %Y %1%2'):
1710 1717 """represent a (unixtime, offset) tuple as a localized time.
1711 1718 unixtime is seconds since the epoch, and offset is the time zone's
1712 1719 number of seconds away from UTC.
1713 1720
1714 1721 >>> datestr((0, 0))
1715 1722 'Thu Jan 01 00:00:00 1970 +0000'
1716 1723 >>> datestr((42, 0))
1717 1724 'Thu Jan 01 00:00:42 1970 +0000'
1718 1725 >>> datestr((-42, 0))
1719 1726 'Wed Dec 31 23:59:18 1969 +0000'
1720 1727 >>> datestr((0x7fffffff, 0))
1721 1728 'Tue Jan 19 03:14:07 2038 +0000'
1722 1729 >>> datestr((-0x80000000, 0))
1723 1730 'Fri Dec 13 20:45:52 1901 +0000'
1724 1731 """
1725 1732 t, tz = date or makedate()
1726 1733 if "%1" in format or "%2" in format or "%z" in format:
1727 1734 sign = (tz > 0) and "-" or "+"
1728 1735 minutes = abs(tz) // 60
1729 1736 q, r = divmod(minutes, 60)
1730 1737 format = format.replace("%z", "%1%2")
1731 1738 format = format.replace("%1", "%c%02d" % (sign, q))
1732 1739 format = format.replace("%2", "%02d" % r)
1733 1740 d = t - tz
1734 1741 if d > 0x7fffffff:
1735 1742 d = 0x7fffffff
1736 1743 elif d < -0x80000000:
1737 1744 d = -0x80000000
1738 1745 # Never use time.gmtime() and datetime.datetime.fromtimestamp()
1739 1746 # because they use the gmtime() system call which is buggy on Windows
1740 1747 # for negative values.
1741 1748 t = datetime.datetime(1970, 1, 1) + datetime.timedelta(seconds=d)
1742 1749 s = t.strftime(format)
1743 1750 return s
1744 1751
1745 1752 def shortdate(date=None):
1746 1753 """turn (timestamp, tzoff) tuple into iso 8631 date."""
1747 1754 return datestr(date, format='%Y-%m-%d')
1748 1755
1749 1756 def parsetimezone(s):
1750 1757 """find a trailing timezone, if any, in string, and return a
1751 1758 (offset, remainder) pair"""
1752 1759
1753 1760 if s.endswith("GMT") or s.endswith("UTC"):
1754 1761 return 0, s[:-3].rstrip()
1755 1762
1756 1763 # Unix-style timezones [+-]hhmm
1757 1764 if len(s) >= 5 and s[-5] in "+-" and s[-4:].isdigit():
1758 1765 sign = (s[-5] == "+") and 1 or -1
1759 1766 hours = int(s[-4:-2])
1760 1767 minutes = int(s[-2:])
1761 1768 return -sign * (hours * 60 + minutes) * 60, s[:-5].rstrip()
1762 1769
1763 1770 # ISO8601 trailing Z
1764 1771 if s.endswith("Z") and s[-2:-1].isdigit():
1765 1772 return 0, s[:-1]
1766 1773
1767 1774 # ISO8601-style [+-]hh:mm
1768 1775 if (len(s) >= 6 and s[-6] in "+-" and s[-3] == ":" and
1769 1776 s[-5:-3].isdigit() and s[-2:].isdigit()):
1770 1777 sign = (s[-6] == "+") and 1 or -1
1771 1778 hours = int(s[-5:-3])
1772 1779 minutes = int(s[-2:])
1773 1780 return -sign * (hours * 60 + minutes) * 60, s[:-6]
1774 1781
1775 1782 return None, s
1776 1783
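# Illustration (not part of the original module): offsets come back in
# seconds west of UTC, hence the sign flip. Hand-traced:
#
#   >>> parsetimezone('2016-07-27 12:10:21 +0200')
#   (-7200, '2016-07-27 12:10:21')
#   >>> parsetimezone('2016-07-27T12:10:21Z')
#   (0, '2016-07-27T12:10:21')
#   >>> parsetimezone('no timezone here')
#   (None, 'no timezone here')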
1777 1784 def strdate(string, format, defaults=[]):
1778 1785 """parse a localized time string and return a (unixtime, offset) tuple.
1779 1786 if the string cannot be parsed, ValueError is raised."""
1780 1787 # NOTE: unixtime = localunixtime + offset
1781 1788 offset, date = parsetimezone(string)
1782 1789
1783 1790 # add missing elements from defaults
1784 1791 usenow = False # default to using biased defaults
1785 1792 for part in ("S", "M", "HI", "d", "mb", "yY"): # decreasing specificity
1786 1793 found = [True for p in part if ("%"+p) in format]
1787 1794 if not found:
1788 1795 date += "@" + defaults[part][usenow]
1789 1796 format += "@%" + part[0]
1790 1797 else:
1791 1798 # We've found a specific time element, less specific time
1792 1799 # elements are relative to today
1793 1800 usenow = True
1794 1801
1795 1802 timetuple = time.strptime(date, format)
1796 1803 localunixtime = int(calendar.timegm(timetuple))
1797 1804 if offset is None:
1798 1805 # local timezone
1799 1806 unixtime = int(time.mktime(timetuple))
1800 1807 offset = unixtime - localunixtime
1801 1808 else:
1802 1809 unixtime = localunixtime + offset
1803 1810 return unixtime, offset
1804 1811
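# Illustration (not part of the original module): with an explicit zone
# the offset comes from parsetimezone(); here every field is supplied by
# the format, so `defaults` is never consulted (the value matches the
# debugdate tests below):
#
#   >>> strdate('2006-02-01 13:00:30 +0000', '%Y-%m-%d %H:%M:%S')
#   (1138798830, 0)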
1805 1812 def parsedate(date, formats=None, bias=None):
1806 1813 """parse a localized date/time and return a (unixtime, offset) tuple.
1807 1814
1808 1815 The date may be a "unixtime offset" string or in one of the specified
1809 1816 formats. If the date already is a (unixtime, offset) tuple, it is returned.
1810 1817
1811 1818 >>> parsedate(' today ') == parsedate(\
1812 1819 datetime.date.today().strftime('%b %d'))
1813 1820 True
1814 1821 >>> parsedate( 'yesterday ') == parsedate((datetime.date.today() -\
1815 1822 datetime.timedelta(days=1)\
1816 1823 ).strftime('%b %d'))
1817 1824 True
1818 1825 >>> now, tz = makedate()
1819 1826 >>> strnow, strtz = parsedate('now')
1820 1827 >>> (strnow - now) < 1
1821 1828 True
1822 1829 >>> tz == strtz
1823 1830 True
1824 1831 """
1825 1832 if bias is None:
1826 1833 bias = {}
1827 1834 if not date:
1828 1835 return 0, 0
1829 1836 if isinstance(date, tuple) and len(date) == 2:
1830 1837 return date
1831 1838 if not formats:
1832 1839 formats = defaultdateformats
1833 1840 date = date.strip()
1834 1841
1835 1842 if date == 'now' or date == _('now'):
1836 1843 return makedate()
1837 1844 if date == 'today' or date == _('today'):
1838 1845 date = datetime.date.today().strftime('%b %d')
1839 1846 elif date == 'yesterday' or date == _('yesterday'):
1840 1847 date = (datetime.date.today() -
1841 1848 datetime.timedelta(days=1)).strftime('%b %d')
1842 1849
1843 1850 try:
1844 1851 when, offset = map(int, date.split(' '))
1845 1852 except ValueError:
1846 1853 # fill out defaults
1847 1854 now = makedate()
1848 1855 defaults = {}
1849 1856 for part in ("d", "mb", "yY", "HI", "M", "S"):
1850 1857 # this piece is for rounding the specific end of unknowns
1851 1858 b = bias.get(part)
1852 1859 if b is None:
1853 1860 if part[0] in "HMS":
1854 1861 b = "00"
1855 1862 else:
1856 1863 b = "0"
1857 1864
1858 1865 # this piece is for matching the generic end to today's date
1859 1866 n = datestr(now, "%" + part[0])
1860 1867
1861 1868 defaults[part] = (b, n)
1862 1869
1863 1870 for format in formats:
1864 1871 try:
1865 1872 when, offset = strdate(date, format, defaults)
1866 1873 except (ValueError, OverflowError):
1867 1874 pass
1868 1875 else:
1869 1876 break
1870 1877 else:
1871 1878 raise Abort(_('invalid date: %r') % date)
1872 1879 # validate explicit (probably user-specified) date and
1873 1880 # time zone offset. values must fit in signed 32 bits for
1874 1881 # current 32-bit linux runtimes. timezones go from UTC-12
1875 1882 # to UTC+14
1876 1883 if when < -0x80000000 or when > 0x7fffffff:
1877 1884 raise Abort(_('date exceeds 32 bits: %d') % when)
1878 1885 if offset < -50400 or offset > 43200:
1879 1886 raise Abort(_('impossible time zone offset: %d') % offset)
1880 1887 return when, offset
1881 1888
1882 1889 def matchdate(date):
1883 1890 """Return a function that matches a given date match specifier
1884 1891
1885 1892 Formats include:
1886 1893
1887 1894 '{date}' match a given date to the accuracy provided
1888 1895
1889 1896 '<{date}' on or before a given date
1890 1897
1891 1898 '>{date}' on or after a given date
1892 1899
1893 1900 >>> p1 = parsedate("10:29:59")
1894 1901 >>> p2 = parsedate("10:30:00")
1895 1902 >>> p3 = parsedate("10:30:59")
1896 1903 >>> p4 = parsedate("10:31:00")
1897 1904 >>> p5 = parsedate("Sep 15 10:30:00 1999")
1898 1905 >>> f = matchdate("10:30")
1899 1906 >>> f(p1[0])
1900 1907 False
1901 1908 >>> f(p2[0])
1902 1909 True
1903 1910 >>> f(p3[0])
1904 1911 True
1905 1912 >>> f(p4[0])
1906 1913 False
1907 1914 >>> f(p5[0])
1908 1915 False
1909 1916 """
1910 1917
1911 1918 def lower(date):
1912 1919 d = {'mb': "1", 'd': "1"}
1913 1920 return parsedate(date, extendeddateformats, d)[0]
1914 1921
1915 1922 def upper(date):
1916 1923 d = {'mb': "12", 'HI': "23", 'M': "59", 'S': "59"}
1917 1924 for days in ("31", "30", "29"):
1918 1925 try:
1919 1926 d["d"] = days
1920 1927 return parsedate(date, extendeddateformats, d)[0]
1921 1928 except Abort:
1922 1929 pass
1923 1930 d["d"] = "28"
1924 1931 return parsedate(date, extendeddateformats, d)[0]
1925 1932
1926 1933 date = date.strip()
1927 1934
1928 1935 if not date:
1929 1936 raise Abort(_("dates cannot consist entirely of whitespace"))
1930 1937 elif date[0] == "<":
1931 1938 if not date[1:]:
1932 1939 raise Abort(_("invalid day spec, use '<DATE'"))
1933 1940 when = upper(date[1:])
1934 1941 return lambda x: x <= when
1935 1942 elif date[0] == ">":
1936 1943 if not date[1:]:
1937 1944 raise Abort(_("invalid day spec, use '>DATE'"))
1938 1945 when = lower(date[1:])
1939 1946 return lambda x: x >= when
1940 1947 elif date[0] == "-":
1941 1948 try:
1942 1949 days = int(date[1:])
1943 1950 except ValueError:
1944 1951 raise Abort(_("invalid day spec: %s") % date[1:])
1945 1952 if days < 0:
1946 1953 raise Abort(_('%s must be nonnegative (see "hg help dates")')
1947 1954 % date[1:])
1948 1955 when = makedate()[0] - days * 3600 * 24
1949 1956 return lambda x: x >= when
1950 1957 elif " to " in date:
1951 1958 a, b = date.split(" to ")
1952 1959 start, stop = lower(a), upper(b)
1953 1960 return lambda x: x >= start and x <= stop
1954 1961 else:
1955 1962 start, stop = lower(date), upper(date)
1956 1963 return lambda x: x >= start and x <= stop
1957 1964
1958 1965 def stringmatcher(pattern):
1959 1966 """
1960 1967 accepts a string, possibly starting with 're:' or 'literal:' prefix.
1961 1968 returns the matcher name, pattern, and matcher function.
1962 1969 missing or unknown prefixes are treated as literal matches.
1963 1970
1964 1971 helper for tests:
1965 1972 >>> def test(pattern, *tests):
1966 1973 ... kind, pattern, matcher = stringmatcher(pattern)
1967 1974 ... return (kind, pattern, [bool(matcher(t)) for t in tests])
1968 1975
1969 1976 exact matching (no prefix):
1970 1977 >>> test('abcdefg', 'abc', 'def', 'abcdefg')
1971 1978 ('literal', 'abcdefg', [False, False, True])
1972 1979
1973 1980 regex matching ('re:' prefix)
1974 1981 >>> test('re:a.+b', 'nomatch', 'fooadef', 'fooadefbar')
1975 1982 ('re', 'a.+b', [False, False, True])
1976 1983
1977 1984 force exact matches ('literal:' prefix)
1978 1985 >>> test('literal:re:foobar', 'foobar', 're:foobar')
1979 1986 ('literal', 're:foobar', [False, True])
1980 1987
1981 1988 unknown prefixes are ignored and treated as literals
1982 1989 >>> test('foo:bar', 'foo', 'bar', 'foo:bar')
1983 1990 ('literal', 'foo:bar', [False, False, True])
1984 1991 """
1985 1992 if pattern.startswith('re:'):
1986 1993 pattern = pattern[3:]
1987 1994 try:
1988 1995 regex = remod.compile(pattern)
1989 1996 except remod.error as e:
1990 1997 raise error.ParseError(_('invalid regular expression: %s')
1991 1998 % e)
1992 1999 return 're', pattern, regex.search
1993 2000 elif pattern.startswith('literal:'):
1994 2001 pattern = pattern[8:]
1995 2002 return 'literal', pattern, pattern.__eq__
1996 2003
1997 2004 def shortuser(user):
1998 2005 """Return a short representation of a user name or email address."""
1999 2006 f = user.find('@')
2000 2007 if f >= 0:
2001 2008 user = user[:f]
2002 2009 f = user.find('<')
2003 2010 if f >= 0:
2004 2011 user = user[f + 1:]
2005 2012 f = user.find(' ')
2006 2013 if f >= 0:
2007 2014 user = user[:f]
2008 2015 f = user.find('.')
2009 2016 if f >= 0:
2010 2017 user = user[:f]
2011 2018 return user
2012 2019
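# Illustration (not part of the original module): the successive trims
# above reduce a full address line to a bare login. Hand-traced:
#
#   >>> shortuser('John Doe <john.doe@example.com>')
#   'john'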
2013 2020 def emailuser(user):
2014 2021 """Return the user portion of an email address."""
2015 2022 f = user.find('@')
2016 2023 if f >= 0:
2017 2024 user = user[:f]
2018 2025 f = user.find('<')
2019 2026 if f >= 0:
2020 2027 user = user[f + 1:]
2021 2028 return user
2022 2029
2023 2030 def email(author):
2024 2031 '''get email of author.'''
2025 2032 r = author.find('>')
2026 2033 if r == -1:
2027 2034 r = None
2028 2035 return author[author.find('<') + 1:r]
2029 2036
2030 2037 def ellipsis(text, maxlength=400):
2031 2038 """Trim string to at most maxlength (default: 400) columns in display."""
2032 2039 return encoding.trim(text, maxlength, ellipsis='...')
2033 2040
2034 2041 def unitcountfn(*unittable):
2035 2042 '''return a function that renders a readable count of some quantity'''
2036 2043
2037 2044 def go(count):
2038 2045 for multiplier, divisor, format in unittable:
2039 2046 if count >= divisor * multiplier:
2040 2047 return format % (count / float(divisor))
2041 2048 return unittable[-1][2] % count
2042 2049
2043 2050 return go
2044 2051
2045 2052 bytecount = unitcountfn(
2046 2053 (100, 1 << 30, _('%.0f GB')),
2047 2054 (10, 1 << 30, _('%.1f GB')),
2048 2055 (1, 1 << 30, _('%.2f GB')),
2049 2056 (100, 1 << 20, _('%.0f MB')),
2050 2057 (10, 1 << 20, _('%.1f MB')),
2051 2058 (1, 1 << 20, _('%.2f MB')),
2052 2059 (100, 1 << 10, _('%.0f KB')),
2053 2060 (10, 1 << 10, _('%.1f KB')),
2054 2061 (1, 1 << 10, _('%.2f KB')),
2055 2062 (1, 1, _('%.0f bytes')),
2056 2063 )
2057 2064
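# Illustration (not part of the original module): the table is scanned
# top-down and the first row whose threshold is met wins, so values show
# at most three significant digits. Hand-computed:
#
#   >>> bytecount(100)
#   '100 bytes'
#   >>> bytecount(2252)
#   '2.20 KB'
#   >>> bytecount(5 * (1 << 20))
#   '5.00 MB'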
2058 2065 def uirepr(s):
2059 2066 # Avoid double backslash in Windows path repr()
2060 2067 return repr(s).replace('\\\\', '\\')
2061 2068
2062 2069 # lazily build the width-aware TextWrapper subclass
2063 2070 def MBTextWrapper(**kwargs):
2064 2071 class tw(textwrap.TextWrapper):
2065 2072 """
2066 2073 Extend TextWrapper for width-awareness.
2067 2074
2068 2075 Neither the number of 'bytes' in any encoding nor the number of
2069 2076 'characters' is appropriate for calculating terminal columns.
2070 2077
2071 2078 The original TextWrapper implementation uses the built-in 'len()'
2072 2079 directly, so overriding is needed to use the width information of each character.
2073 2080
2074 2081 In addition, characters classified as 'ambiguous' width are treated
2075 2082 as wide in East Asian locales, but as narrow elsewhere.
2076 2083
2077 2084 This requires a user decision to determine the width of such characters.
2078 2085 """
2079 2086 def _cutdown(self, ucstr, space_left):
2080 2087 l = 0
2081 2088 colwidth = encoding.ucolwidth
2082 2089 for i in xrange(len(ucstr)):
2083 2090 l += colwidth(ucstr[i])
2084 2091 if space_left < l:
2085 2092 return (ucstr[:i], ucstr[i:])
2086 2093 return ucstr, ''
2087 2094
2088 2095 # overriding of base class
2089 2096 def _handle_long_word(self, reversed_chunks, cur_line, cur_len, width):
2090 2097 space_left = max(width - cur_len, 1)
2091 2098
2092 2099 if self.break_long_words:
2093 2100 cut, res = self._cutdown(reversed_chunks[-1], space_left)
2094 2101 cur_line.append(cut)
2095 2102 reversed_chunks[-1] = res
2096 2103 elif not cur_line:
2097 2104 cur_line.append(reversed_chunks.pop())
2098 2105
2099 2106 # this overriding code is imported from TextWrapper of Python 2.6
2100 2107 # to calculate columns of string by 'encoding.ucolwidth()'
2101 2108 def _wrap_chunks(self, chunks):
2102 2109 colwidth = encoding.ucolwidth
2103 2110
2104 2111 lines = []
2105 2112 if self.width <= 0:
2106 2113 raise ValueError("invalid width %r (must be > 0)" % self.width)
2107 2114
2108 2115 # Arrange in reverse order so items can be efficiently popped
2109 2116 # from a stack of chunks.
2110 2117 chunks.reverse()
2111 2118
2112 2119 while chunks:
2113 2120
2114 2121 # Start the list of chunks that will make up the current line.
2115 2122 # cur_len is just the length of all the chunks in cur_line.
2116 2123 cur_line = []
2117 2124 cur_len = 0
2118 2125
2119 2126 # Figure out which static string will prefix this line.
2120 2127 if lines:
2121 2128 indent = self.subsequent_indent
2122 2129 else:
2123 2130 indent = self.initial_indent
2124 2131
2125 2132 # Maximum width for this line.
2126 2133 width = self.width - len(indent)
2127 2134
2128 2135 # First chunk on line is whitespace -- drop it, unless this
2129 2136 # is the very beginning of the text (i.e. no lines started yet).
2130 2137 if self.drop_whitespace and chunks[-1].strip() == '' and lines:
2131 2138 del chunks[-1]
2132 2139
2133 2140 while chunks:
2134 2141 l = colwidth(chunks[-1])
2135 2142
2136 2143 # Can at least squeeze this chunk onto the current line.
2137 2144 if cur_len + l <= width:
2138 2145 cur_line.append(chunks.pop())
2139 2146 cur_len += l
2140 2147
2141 2148 # Nope, this line is full.
2142 2149 else:
2143 2150 break
2144 2151
2145 2152 # The current line is full, and the next chunk is too big to
2146 2153 # fit on *any* line (not just this one).
2147 2154 if chunks and colwidth(chunks[-1]) > width:
2148 2155 self._handle_long_word(chunks, cur_line, cur_len, width)
2149 2156
2150 2157 # If the last chunk on this line is all whitespace, drop it.
2151 2158 if (self.drop_whitespace and
2152 2159 cur_line and cur_line[-1].strip() == ''):
2153 2160 del cur_line[-1]
2154 2161
2155 2162 # Convert current line back to a string and store it in list
2156 2163 # of all lines (return value).
2157 2164 if cur_line:
2158 2165 lines.append(indent + ''.join(cur_line))
2159 2166
2160 2167 return lines
2161 2168
2162 2169 global MBTextWrapper
2163 2170 MBTextWrapper = tw
2164 2171 return tw(**kwargs)
2165 2172
2166 2173 def wrap(line, width, initindent='', hangindent=''):
2167 2174 maxindent = max(len(hangindent), len(initindent))
2168 2175 if width <= maxindent:
2169 2176 # adjust for weird terminal size
2170 2177 width = max(78, maxindent + 1)
2171 2178 line = line.decode(encoding.encoding, encoding.encodingmode)
2172 2179 initindent = initindent.decode(encoding.encoding, encoding.encodingmode)
2173 2180 hangindent = hangindent.decode(encoding.encoding, encoding.encodingmode)
2174 2181 wrapper = MBTextWrapper(width=width,
2175 2182 initial_indent=initindent,
2176 2183 subsequent_indent=hangindent)
2177 2184 return wrapper.fill(line).encode(encoding.encoding)
2178 2185
2179 2186 def iterlines(iterator):
2180 2187 for chunk in iterator:
2181 2188 for line in chunk.splitlines():
2182 2189 yield line
2183 2190
2184 2191 def expandpath(path):
2185 2192 return os.path.expanduser(os.path.expandvars(path))
2186 2193
2187 2194 def hgcmd():
2188 2195 """Return the command used to execute current hg
2189 2196
2190 2197 This is different from hgexecutable() because on Windows we want
2191 2198 to avoid things opening new shell windows like batch files, so we
2192 2199 get either the python call or current executable.
2193 2200 """
2194 2201 if mainfrozen():
2195 2202 if getattr(sys, 'frozen', None) == 'macosx_app':
2196 2203 # Env variable set by py2app
2197 2204 return [os.environ['EXECUTABLEPATH']]
2198 2205 else:
2199 2206 return [sys.executable]
2200 2207 return gethgcmd()
2201 2208
2202 2209 def rundetached(args, condfn):
2203 2210 """Execute the argument list in a detached process.
2204 2211
2205 2212 condfn is a callable which is called repeatedly and should return
2206 2213 True once the child process is known to have started successfully.
2207 2214 At this point, the child process PID is returned. If the child
2208 2215 process fails to start or finishes before condfn() evaluates to
2209 2216 True, return -1.
2210 2217 """
2211 2218 # Windows case is easier because the child process is either
2212 2219 # successfully starting and validating the condition or exiting
2213 2220 # on failure. We just poll on its PID. On Unix, if the child
2214 2221 # process fails to start, it will be left in a zombie state until
2215 2222 # the parent waits on it, which we cannot do since we expect a
2216 2223 # long-running process on success. Instead we listen for SIGCHLD telling
2217 2224 # us our child process terminated.
2218 2225 terminated = set()
2219 2226 def handler(signum, frame):
2220 2227 terminated.add(os.wait())
2221 2228 prevhandler = None
2222 2229 SIGCHLD = getattr(signal, 'SIGCHLD', None)
2223 2230 if SIGCHLD is not None:
2224 2231 prevhandler = signal.signal(SIGCHLD, handler)
2225 2232 try:
2226 2233 pid = spawndetached(args)
2227 2234 while not condfn():
2228 2235 if ((pid in terminated or not testpid(pid))
2229 2236 and not condfn()):
2230 2237 return -1
2231 2238 time.sleep(0.1)
2232 2239 return pid
2233 2240 finally:
2234 2241 if prevhandler is not None:
2235 2242 signal.signal(signal.SIGCHLD, prevhandler)
2236 2243
2237 2244 def interpolate(prefix, mapping, s, fn=None, escape_prefix=False):
2238 2245 """Return the result of interpolating items in the mapping into string s.
2239 2246
2240 2247 prefix is a single character string, or a two character string with
2241 2248 a backslash as the first character if the prefix needs to be escaped in
2242 2249 a regular expression.
2243 2250
2244 2251 fn is an optional function that will be applied to the replacement text
2245 2252 just before replacement.
2246 2253
2247 2254 escape_prefix is an optional flag that allows using doubled prefix for
2248 2255 its escaping.
2249 2256 """
2250 2257 fn = fn or (lambda s: s)
2251 2258 patterns = '|'.join(mapping.keys())
2252 2259 if escape_prefix:
2253 2260 patterns += '|' + prefix
2254 2261 if len(prefix) > 1:
2255 2262 prefix_char = prefix[1:]
2256 2263 else:
2257 2264 prefix_char = prefix
2258 2265 mapping[prefix_char] = prefix_char
2259 2266 r = remod.compile(r'%s(%s)' % (prefix, patterns))
2260 2267 return r.sub(lambda x: fn(mapping[x.group()[1:]]), s)
2261 2268
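# Illustration (not part of the original module): a minimal substitution
# with a made-up mapping; the regex matches '%' plus any mapping key:
#
#   >>> interpolate('%', {'foo': 'bar'}, 'say %foo')
#   'say bar'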
2262 2269 def getport(port):
2263 2270 """Return the port for a given network service.
2264 2271
2265 2272 If port is an integer, it's returned as is. If it's a string, it's
2266 2273 looked up using socket.getservbyname(). If there's no matching
2267 2274 service, error.Abort is raised.
2268 2275 """
2269 2276 try:
2270 2277 return int(port)
2271 2278 except ValueError:
2272 2279 pass
2273 2280
2274 2281 try:
2275 2282 return socket.getservbyname(port)
2276 2283 except socket.error:
2277 2284 raise Abort(_("no port number associated with service '%s'") % port)
2278 2285
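# Illustration (not part of the original module): integer strings pass
# straight through; service names are resolved by the platform's
# services database, so the second value assumes a standard /etc/services:
#
#   >>> getport('8080')
#   8080
#   >>> getport('https')
#   443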
2279 2286 _booleans = {'1': True, 'yes': True, 'true': True, 'on': True, 'always': True,
2280 2287 '0': False, 'no': False, 'false': False, 'off': False,
2281 2288 'never': False}
2282 2289
2283 2290 def parsebool(s):
2284 2291 """Parse s into a boolean.
2285 2292
2286 2293 If s is not a valid boolean, returns None.
2287 2294 """
2288 2295 return _booleans.get(s.lower(), None)
2289 2296
2290 2297 _hexdig = '0123456789ABCDEFabcdef'
2291 2298 _hextochr = dict((a + b, chr(int(a + b, 16)))
2292 2299 for a in _hexdig for b in _hexdig)
2293 2300
2294 2301 def _urlunquote(s):
2295 2302 """Decode HTTP/HTML % encoding.
2296 2303
2297 2304 >>> _urlunquote('abc%20def')
2298 2305 'abc def'
2299 2306 """
2300 2307 res = s.split('%')
2301 2308 # fastpath
2302 2309 if len(res) == 1:
2303 2310 return s
2304 2311 s = res[0]
2305 2312 for item in res[1:]:
2306 2313 try:
2307 2314 s += _hextochr[item[:2]] + item[2:]
2308 2315 except KeyError:
2309 2316 s += '%' + item
2310 2317 except UnicodeDecodeError:
2311 2318 s += unichr(int(item[:2], 16)) + item[2:]
2312 2319 return s
2313 2320
2314 2321 class url(object):
2315 2322 r"""Reliable URL parser.
2316 2323
2317 2324 This parses URLs and provides attributes for the following
2318 2325 components:
2319 2326
2320 2327 <scheme>://<user>:<passwd>@<host>:<port>/<path>?<query>#<fragment>
2321 2328
2322 2329 Missing components are set to None. The only exception is
2323 2330 fragment, which is set to '' if present but empty.
2324 2331
2325 2332 If parsefragment is False, fragment is included in query. If
2326 2333 parsequery is False, query is included in path. If both are
2327 2334 False, both fragment and query are included in path.
2328 2335
2329 2336 See http://www.ietf.org/rfc/rfc2396.txt for more information.
2330 2337
2331 2338 Note that for backward compatibility reasons, bundle URLs do not
2332 2339 take host names. That means 'bundle://../' has a path of '../'.
2333 2340
2334 2341 Examples:
2335 2342
2336 2343 >>> url('http://www.ietf.org/rfc/rfc2396.txt')
2337 2344 <url scheme: 'http', host: 'www.ietf.org', path: 'rfc/rfc2396.txt'>
2338 2345 >>> url('ssh://[::1]:2200//home/joe/repo')
2339 2346 <url scheme: 'ssh', host: '[::1]', port: '2200', path: '/home/joe/repo'>
2340 2347 >>> url('file:///home/joe/repo')
2341 2348 <url scheme: 'file', path: '/home/joe/repo'>
2342 2349 >>> url('file:///c:/temp/foo/')
2343 2350 <url scheme: 'file', path: 'c:/temp/foo/'>
2344 2351 >>> url('bundle:foo')
2345 2352 <url scheme: 'bundle', path: 'foo'>
2346 2353 >>> url('bundle://../foo')
2347 2354 <url scheme: 'bundle', path: '../foo'>
2348 2355 >>> url(r'c:\foo\bar')
2349 2356 <url path: 'c:\\foo\\bar'>
2350 2357 >>> url(r'\\blah\blah\blah')
2351 2358 <url path: '\\\\blah\\blah\\blah'>
2352 2359 >>> url(r'\\blah\blah\blah#baz')
2353 2360 <url path: '\\\\blah\\blah\\blah', fragment: 'baz'>
2354 2361 >>> url(r'file:///C:\users\me')
2355 2362 <url scheme: 'file', path: 'C:\\users\\me'>
2356 2363
2357 2364 Authentication credentials:
2358 2365
2359 2366 >>> url('ssh://joe:xyz@x/repo')
2360 2367 <url scheme: 'ssh', user: 'joe', passwd: 'xyz', host: 'x', path: 'repo'>
2361 2368 >>> url('ssh://joe@x/repo')
2362 2369 <url scheme: 'ssh', user: 'joe', host: 'x', path: 'repo'>
2363 2370
2364 2371 Query strings and fragments:
2365 2372
2366 2373 >>> url('http://host/a?b#c')
2367 2374 <url scheme: 'http', host: 'host', path: 'a', query: 'b', fragment: 'c'>
2368 2375 >>> url('http://host/a?b#c', parsequery=False, parsefragment=False)
2369 2376 <url scheme: 'http', host: 'host', path: 'a?b#c'>
2370 2377 """
2371 2378
2372 2379 _safechars = "!~*'()+"
2373 2380 _safepchars = "/!~*'()+:\\"
2374 2381 _matchscheme = remod.compile(r'^[a-zA-Z0-9+.\-]+:').match
2375 2382
2376 2383 def __init__(self, path, parsequery=True, parsefragment=True):
2377 2384 # We slowly chomp away at path until we have only the path left
2378 2385 self.scheme = self.user = self.passwd = self.host = None
2379 2386 self.port = self.path = self.query = self.fragment = None
2380 2387 self._localpath = True
2381 2388 self._hostport = ''
2382 2389 self._origpath = path
2383 2390
2384 2391 if parsefragment and '#' in path:
2385 2392 path, self.fragment = path.split('#', 1)
2386 2393 if not path:
2387 2394 path = None
2388 2395
2389 2396 # special case for Windows drive letters and UNC paths
2390 2397 if hasdriveletter(path) or path.startswith(r'\\'):
2391 2398 self.path = path
2392 2399 return
2393 2400
2394 2401 # For compatibility reasons, we can't handle bundle paths as
2395 2402 # normal URLs
2396 2403 if path.startswith('bundle:'):
2397 2404 self.scheme = 'bundle'
2398 2405 path = path[7:]
2399 2406 if path.startswith('//'):
2400 2407 path = path[2:]
2401 2408 self.path = path
2402 2409 return
2403 2410
2404 2411 if self._matchscheme(path):
2405 2412 parts = path.split(':', 1)
2406 2413 if parts[0]:
2407 2414 self.scheme, path = parts
2408 2415 self._localpath = False
2409 2416
2410 2417 if not path:
2411 2418 path = None
2412 2419 if self._localpath:
2413 2420 self.path = ''
2414 2421 return
2415 2422 else:
2416 2423 if self._localpath:
2417 2424 self.path = path
2418 2425 return
2419 2426
2420 2427 if parsequery and '?' in path:
2421 2428 path, self.query = path.split('?', 1)
2422 2429 if not path:
2423 2430 path = None
2424 2431 if not self.query:
2425 2432 self.query = None
2426 2433
2427 2434 # // is required to specify a host/authority
2428 2435 if path and path.startswith('//'):
2429 2436 parts = path[2:].split('/', 1)
2430 2437 if len(parts) > 1:
2431 2438 self.host, path = parts
2432 2439 else:
2433 2440 self.host = parts[0]
2434 2441 path = None
2435 2442 if not self.host:
2436 2443 self.host = None
2437 2444 # path of file:///d is /d
2438 2445 # path of file:///d:/ is d:/, not /d:/
2439 2446 if path and not hasdriveletter(path):
2440 2447 path = '/' + path
2441 2448
2442 2449 if self.host and '@' in self.host:
2443 2450 self.user, self.host = self.host.rsplit('@', 1)
2444 2451 if ':' in self.user:
2445 2452 self.user, self.passwd = self.user.split(':', 1)
2446 2453 if not self.host:
2447 2454 self.host = None
2448 2455
2449 2456 # Don't split on colons in IPv6 addresses without ports
2450 2457 if (self.host and ':' in self.host and
2451 2458 not (self.host.startswith('[') and self.host.endswith(']'))):
2452 2459 self._hostport = self.host
2453 2460 self.host, self.port = self.host.rsplit(':', 1)
2454 2461 if not self.host:
2455 2462 self.host = None
2456 2463
2457 2464 if (self.host and self.scheme == 'file' and
2458 2465 self.host not in ('localhost', '127.0.0.1', '[::1]')):
2459 2466 raise Abort(_('file:// URLs can only refer to localhost'))
2460 2467
2461 2468 self.path = path
2462 2469
2463 2470 # leave the query string escaped
2464 2471 for a in ('user', 'passwd', 'host', 'port',
2465 2472 'path', 'fragment'):
2466 2473 v = getattr(self, a)
2467 2474 if v is not None:
2468 2475 setattr(self, a, _urlunquote(v))
2469 2476
2470 2477 def __repr__(self):
2471 2478 attrs = []
2472 2479 for a in ('scheme', 'user', 'passwd', 'host', 'port', 'path',
2473 2480 'query', 'fragment'):
2474 2481 v = getattr(self, a)
2475 2482 if v is not None:
2476 2483 attrs.append('%s: %r' % (a, v))
2477 2484 return '<url %s>' % ', '.join(attrs)
2478 2485
2479 2486 def __str__(self):
2480 2487 r"""Join the URL's components back into a URL string.
2481 2488
2482 2489 Examples:
2483 2490
2484 2491 >>> str(url('http://user:pw@host:80/c:/bob?fo:oo#ba:ar'))
2485 2492 'http://user:pw@host:80/c:/bob?fo:oo#ba:ar'
2486 2493 >>> str(url('http://user:pw@host:80/?foo=bar&baz=42'))
2487 2494 'http://user:pw@host:80/?foo=bar&baz=42'
2488 2495 >>> str(url('http://user:pw@host:80/?foo=bar%3dbaz'))
2489 2496 'http://user:pw@host:80/?foo=bar%3dbaz'
2490 2497 >>> str(url('ssh://user:pw@[::1]:2200//home/joe#'))
2491 2498 'ssh://user:pw@[::1]:2200//home/joe#'
2492 2499 >>> str(url('http://localhost:80//'))
2493 2500 'http://localhost:80//'
2494 2501 >>> str(url('http://localhost:80/'))
2495 2502 'http://localhost:80/'
2496 2503 >>> str(url('http://localhost:80'))
2497 2504 'http://localhost:80/'
2498 2505 >>> str(url('bundle:foo'))
2499 2506 'bundle:foo'
2500 2507 >>> str(url('bundle://../foo'))
2501 2508 'bundle:../foo'
2502 2509 >>> str(url('path'))
2503 2510 'path'
2504 2511 >>> str(url('file:///tmp/foo/bar'))
2505 2512 'file:///tmp/foo/bar'
2506 2513 >>> str(url('file:///c:/tmp/foo/bar'))
2507 2514 'file:///c:/tmp/foo/bar'
2508 2515 >>> print url(r'bundle:foo\bar')
2509 2516 bundle:foo\bar
2510 2517 >>> print url(r'file:///D:\data\hg')
2511 2518 file:///D:\data\hg
2512 2519 """
2513 2520 if self._localpath:
2514 2521 s = self.path
2515 2522 if self.scheme == 'bundle':
2516 2523 s = 'bundle:' + s
2517 2524 if self.fragment:
2518 2525 s += '#' + self.fragment
2519 2526 return s
2520 2527
2521 2528 s = self.scheme + ':'
2522 2529 if self.user or self.passwd or self.host:
2523 2530 s += '//'
2524 2531 elif self.scheme and (not self.path or self.path.startswith('/')
2525 2532 or hasdriveletter(self.path)):
2526 2533 s += '//'
2527 2534 if hasdriveletter(self.path):
2528 2535 s += '/'
2529 2536 if self.user:
2530 2537 s += urlreq.quote(self.user, safe=self._safechars)
2531 2538 if self.passwd:
2532 2539 s += ':' + urlreq.quote(self.passwd, safe=self._safechars)
2533 2540 if self.user or self.passwd:
2534 2541 s += '@'
2535 2542 if self.host:
2536 2543 if not (self.host.startswith('[') and self.host.endswith(']')):
2537 2544 s += urlreq.quote(self.host)
2538 2545 else:
2539 2546 s += self.host
2540 2547 if self.port:
2541 2548 s += ':' + urlreq.quote(self.port)
2542 2549 if self.host:
2543 2550 s += '/'
2544 2551 if self.path:
2545 2552 # TODO: similar to the query string, we should not unescape the
2546 2553 # path when we store it, the path might contain '%2f' = '/',
2547 2554 # which we should *not* escape.
2548 2555 s += urlreq.quote(self.path, safe=self._safepchars)
2549 2556 if self.query:
2550 2557 # we store the query in escaped form.
2551 2558 s += '?' + self.query
2552 2559 if self.fragment is not None:
2553 2560 s += '#' + urlreq.quote(self.fragment, safe=self._safepchars)
2554 2561 return s
2555 2562
2556 2563 def authinfo(self):
2557 2564 user, passwd = self.user, self.passwd
2558 2565 try:
2559 2566 self.user, self.passwd = None, None
2560 2567 s = str(self)
2561 2568 finally:
2562 2569 self.user, self.passwd = user, passwd
2563 2570 if not self.user:
2564 2571 return (s, None)
2565 2572 # authinfo[1] is passed to urllib2 password manager, and its
2566 2573 # URIs must not contain credentials. The host is passed in the
2567 2574 # URIs list because Python < 2.4.3 uses only that to search for
2568 2575 # a password.
2569 2576 return (s, (None, (s, self.host),
2570 2577 self.user, self.passwd or ''))
2571 2578
2572 2579 def isabs(self):
2573 2580 if self.scheme and self.scheme != 'file':
2574 2581 return True # remote URL
2575 2582 if hasdriveletter(self.path):
2576 2583 return True # absolute for our purposes - can't be joined()
2577 2584 if self.path.startswith(r'\\'):
2578 2585 return True # Windows UNC path
2579 2586 if self.path.startswith('/'):
2580 2587 return True # POSIX-style
2581 2588 return False
2582 2589
2583 2590 def localpath(self):
2584 2591 if self.scheme == 'file' or self.scheme == 'bundle':
2585 2592 path = self.path or '/'
2586 2593 # For Windows, we need to promote hosts containing drive
2587 2594 # letters to paths with drive letters.
2588 2595 if hasdriveletter(self._hostport):
2589 2596 path = self._hostport + '/' + self.path
2590 2597 elif (self.host is not None and self.path
2591 2598 and not hasdriveletter(path)):
2592 2599 path = '/' + path
2593 2600 return path
2594 2601 return self._origpath
2595 2602
2596 2603 def islocal(self):
2597 2604 '''whether localpath will return something that posixfile can open'''
2598 2605 return (not self.scheme or self.scheme == 'file'
2599 2606 or self.scheme == 'bundle')
2600 2607
2601 2608 def hasscheme(path):
2602 2609 return bool(url(path).scheme)
2603 2610
2604 2611 def hasdriveletter(path):
2605 2612 return path and path[1:2] == ':' and path[0:1].isalpha()
2606 2613
2607 2614 def urllocalpath(path):
2608 2615 return url(path, parsequery=False, parsefragment=False).localpath()
2609 2616
2610 2617 def hidepassword(u):
2611 2618 '''hide user credential in a url string'''
2612 2619 u = url(u)
2613 2620 if u.passwd:
2614 2621 u.passwd = '***'
2615 2622 return str(u)
2616 2623
2617 2624 def removeauth(u):
2618 2625 '''remove all authentication information from a url string'''
2619 2626 u = url(u)
2620 2627 u.user = u.passwd = None
2621 2628 return str(u)
2622 2629
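# Illustration (not part of the original module): both helpers
# round-trip through the url class above. Hand-traced:
#
#   >>> hidepassword('http://joe:secret@example.com/repo')
#   'http://joe:***@example.com/repo'
#   >>> removeauth('http://joe:secret@example.com/repo')
#   'http://example.com/repo'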
2623 2630 def isatty(fp):
2624 2631 try:
2625 2632 return fp.isatty()
2626 2633 except AttributeError:
2627 2634 return False
2628 2635
2629 2636 timecount = unitcountfn(
2630 2637 (1, 1e3, _('%.0f s')),
2631 2638 (100, 1, _('%.1f s')),
2632 2639 (10, 1, _('%.2f s')),
2633 2640 (1, 1, _('%.3f s')),
2634 2641 (100, 0.001, _('%.1f ms')),
2635 2642 (10, 0.001, _('%.2f ms')),
2636 2643 (1, 0.001, _('%.3f ms')),
2637 2644 (100, 0.000001, _('%.1f us')),
2638 2645 (10, 0.000001, _('%.2f us')),
2639 2646 (1, 0.000001, _('%.3f us')),
2640 2647 (100, 0.000000001, _('%.1f ns')),
2641 2648 (10, 0.000000001, _('%.2f ns')),
2642 2649 (1, 0.000000001, _('%.3f ns')),
2643 2650 )
2644 2651
2645 2652 _timenesting = [0]
2646 2653
2647 2654 def timed(func):
2648 2655 '''Report the execution time of a function call to stderr.
2649 2656
2650 2657 During development, use as a decorator when you need to measure
2651 2658 the cost of a function, e.g. as follows:
2652 2659
2653 2660 @util.timed
2654 2661 def foo(a, b, c):
2655 2662 pass
2656 2663 '''
2657 2664
2658 2665 def wrapper(*args, **kwargs):
2659 2666 start = time.time()
2660 2667 indent = 2
2661 2668 _timenesting[0] += indent
2662 2669 try:
2663 2670 return func(*args, **kwargs)
2664 2671 finally:
2665 2672 elapsed = time.time() - start
2666 2673 _timenesting[0] -= indent
2667 2674 sys.stderr.write('%s%s: %s\n' %
2668 2675 (' ' * _timenesting[0], func.__name__,
2669 2676 timecount(elapsed)))
2670 2677 return wrapper
2671 2678
2672 2679 _sizeunits = (('m', 2**20), ('k', 2**10), ('g', 2**30),
2673 2680 ('kb', 2**10), ('mb', 2**20), ('gb', 2**30), ('b', 1))
2674 2681
2675 2682 def sizetoint(s):
2676 2683 '''Convert a space specifier to a byte count.
2677 2684
2678 2685 >>> sizetoint('30')
2679 2686 30
2680 2687 >>> sizetoint('2.2kb')
2681 2688 2252
2682 2689 >>> sizetoint('6M')
2683 2690 6291456
2684 2691 '''
2685 2692 t = s.strip().lower()
2686 2693 try:
2687 2694 for k, u in _sizeunits:
2688 2695 if t.endswith(k):
2689 2696 return int(float(t[:-len(k)]) * u)
2690 2697 return int(t)
2691 2698 except ValueError:
2692 2699 raise error.ParseError(_("couldn't parse size: %s") % s)
2693 2700
2694 2701 class hooks(object):
2695 2702 '''A collection of hook functions that can be used to extend a
2696 2703 function's behavior. Hooks are called in lexicographic order,
2697 2704 based on the names of their sources.'''
2698 2705
2699 2706 def __init__(self):
2700 2707 self._hooks = []
2701 2708
2702 2709 def add(self, source, hook):
2703 2710 self._hooks.append((source, hook))
2704 2711
2705 2712 def __call__(self, *args):
2706 2713 self._hooks.sort(key=lambda x: x[0])
2707 2714 results = []
2708 2715 for source, hook in self._hooks:
2709 2716 results.append(hook(*args))
2710 2717 return results
2711 2718
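# Illustration (not part of the original module): hooks fire sorted by
# source name, not insertion order; the sources are made up:
#
#   >>> h = hooks()
#   >>> h.add('ext-b', lambda x: x + 1)
#   >>> h.add('ext-a', lambda x: x * 2)
#   >>> h(10)    # 'ext-a' sorts first
#   [20, 11]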
2712 2719 def getstackframes(skip=0, line=' %-*s in %s\n', fileline='%s:%s'):
2713 2720 '''Yields lines for a nicely formatted stacktrace.
2714 2721 Skips the 'skip' last entries.
2715 2722 Each file+linenumber is formatted according to fileline.
2716 2723 Each line is formatted according to line.
2717 2724 If line is None, it yields:
2718 2725 length of longest filepath+line number,
2719 2726 filepath+linenumber,
2720 2727 function
2721 2728
2722 2729 Not to be used in production code but very convenient while developing.
2723 2730 '''
2724 2731 entries = [(fileline % (fn, ln), func)
2725 2732 for fn, ln, func, _text in traceback.extract_stack()[:-skip - 1]]
2726 2733 if entries:
2727 2734 fnmax = max(len(entry[0]) for entry in entries)
2728 2735 for fnln, func in entries:
2729 2736 if line is None:
2730 2737 yield (fnmax, fnln, func)
2731 2738 else:
2732 2739 yield line % (fnmax, fnln, func)
2733 2740
2734 2741 def debugstacktrace(msg='stacktrace', skip=0, f=sys.stderr, otherf=sys.stdout):
2735 2742 '''Writes a message to f (stderr) with a nicely formatted stacktrace.
2736 2743 Skips the 'skip' last entries. By default it will flush stdout first.
2737 2744 It can be used everywhere and intentionally does not require a ui object.
2738 2745 Not to be used in production code but very convenient while developing.
2739 2746 '''
2740 2747 if otherf:
2741 2748 otherf.flush()
2742 2749 f.write('%s at:\n' % msg)
2743 2750 for line in getstackframes(skip + 1):
2744 2751 f.write(line)
2745 2752 f.flush()
2746 2753
2747 2754 class dirs(object):
2748 2755 '''a multiset of directory names from a dirstate or manifest'''
2749 2756
2750 2757 def __init__(self, map, skip=None):
2751 2758 self._dirs = {}
2752 2759 addpath = self.addpath
2753 2760 if safehasattr(map, 'iteritems') and skip is not None:
2754 2761 for f, s in map.iteritems():
2755 2762 if s[0] != skip:
2756 2763 addpath(f)
2757 2764 else:
2758 2765 for f in map:
2759 2766 addpath(f)
2760 2767
2761 2768 def addpath(self, path):
2762 2769 dirs = self._dirs
2763 2770 for base in finddirs(path):
2764 2771 if base in dirs:
2765 2772 dirs[base] += 1
2766 2773 return
2767 2774 dirs[base] = 1
2768 2775
2769 2776 def delpath(self, path):
2770 2777 dirs = self._dirs
2771 2778 for base in finddirs(path):
2772 2779 if dirs[base] > 1:
2773 2780 dirs[base] -= 1
2774 2781 return
2775 2782 del dirs[base]
2776 2783
2777 2784 def __iter__(self):
2778 2785 return self._dirs.iterkeys()
2779 2786
2780 2787 def __contains__(self, d):
2781 2788 return d in self._dirs
2782 2789
2783 2790 if safehasattr(parsers, 'dirs'):
2784 2791 dirs = parsers.dirs
2785 2792
2786 2793 def finddirs(path):
2787 2794 pos = path.rfind('/')
2788 2795 while pos != -1:
2789 2796 yield path[:pos]
2790 2797 pos = path.rfind('/', 0, pos)
2791 2798
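# Illustration (not part of the original module): finddirs() yields
# ancestors innermost-first, and dirs() counts them as a multiset (the
# C version from parsers is assumed to agree for this usage):
#
#   >>> list(finddirs('a/b/c'))
#   ['a/b', 'a']
#   >>> d = dirs(['a/b/c', 'a/d'])
#   >>> 'a' in d, 'a/b' in d, 'a/d' in d
#   (True, True, False)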
2792 2799 # compression utility
2793 2800
2794 2801 class nocompress(object):
2795 2802 def compress(self, x):
2796 2803 return x
2797 2804 def flush(self):
2798 2805 return ""
2799 2806
2800 2807 compressors = {
2801 2808 None: nocompress,
2802 2809 # lambda to prevent early import
2803 2810 'BZ': lambda: bz2.BZ2Compressor(),
2804 2811 'GZ': lambda: zlib.compressobj(),
2805 2812 }
2806 2813 # also support the old form as a courtesy
2807 2814 compressors['UN'] = compressors[None]
2808 2815
2809 2816 def _makedecompressor(decompcls):
2810 2817 def generator(f):
2811 2818 d = decompcls()
2812 2819 for chunk in filechunkiter(f):
2813 2820 yield d.decompress(chunk)
2814 2821 def func(fh):
2815 2822 return chunkbuffer(generator(fh))
2816 2823 return func
2817 2824
2818 2825 class ctxmanager(object):
2819 2826 '''A context manager for use in 'with' blocks to allow multiple
2820 2827 contexts to be entered at once. This is both safer and more
2821 2828 flexible than contextlib.nested.
2822 2829
2823 2830 Once Mercurial supports Python 2.7+, this will become mostly
2824 2831 unnecessary.
2825 2832 '''
2826 2833
2827 2834 def __init__(self, *args):
2828 2835 '''Accepts a list of no-argument functions that return context
2829 2836 managers. These will be invoked at __call__ time.'''
2830 2837 self._pending = args
2831 2838 self._atexit = []
2832 2839
2833 2840 def __enter__(self):
2834 2841 return self
2835 2842
2836 2843 def enter(self):
2837 2844 '''Create and enter context managers in the order in which they were
2838 2845 passed to the constructor.'''
2839 2846 values = []
2840 2847 for func in self._pending:
2841 2848 obj = func()
2842 2849 values.append(obj.__enter__())
2843 2850 self._atexit.append(obj.__exit__)
2844 2851 del self._pending
2845 2852 return values
2846 2853
2847 2854 def atexit(self, func, *args, **kwargs):
2848 2855 '''Add a function to call when this context manager exits. The
2849 2856 ordering of multiple atexit calls is unspecified, save that
2850 2857 they will happen before any __exit__ functions.'''
2851 2858 def wrapper(exc_type, exc_val, exc_tb):
2852 2859 func(*args, **kwargs)
2853 2860 self._atexit.append(wrapper)
2854 2861 return func
2855 2862
2856 2863 def __exit__(self, exc_type, exc_val, exc_tb):
2857 2864 '''Context managers are exited in the reverse order from which
2858 2865 they were created.'''
2859 2866 received = exc_type is not None
2860 2867 suppressed = False
2861 2868 pending = None
2862 2869 self._atexit.reverse()
2863 2870 for exitfunc in self._atexit:
2864 2871 try:
2865 2872 if exitfunc(exc_type, exc_val, exc_tb):
2866 2873 suppressed = True
2867 2874 exc_type = None
2868 2875 exc_val = None
2869 2876 exc_tb = None
2870 2877 except BaseException:
2872 2879 exc_type, exc_val, exc_tb = pending = sys.exc_info()
2873 2880 del self._atexit
2874 2881 if pending:
2875 2882 raise exc_val
2876 2883 return received and suppressed
2877 2884
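# Illustration (not part of the original module): the typical shape of a
# ctxmanager block; the file names are made up:
#
#   >>> with ctxmanager(lambda: open('/tmp/a'), lambda: open('/tmp/b')) as c:
#   ...     fa, fb = c.enter()   # both context managers entered, in order
#   ...     c.atexit(fa.flush)   # runs before either __exit__ on the way out
#   ...     # ... use fa and fb ...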
2878 2885 def _bz2():
2879 2886 d = bz2.BZ2Decompressor()
2880 2887 # Bzip2 streams start with BZ, but we stripped it.
2881 2888 # We put it back for good measure.
2882 2889 d.decompress('BZ')
2883 2890 return d
2884 2891
2885 2892 decompressors = {None: lambda fh: fh,
2886 2893 '_truncatedBZ': _makedecompressor(_bz2),
2887 2894 'BZ': _makedecompressor(lambda: bz2.BZ2Decompressor()),
2888 2895 'GZ': _makedecompressor(lambda: zlib.decompressobj()),
2889 2896 }
2890 2897 # also support the old form as a courtesy
2891 2898 decompressors['UN'] = decompressors[None]
2892 2899
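# Illustration (not part of the original module): round-tripping a
# payload through the 'GZ' entries; a decompressor wraps a file-like
# object in a chunkbuffer:
#
#   >>> comp = compressors['GZ']()
#   >>> blob = comp.compress('some payload') + comp.flush()
#   >>> decompressors['GZ'](stringio(blob)).read()
#   'some payload'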
2893 2900 # convenient shortcut
2894 2901 dst = debugstacktrace
@@ -1,260 +1,288 b''
1 1 This runs with TZ="GMT"
2 2
3 3 $ hg init
4 4 $ echo "test-parse-date" > a
5 5 $ hg add a
6 6 $ hg ci -d "2006-02-01 13:00:30" -m "rev 0"
7 7 $ echo "hi!" >> a
8 8 $ hg ci -d "2006-02-01 13:00:30 -0500" -m "rev 1"
9 9 $ hg tag -d "2006-04-15 13:30" "Hi"
10 10 $ hg backout --merge -d "2006-04-15 13:30 +0200" -m "rev 3" 1
11 11 reverting a
12 12 created new head
13 13 changeset 3:107ce1ee2b43 backs out changeset 1:25a1420a55f8
14 14 merging with changeset 3:107ce1ee2b43
15 15 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
16 16 (branch merge, don't forget to commit)
17 17 $ hg ci -d "1150000000 14400" -m "rev 4 (merge)"
18 18 $ echo "fail" >> a
19 19 $ hg ci -d "should fail" -m "fail"
20 20 abort: invalid date: 'should fail'
21 21 [255]
22 22 $ hg ci -d "100000000000000000 1400" -m "fail"
23 23 abort: date exceeds 32 bits: 100000000000000000
24 24 [255]
25 25 $ hg ci -d "100000 1400000" -m "fail"
26 26 abort: impossible time zone offset: 1400000
27 27 [255]
28 28
29 29 Check with local timezone other than GMT and with DST
30 30
31 31 $ TZ="PST+8PDT+7,M4.1.0/02:00:00,M10.5.0/02:00:00"
32 32 $ export TZ
33 33
34 34 PST=UTC-8 / PDT=UTC-7
35 35 Summer time begins on April's first Sunday at 2:00am,
36 36 and ends on October's last Sunday at 2:00am.
37 37
38 38 $ hg debugrebuildstate
39 39 $ echo "a" > a
40 40 $ hg ci -d "2006-07-15 13:30" -m "summer@UTC-7"
41 41 $ hg debugrebuildstate
42 42 $ echo "b" > a
43 43 $ hg ci -d "2006-07-15 13:30 +0500" -m "summer@UTC+5"
44 44 $ hg debugrebuildstate
45 45 $ echo "c" > a
46 46 $ hg ci -d "2006-01-15 13:30" -m "winter@UTC-8"
47 47 $ hg debugrebuildstate
48 48 $ echo "d" > a
49 49 $ hg ci -d "2006-01-15 13:30 +0500" -m "winter@UTC+5"
50 50 $ hg log --template '{date|date}\n'
51 51 Sun Jan 15 13:30:00 2006 +0500
52 52 Sun Jan 15 13:30:00 2006 -0800
53 53 Sat Jul 15 13:30:00 2006 +0500
54 54 Sat Jul 15 13:30:00 2006 -0700
55 55 Sun Jun 11 00:26:40 2006 -0400
56 56 Sat Apr 15 13:30:00 2006 +0200
57 57 Sat Apr 15 13:30:00 2006 +0000
58 58 Wed Feb 01 13:00:30 2006 -0500
59 59 Wed Feb 01 13:00:30 2006 +0000
60 60
61 61 Test issue1014 (fractional timezones)
62 62
63 63 $ hg debugdate "1000000000 -16200" # 0430
64 64 internal: 1000000000 -16200
65 65 standard: Sun Sep 09 06:16:40 2001 +0430
66 66 $ hg debugdate "1000000000 -15300" # 0415
67 67 internal: 1000000000 -15300
68 68 standard: Sun Sep 09 06:01:40 2001 +0415
69 69 $ hg debugdate "1000000000 -14400" # 0400
70 70 internal: 1000000000 -14400
71 71 standard: Sun Sep 09 05:46:40 2001 +0400
72 72 $ hg debugdate "1000000000 0" # GMT
73 73 internal: 1000000000 0
74 74 standard: Sun Sep 09 01:46:40 2001 +0000
75 75 $ hg debugdate "1000000000 14400" # -0400
76 76 internal: 1000000000 14400
77 77 standard: Sat Sep 08 21:46:40 2001 -0400
78 78 $ hg debugdate "1000000000 15300" # -0415
79 79 internal: 1000000000 15300
80 80 standard: Sat Sep 08 21:31:40 2001 -0415
81 81 $ hg debugdate "1000000000 16200" # -0430
82 82 internal: 1000000000 16200
83 83 standard: Sat Sep 08 21:16:40 2001 -0430
84 84 $ hg debugdate "Sat Sep 08 21:16:40 2001 +0430"
85 85 internal: 999967600 -16200
86 86 standard: Sat Sep 08 21:16:40 2001 +0430
87 87 $ hg debugdate "Sat Sep 08 21:16:40 2001 -0430"
88 88 internal: 1000000000 16200
89 89 standard: Sat Sep 08 21:16:40 2001 -0430
90 90
91 91 Test 12-hours times
92 92
93 93 $ hg debugdate "2006-02-01 1:00:30PM +0000"
94 94 internal: 1138798830 0
95 95 standard: Wed Feb 01 13:00:30 2006 +0000
96 96 $ hg debugdate "1:00:30PM" > /dev/null
97 97
98 98 Normal range
99 99
100 100 $ hg log -d -1
101 101
102 102 Negative range
103 103
104 104 $ hg log -d "--2"
105 105 abort: -2 must be nonnegative (see "hg help dates")
106 106 [255]
107 107
108 108 Whitespace only
109 109
110 110 $ hg log -d " "
111 111 abort: dates cannot consist entirely of whitespace
112 112 [255]
113 113
114 114 Test date formats with '>' or '<' accompanied by space characters
115 115
116 116 $ hg log -d '>' --template '{date|date}\n'
117 117 abort: invalid day spec, use '>DATE'
118 118 [255]
119 119 $ hg log -d '<' --template '{date|date}\n'
120 120 abort: invalid day spec, use '<DATE'
121 121 [255]
122 122
123 123 $ hg log -d ' >' --template '{date|date}\n'
124 124 abort: invalid day spec, use '>DATE'
125 125 [255]
126 126 $ hg log -d ' <' --template '{date|date}\n'
127 127 abort: invalid day spec, use '<DATE'
128 128 [255]
129 129
130 130 $ hg log -d '> ' --template '{date|date}\n'
131 131 abort: invalid day spec, use '>DATE'
132 132 [255]
133 133 $ hg log -d '< ' --template '{date|date}\n'
134 134 abort: invalid day spec, use '<DATE'
135 135 [255]
136 136
137 137 $ hg log -d ' > ' --template '{date|date}\n'
138 138 abort: invalid day spec, use '>DATE'
139 139 [255]
140 140 $ hg log -d ' < ' --template '{date|date}\n'
141 141 abort: invalid day spec, use '<DATE'
142 142 [255]
143 143
144 144 $ hg log -d '>02/01' --template '{date|date}\n'
145 145 $ hg log -d '<02/01' --template '{date|date}\n'
146 146 Sun Jan 15 13:30:00 2006 +0500
147 147 Sun Jan 15 13:30:00 2006 -0800
148 148 Sat Jul 15 13:30:00 2006 +0500
149 149 Sat Jul 15 13:30:00 2006 -0700
150 150 Sun Jun 11 00:26:40 2006 -0400
151 151 Sat Apr 15 13:30:00 2006 +0200
152 152 Sat Apr 15 13:30:00 2006 +0000
153 153 Wed Feb 01 13:00:30 2006 -0500
154 154 Wed Feb 01 13:00:30 2006 +0000
155 155
156 156 $ hg log -d ' >02/01' --template '{date|date}\n'
157 157 $ hg log -d ' <02/01' --template '{date|date}\n'
158 158 Sun Jan 15 13:30:00 2006 +0500
159 159 Sun Jan 15 13:30:00 2006 -0800
160 160 Sat Jul 15 13:30:00 2006 +0500
161 161 Sat Jul 15 13:30:00 2006 -0700
162 162 Sun Jun 11 00:26:40 2006 -0400
163 163 Sat Apr 15 13:30:00 2006 +0200
164 164 Sat Apr 15 13:30:00 2006 +0000
165 165 Wed Feb 01 13:00:30 2006 -0500
166 166 Wed Feb 01 13:00:30 2006 +0000
167 167
168 168 $ hg log -d '> 02/01' --template '{date|date}\n'
169 169 $ hg log -d '< 02/01' --template '{date|date}\n'
170 170 Sun Jan 15 13:30:00 2006 +0500
171 171 Sun Jan 15 13:30:00 2006 -0800
172 172 Sat Jul 15 13:30:00 2006 +0500
173 173 Sat Jul 15 13:30:00 2006 -0700
174 174 Sun Jun 11 00:26:40 2006 -0400
175 175 Sat Apr 15 13:30:00 2006 +0200
176 176 Sat Apr 15 13:30:00 2006 +0000
177 177 Wed Feb 01 13:00:30 2006 -0500
178 178 Wed Feb 01 13:00:30 2006 +0000
179 179
180 180 $ hg log -d ' > 02/01' --template '{date|date}\n'
181 181 $ hg log -d ' < 02/01' --template '{date|date}\n'
182 182 Sun Jan 15 13:30:00 2006 +0500
183 183 Sun Jan 15 13:30:00 2006 -0800
184 184 Sat Jul 15 13:30:00 2006 +0500
185 185 Sat Jul 15 13:30:00 2006 -0700
186 186 Sun Jun 11 00:26:40 2006 -0400
187 187 Sat Apr 15 13:30:00 2006 +0200
188 188 Sat Apr 15 13:30:00 2006 +0000
189 189 Wed Feb 01 13:00:30 2006 -0500
190 190 Wed Feb 01 13:00:30 2006 +0000
191 191
192 192 $ hg log -d '>02/01 ' --template '{date|date}\n'
193 193 $ hg log -d '<02/01 ' --template '{date|date}\n'
194 194 Sun Jan 15 13:30:00 2006 +0500
195 195 Sun Jan 15 13:30:00 2006 -0800
196 196 Sat Jul 15 13:30:00 2006 +0500
197 197 Sat Jul 15 13:30:00 2006 -0700
198 198 Sun Jun 11 00:26:40 2006 -0400
199 199 Sat Apr 15 13:30:00 2006 +0200
200 200 Sat Apr 15 13:30:00 2006 +0000
201 201 Wed Feb 01 13:00:30 2006 -0500
202 202 Wed Feb 01 13:00:30 2006 +0000
203 203
204 204 $ hg log -d ' >02/01 ' --template '{date|date}\n'
205 205 $ hg log -d ' <02/01 ' --template '{date|date}\n'
206 206 Sun Jan 15 13:30:00 2006 +0500
207 207 Sun Jan 15 13:30:00 2006 -0800
208 208 Sat Jul 15 13:30:00 2006 +0500
209 209 Sat Jul 15 13:30:00 2006 -0700
210 210 Sun Jun 11 00:26:40 2006 -0400
211 211 Sat Apr 15 13:30:00 2006 +0200
212 212 Sat Apr 15 13:30:00 2006 +0000
213 213 Wed Feb 01 13:00:30 2006 -0500
214 214 Wed Feb 01 13:00:30 2006 +0000
215 215
216 216 $ hg log -d '> 02/01 ' --template '{date|date}\n'
217 217 $ hg log -d '< 02/01 ' --template '{date|date}\n'
218 218 Sun Jan 15 13:30:00 2006 +0500
219 219 Sun Jan 15 13:30:00 2006 -0800
220 220 Sat Jul 15 13:30:00 2006 +0500
221 221 Sat Jul 15 13:30:00 2006 -0700
222 222 Sun Jun 11 00:26:40 2006 -0400
223 223 Sat Apr 15 13:30:00 2006 +0200
224 224 Sat Apr 15 13:30:00 2006 +0000
225 225 Wed Feb 01 13:00:30 2006 -0500
226 226 Wed Feb 01 13:00:30 2006 +0000
227 227
228 228 $ hg log -d ' > 02/01 ' --template '{date|date}\n'
229 229 $ hg log -d ' < 02/01 ' --template '{date|date}\n'
230 230 Sun Jan 15 13:30:00 2006 +0500
231 231 Sun Jan 15 13:30:00 2006 -0800
232 232 Sat Jul 15 13:30:00 2006 +0500
233 233 Sat Jul 15 13:30:00 2006 -0700
234 234 Sun Jun 11 00:26:40 2006 -0400
235 235 Sat Apr 15 13:30:00 2006 +0200
236 236 Sat Apr 15 13:30:00 2006 +0000
237 237 Wed Feb 01 13:00:30 2006 -0500
238 238 Wed Feb 01 13:00:30 2006 +0000
239 239
240 240 Test issue 3764 (interpreting 'today' and 'yesterday')
241 241 $ echo "hello" >> a
242 242 >>> import datetime
243 243 >>> today = datetime.date.today().strftime("%b %d")
244 244 >>> yesterday = (datetime.date.today() - datetime.timedelta(days=1)).strftime("%b %d")
245 245 >>> dates = open('dates', 'w')
246 246 >>> dates.write(today + '\n')
247 247 >>> dates.write(yesterday + '\n')
248 248 >>> dates.close()
249 249 $ hg ci -d "`sed -n '1p' dates`" -m "today is a good day to code"
250 250 $ hg log -d today --template '{desc}\n'
251 251 today is a good day to code
252 252 $ echo "goodbye" >> a
253 253 $ hg ci -d "`sed -n '2p' dates`" -m "the time traveler's code"
254 254 $ hg log -d yesterday --template '{desc}\n'
255 255 the time traveler's code
256 256 $ echo "foo" >> a
257 257 $ hg commit -d now -m 'Explicitly committed now.'
258 258 $ hg log -d today --template '{desc}\n'
259 259 Explicitly committed now.
260 260 today is a good day to code
261
262 Test parsing various ISO8601 forms
263
264 $ hg debugdate "2016-07-27T12:10:21"
265 internal: 1469646621 * (glob)
266 standard: Wed Jul 27 12:10:21 2016 -0700
267 $ hg debugdate "2016-07-27T12:10:21Z"
268 internal: 1469621421 0
269 standard: Wed Jul 27 12:10:21 2016 +0000
270 $ hg debugdate "2016-07-27T12:10:21+00:00"
271 internal: 1469621421 0
272 standard: Wed Jul 27 12:10:21 2016 +0000
273 $ hg debugdate "2016-07-27T121021Z"
274 internal: 1469621421 0
275 standard: Wed Jul 27 12:10:21 2016 +0000
276
277 $ hg debugdate "2016-07-27 12:10:21"
278 internal: 1469646621 * (glob)
279 standard: Wed Jul 27 12:10:21 2016 -0700
280 $ hg debugdate "2016-07-27 12:10:21Z"
281 internal: 1469621421 0
282 standard: Wed Jul 27 12:10:21 2016 +0000
283 $ hg debugdate "2016-07-27 12:10:21+00:00"
284 internal: 1469621421 0
285 standard: Wed Jul 27 12:10:21 2016 +0000
286 $ hg debugdate "2016-07-27 121021Z"
287 internal: 1469621421 0
288 standard: Wed Jul 27 12:10:21 2016 +0000