##// END OF EJS Templates
date: accept broader range of ISO 8601 time specs...
Matt Mackall -
r29638:491ee264 stable
parent child Browse files
Show More
@@ -1,2894 +1,2901 b''
1 # util.py - Mercurial utility functions and platform specific implementations
1 # util.py - Mercurial utility functions and platform specific implementations
2 #
2 #
3 # Copyright 2005 K. Thananchayan <thananck@yahoo.com>
3 # Copyright 2005 K. Thananchayan <thananck@yahoo.com>
4 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
5 # Copyright 2006 Vadim Gelfer <vadim.gelfer@gmail.com>
5 # Copyright 2006 Vadim Gelfer <vadim.gelfer@gmail.com>
6 #
6 #
7 # This software may be used and distributed according to the terms of the
7 # This software may be used and distributed according to the terms of the
8 # GNU General Public License version 2 or any later version.
8 # GNU General Public License version 2 or any later version.
9
9
10 """Mercurial utility functions and platform specific implementations.
10 """Mercurial utility functions and platform specific implementations.
11
11
12 This contains helper routines that are independent of the SCM core and
12 This contains helper routines that are independent of the SCM core and
13 hide platform-specific details from the core.
13 hide platform-specific details from the core.
14 """
14 """
15
15
16 from __future__ import absolute_import
16 from __future__ import absolute_import
17
17
18 import bz2
18 import bz2
19 import calendar
19 import calendar
20 import collections
20 import collections
21 import datetime
21 import datetime
22 import errno
22 import errno
23 import gc
23 import gc
24 import hashlib
24 import hashlib
25 import imp
25 import imp
26 import os
26 import os
27 import re as remod
27 import re as remod
28 import shutil
28 import shutil
29 import signal
29 import signal
30 import socket
30 import socket
31 import subprocess
31 import subprocess
32 import sys
32 import sys
33 import tempfile
33 import tempfile
34 import textwrap
34 import textwrap
35 import time
35 import time
36 import traceback
36 import traceback
37 import zlib
37 import zlib
38
38
39 from . import (
39 from . import (
40 encoding,
40 encoding,
41 error,
41 error,
42 i18n,
42 i18n,
43 osutil,
43 osutil,
44 parsers,
44 parsers,
45 pycompat,
45 pycompat,
46 )
46 )
47
47
48 for attr in (
48 for attr in (
49 'empty',
49 'empty',
50 'httplib',
50 'httplib',
51 'httpserver',
51 'httpserver',
52 'pickle',
52 'pickle',
53 'queue',
53 'queue',
54 'urlerr',
54 'urlerr',
55 'urlparse',
55 'urlparse',
56 # we do import urlreq, but we do it outside the loop
56 # we do import urlreq, but we do it outside the loop
57 #'urlreq',
57 #'urlreq',
58 'stringio',
58 'stringio',
59 'socketserver',
59 'socketserver',
60 'xmlrpclib',
60 'xmlrpclib',
61 ):
61 ):
62 globals()[attr] = getattr(pycompat, attr)
62 globals()[attr] = getattr(pycompat, attr)
63
63
64 # This line is to make pyflakes happy:
64 # This line is to make pyflakes happy:
65 urlreq = pycompat.urlreq
65 urlreq = pycompat.urlreq
66
66
67 if os.name == 'nt':
67 if os.name == 'nt':
68 from . import windows as platform
68 from . import windows as platform
69 else:
69 else:
70 from . import posix as platform
70 from . import posix as platform
71
71
72 _ = i18n._
72 _ = i18n._
73
73
74 bindunixsocket = platform.bindunixsocket
74 bindunixsocket = platform.bindunixsocket
75 cachestat = platform.cachestat
75 cachestat = platform.cachestat
76 checkexec = platform.checkexec
76 checkexec = platform.checkexec
77 checklink = platform.checklink
77 checklink = platform.checklink
78 copymode = platform.copymode
78 copymode = platform.copymode
79 executablepath = platform.executablepath
79 executablepath = platform.executablepath
80 expandglobs = platform.expandglobs
80 expandglobs = platform.expandglobs
81 explainexit = platform.explainexit
81 explainexit = platform.explainexit
82 findexe = platform.findexe
82 findexe = platform.findexe
83 gethgcmd = platform.gethgcmd
83 gethgcmd = platform.gethgcmd
84 getuser = platform.getuser
84 getuser = platform.getuser
85 getpid = os.getpid
85 getpid = os.getpid
86 groupmembers = platform.groupmembers
86 groupmembers = platform.groupmembers
87 groupname = platform.groupname
87 groupname = platform.groupname
88 hidewindow = platform.hidewindow
88 hidewindow = platform.hidewindow
89 isexec = platform.isexec
89 isexec = platform.isexec
90 isowner = platform.isowner
90 isowner = platform.isowner
91 localpath = platform.localpath
91 localpath = platform.localpath
92 lookupreg = platform.lookupreg
92 lookupreg = platform.lookupreg
93 makedir = platform.makedir
93 makedir = platform.makedir
94 nlinks = platform.nlinks
94 nlinks = platform.nlinks
95 normpath = platform.normpath
95 normpath = platform.normpath
96 normcase = platform.normcase
96 normcase = platform.normcase
97 normcasespec = platform.normcasespec
97 normcasespec = platform.normcasespec
98 normcasefallback = platform.normcasefallback
98 normcasefallback = platform.normcasefallback
99 openhardlinks = platform.openhardlinks
99 openhardlinks = platform.openhardlinks
100 oslink = platform.oslink
100 oslink = platform.oslink
101 parsepatchoutput = platform.parsepatchoutput
101 parsepatchoutput = platform.parsepatchoutput
102 pconvert = platform.pconvert
102 pconvert = platform.pconvert
103 poll = platform.poll
103 poll = platform.poll
104 popen = platform.popen
104 popen = platform.popen
105 posixfile = platform.posixfile
105 posixfile = platform.posixfile
106 quotecommand = platform.quotecommand
106 quotecommand = platform.quotecommand
107 readpipe = platform.readpipe
107 readpipe = platform.readpipe
108 rename = platform.rename
108 rename = platform.rename
109 removedirs = platform.removedirs
109 removedirs = platform.removedirs
110 samedevice = platform.samedevice
110 samedevice = platform.samedevice
111 samefile = platform.samefile
111 samefile = platform.samefile
112 samestat = platform.samestat
112 samestat = platform.samestat
113 setbinary = platform.setbinary
113 setbinary = platform.setbinary
114 setflags = platform.setflags
114 setflags = platform.setflags
115 setsignalhandler = platform.setsignalhandler
115 setsignalhandler = platform.setsignalhandler
116 shellquote = platform.shellquote
116 shellquote = platform.shellquote
117 spawndetached = platform.spawndetached
117 spawndetached = platform.spawndetached
118 split = platform.split
118 split = platform.split
119 sshargs = platform.sshargs
119 sshargs = platform.sshargs
120 statfiles = getattr(osutil, 'statfiles', platform.statfiles)
120 statfiles = getattr(osutil, 'statfiles', platform.statfiles)
121 statisexec = platform.statisexec
121 statisexec = platform.statisexec
122 statislink = platform.statislink
122 statislink = platform.statislink
123 termwidth = platform.termwidth
123 termwidth = platform.termwidth
124 testpid = platform.testpid
124 testpid = platform.testpid
125 umask = platform.umask
125 umask = platform.umask
126 unlink = platform.unlink
126 unlink = platform.unlink
127 unlinkpath = platform.unlinkpath
127 unlinkpath = platform.unlinkpath
128 username = platform.username
128 username = platform.username
129
129
130 # Python compatibility
130 # Python compatibility
131
131
132 _notset = object()
132 _notset = object()
133
133
134 # disable Python's problematic floating point timestamps (issue4836)
134 # disable Python's problematic floating point timestamps (issue4836)
135 # (Python hypocritically says you shouldn't change this behavior in
135 # (Python hypocritically says you shouldn't change this behavior in
136 # libraries, and sure enough Mercurial is not a library.)
136 # libraries, and sure enough Mercurial is not a library.)
137 os.stat_float_times(False)
137 os.stat_float_times(False)
138
138
def safehasattr(thing, attr):
    """Return True if thing exposes attr, without tripping over exotic
    __getattr__ implementations (uses a sentinel rather than hasattr)."""
    return getattr(thing, attr, _notset) is not _notset
141
141
DIGESTS = {
    'md5': hashlib.md5,
    'sha1': hashlib.sha1,
    'sha512': hashlib.sha512,
}
# List of digest types from strongest to weakest
DIGESTS_BY_STRENGTH = ['sha512', 'sha1', 'md5']

# sanity check: every advertised strength ranking must be computable
for k in DIGESTS_BY_STRENGTH:
    assert k in DIGESTS

class digester(object):
    """helper to compute digests.

    This helper can be used to compute one or more digests given their name.

    >>> d = digester(['md5', 'sha1'])
    >>> d.update('foo')
    >>> [k for k in sorted(d)]
    ['md5', 'sha1']
    >>> d['md5']
    'acbd18db4cc2f85cedef654fccc4a4d8'
    >>> d['sha1']
    '0beec7b5ea3f0fdbc95d0dd47f3c5bc275da8a33'
    >>> digester.preferred(['md5', 'sha1'])
    'sha1'
    """

    def __init__(self, digests, s=''):
        self._hashes = {}
        for k in digests:
            if k not in DIGESTS:
                raise Abort(_('unknown digest type: %s') % k)
            self._hashes[k] = DIGESTS[k]()
        if s:
            self.update(s)

    def update(self, data):
        """feed data into every configured hash object"""
        for h in self._hashes.values():
            h.update(data)

    def __getitem__(self, key):
        if key not in DIGESTS:
            # bug fix: this previously interpolated the stale module-level
            # loop variable 'k' instead of the requested 'key', producing a
            # misleading error message
            raise Abort(_('unknown digest type: %s') % key)
        return self._hashes[key].hexdigest()

    def __iter__(self):
        return iter(self._hashes)

    @staticmethod
    def preferred(supported):
        """returns the strongest digest type in both supported and DIGESTS."""

        for k in DIGESTS_BY_STRENGTH:
            if k in supported:
                return k
        return None
199
199
class digestchecker(object):
    """file handle wrapper that additionally checks content against a given
    size and digests.

    d = digestchecker(fh, size, {'md5': '...'})

    When multiple digests are given, all of them are validated.
    """

    def __init__(self, fh, size, digests):
        self._fh = fh
        self._size = size
        self._got = 0
        self._digests = dict(digests)
        self._digester = digester(self._digests.keys())

    def read(self, length=-1):
        # forward the read while accounting for bytes seen so far
        data = self._fh.read(length)
        self._digester.update(data)
        self._got += len(data)
        return data

    def validate(self):
        """raise Abort unless the observed size and all digests match"""
        if self._got != self._size:
            raise Abort(_('size mismatch: expected %d, got %d') %
                        (self._size, self._got))
        for name, expected in self._digests.items():
            actual = self._digester[name]
            if expected != actual:
                # i18n: first parameter is a digest name
                raise Abort(_('%s mismatch: expected %s, got %s') %
                            (name, expected, actual))
231
231
try:
    buffer = buffer  # keep the builtin when the interpreter provides one
except NameError:
    if sys.version_info[0] >= 3:
        def buffer(sliceable, offset=0):
            """Zero-copy stand-in for the removed Python 3 builtin."""
            return memoryview(sliceable)[offset:]
    else:
        def buffer(sliceable, offset=0):
            """Copying fallback for a Python 2 without the builtin."""
            return sliceable[offset:]
241
241
242 closefds = os.name == 'posix'
242 closefds = os.name == 'posix'
243
243
244 _chunksize = 4096
244 _chunksize = 4096
245
245
246 class bufferedinputpipe(object):
246 class bufferedinputpipe(object):
247 """a manually buffered input pipe
247 """a manually buffered input pipe
248
248
249 Python will not let us use buffered IO and lazy reading with 'polling' at
249 Python will not let us use buffered IO and lazy reading with 'polling' at
250 the same time. We cannot probe the buffer state and select will not detect
250 the same time. We cannot probe the buffer state and select will not detect
251 that data are ready to read if they are already buffered.
251 that data are ready to read if they are already buffered.
252
252
253 This class let us work around that by implementing its own buffering
253 This class let us work around that by implementing its own buffering
254 (allowing efficient readline) while offering a way to know if the buffer is
254 (allowing efficient readline) while offering a way to know if the buffer is
255 empty from the output (allowing collaboration of the buffer with polling).
255 empty from the output (allowing collaboration of the buffer with polling).
256
256
257 This class lives in the 'util' module because it makes use of the 'os'
257 This class lives in the 'util' module because it makes use of the 'os'
258 module from the python stdlib.
258 module from the python stdlib.
259 """
259 """
260
260
261 def __init__(self, input):
261 def __init__(self, input):
262 self._input = input
262 self._input = input
263 self._buffer = []
263 self._buffer = []
264 self._eof = False
264 self._eof = False
265 self._lenbuf = 0
265 self._lenbuf = 0
266
266
267 @property
267 @property
268 def hasbuffer(self):
268 def hasbuffer(self):
269 """True is any data is currently buffered
269 """True is any data is currently buffered
270
270
271 This will be used externally a pre-step for polling IO. If there is
271 This will be used externally a pre-step for polling IO. If there is
272 already data then no polling should be set in place."""
272 already data then no polling should be set in place."""
273 return bool(self._buffer)
273 return bool(self._buffer)
274
274
275 @property
275 @property
276 def closed(self):
276 def closed(self):
277 return self._input.closed
277 return self._input.closed
278
278
279 def fileno(self):
279 def fileno(self):
280 return self._input.fileno()
280 return self._input.fileno()
281
281
282 def close(self):
282 def close(self):
283 return self._input.close()
283 return self._input.close()
284
284
285 def read(self, size):
285 def read(self, size):
286 while (not self._eof) and (self._lenbuf < size):
286 while (not self._eof) and (self._lenbuf < size):
287 self._fillbuffer()
287 self._fillbuffer()
288 return self._frombuffer(size)
288 return self._frombuffer(size)
289
289
290 def readline(self, *args, **kwargs):
290 def readline(self, *args, **kwargs):
291 if 1 < len(self._buffer):
291 if 1 < len(self._buffer):
292 # this should not happen because both read and readline end with a
292 # this should not happen because both read and readline end with a
293 # _frombuffer call that collapse it.
293 # _frombuffer call that collapse it.
294 self._buffer = [''.join(self._buffer)]
294 self._buffer = [''.join(self._buffer)]
295 self._lenbuf = len(self._buffer[0])
295 self._lenbuf = len(self._buffer[0])
296 lfi = -1
296 lfi = -1
297 if self._buffer:
297 if self._buffer:
298 lfi = self._buffer[-1].find('\n')
298 lfi = self._buffer[-1].find('\n')
299 while (not self._eof) and lfi < 0:
299 while (not self._eof) and lfi < 0:
300 self._fillbuffer()
300 self._fillbuffer()
301 if self._buffer:
301 if self._buffer:
302 lfi = self._buffer[-1].find('\n')
302 lfi = self._buffer[-1].find('\n')
303 size = lfi + 1
303 size = lfi + 1
304 if lfi < 0: # end of file
304 if lfi < 0: # end of file
305 size = self._lenbuf
305 size = self._lenbuf
306 elif 1 < len(self._buffer):
306 elif 1 < len(self._buffer):
307 # we need to take previous chunks into account
307 # we need to take previous chunks into account
308 size += self._lenbuf - len(self._buffer[-1])
308 size += self._lenbuf - len(self._buffer[-1])
309 return self._frombuffer(size)
309 return self._frombuffer(size)
310
310
311 def _frombuffer(self, size):
311 def _frombuffer(self, size):
312 """return at most 'size' data from the buffer
312 """return at most 'size' data from the buffer
313
313
314 The data are removed from the buffer."""
314 The data are removed from the buffer."""
315 if size == 0 or not self._buffer:
315 if size == 0 or not self._buffer:
316 return ''
316 return ''
317 buf = self._buffer[0]
317 buf = self._buffer[0]
318 if 1 < len(self._buffer):
318 if 1 < len(self._buffer):
319 buf = ''.join(self._buffer)
319 buf = ''.join(self._buffer)
320
320
321 data = buf[:size]
321 data = buf[:size]
322 buf = buf[len(data):]
322 buf = buf[len(data):]
323 if buf:
323 if buf:
324 self._buffer = [buf]
324 self._buffer = [buf]
325 self._lenbuf = len(buf)
325 self._lenbuf = len(buf)
326 else:
326 else:
327 self._buffer = []
327 self._buffer = []
328 self._lenbuf = 0
328 self._lenbuf = 0
329 return data
329 return data
330
330
331 def _fillbuffer(self):
331 def _fillbuffer(self):
332 """read data to the buffer"""
332 """read data to the buffer"""
333 data = os.read(self._input.fileno(), _chunksize)
333 data = os.read(self._input.fileno(), _chunksize)
334 if not data:
334 if not data:
335 self._eof = True
335 self._eof = True
336 else:
336 else:
337 self._lenbuf += len(data)
337 self._lenbuf += len(data)
338 self._buffer.append(data)
338 self._buffer.append(data)
339
339
def popen2(cmd, env=None, newlines=False):
    """run cmd through the shell, returning (stdin, stdout) pipes"""
    # bufsize=-1 lets the system pick the buffer size; the default of 0
    # (unbuffered) performs poorly on Mac OS X:
    # http://bugs.python.org/issue4194
    proc = subprocess.Popen(cmd, shell=True, bufsize=-1,
                            close_fds=closefds,
                            stdin=subprocess.PIPE, stdout=subprocess.PIPE,
                            universal_newlines=newlines,
                            env=env)
    return proc.stdin, proc.stdout

def popen3(cmd, env=None, newlines=False):
    """like popen4, but without exposing the Popen object"""
    stdin, stdout, stderr, _proc = popen4(cmd, env, newlines)
    return stdin, stdout, stderr

def popen4(cmd, env=None, newlines=False, bufsize=-1):
    """run cmd through the shell, returning (stdin, stdout, stderr, proc)"""
    proc = subprocess.Popen(cmd, shell=True, bufsize=bufsize,
                            close_fds=closefds,
                            stdin=subprocess.PIPE, stdout=subprocess.PIPE,
                            stderr=subprocess.PIPE,
                            universal_newlines=newlines,
                            env=env)
    return proc.stdin, proc.stdout, proc.stderr, proc
363
363
def version():
    """Return version information if available."""
    try:
        from . import __version__
    except ImportError:
        # not built/installed with version metadata
        return 'unknown'
    return __version__.version
371
371
def versiontuple(v=None, n=4):
    """Parses a Mercurial version string into an N-tuple.

    The version string to be parsed is specified with the ``v`` argument.
    If it isn't defined, the current Mercurial version string will be parsed.

    ``n`` can be 2, 3, or 4. Here is how some version strings map to
    returned values:

    >>> v = '3.6.1+190-df9b73d2d444'
    >>> versiontuple(v, 2)
    (3, 6)
    >>> versiontuple(v, 3)
    (3, 6, 1)
    >>> versiontuple(v, 4)
    (3, 6, 1, '190-df9b73d2d444')

    >>> versiontuple('3.6.1+190-df9b73d2d444+20151118')
    (3, 6, 1, '190-df9b73d2d444+20151118')

    >>> v = '3.6'
    >>> versiontuple(v, 2)
    (3, 6)
    >>> versiontuple(v, 3)
    (3, 6, None)
    >>> versiontuple(v, 4)
    (3, 6, None, None)

    >>> v = '3.9-rc'
    >>> versiontuple(v, 2)
    (3, 9)
    >>> versiontuple(v, 3)
    (3, 9, None)
    >>> versiontuple(v, 4)
    (3, 9, None, 'rc')

    >>> v = '3.9-rc+2-02a8fea4289b'
    >>> versiontuple(v, 2)
    (3, 9)
    >>> versiontuple(v, 3)
    (3, 9, None)
    >>> versiontuple(v, 4)
    (3, 9, None, 'rc+2-02a8fea4289b')
    """
    if not v:
        v = version()
    # split off everything after the first '+' or '-' as the "extra" tag
    parts = remod.split('[\+-]', v, 1)
    vparts = parts[0]
    extra = parts[1] if len(parts) > 1 else None

    vints = []
    for piece in vparts.split('.'):
        try:
            vints.append(int(piece))
        except ValueError:
            # stop at the first non-numeric component
            break
    # (3, 6) -> (3, 6, None)
    vints.extend([None] * (3 - len(vints)))

    if n == 2:
        return (vints[0], vints[1])
    elif n == 3:
        return (vints[0], vints[1], vints[2])
    elif n == 4:
        return (vints[0], vints[1], vints[2], extra)
440
440
# used by parsedate
defaultdateformats = (
    '%Y-%m-%dT%H:%M:%S', # the 'real' ISO8601
    '%Y-%m-%dT%H:%M',    #   without seconds
    '%Y-%m-%dT%H%M%S',   # another awful but legal variant without :
    '%Y-%m-%dT%H%M',     #   without seconds
    '%Y-%m-%d %H:%M:%S', # our common legal variant
    '%Y-%m-%d %H:%M',    #   without seconds
    '%Y-%m-%d %H%M%S',   # without :
    '%Y-%m-%d %H%M',     #   without seconds
    '%Y-%m-%d %I:%M:%S%p',
    '%Y-%m-%d %H:%M',
    '%Y-%m-%d %I:%M%p',
    '%Y-%m-%d',
    '%m-%d',
    '%m/%d',
    '%m/%d/%y',
    '%m/%d/%Y',
    '%a %b %d %H:%M:%S %Y',
    '%a %b %d %I:%M:%S%p %Y',
    '%a, %d %b %Y %H:%M:%S', #  GNU coreutils "/bin/date --rfc-2822"
    '%b %d %H:%M:%S %Y',
    '%b %d %I:%M:%S%p %Y',
    '%b %d %H:%M:%S',
    '%b %d %I:%M:%S%p',
    '%b %d %H:%M',
    '%b %d %I:%M%p',
    '%b %d %Y',
    '%b %d',
    '%H:%M:%S',
    '%I:%M:%S%p',
    '%H:%M',
    '%I:%M%p',
)

# date-only and month/year forms accepted in addition to the defaults
extendeddateformats = defaultdateformats + (
    "%Y",
    "%Y-%m",
    "%b",
    "%b %Y",
)
475
482
def cachefunc(func):
    '''cache the result of function calls'''
    # XXX doesn't handle keywords args
    argcount = func.__code__.co_argcount
    if argcount == 0:
        # a single memoized value is enough for nullary functions
        cache = []
        def f():
            if not cache:
                cache.append(func())
            return cache[0]
        return f
    cache = {}
    if argcount == 1:
        # slightly faster: no args tuple to pack/unpack for the 1-arg case
        def f(arg):
            if arg not in cache:
                cache[arg] = func(arg)
            return cache[arg]
    else:
        def f(*args):
            if args not in cache:
                cache[args] = func(*args)
            return cache[args]

    return f
501
508
class sortdict(dict):
    '''a simple sorted dictionary

    Keys are returned in insertion order; re-assigning an existing key
    moves it to the end.
    '''
    def __init__(self, data=None):
        self._list = []
        if data:
            self.update(data)
    def copy(self):
        return sortdict(self)
    def __setitem__(self, key, val):
        if key in self:
            # move re-assigned keys to the end
            self._list.remove(key)
        self._list.append(key)
        dict.__setitem__(self, key, val)
    def __iter__(self):
        return self._list.__iter__()
    def update(self, src):
        if isinstance(src, dict):
            src = src.iteritems()
        for k, v in src:
            self[k] = v
    def clear(self):
        dict.clear(self)
        self._list = []
    def items(self):
        return [(k, self[k]) for k in self._list]
    def __delitem__(self, key):
        dict.__delitem__(self, key)
        self._list.remove(key)
    def pop(self, key, *args, **kwargs):
        # bug fix: return the popped value (or the supplied default) like
        # dict.pop does, instead of silently discarding it
        value = dict.pop(self, key, *args, **kwargs)
        try:
            self._list.remove(key)
        except ValueError:
            # key was absent and a default was returned
            pass
        return value
    def keys(self):
        return self._list
    def iterkeys(self):
        return self._list.__iter__()
    def iteritems(self):
        for k in self._list:
            yield k, self[k]
    def insert(self, index, key, val):
        self._list.insert(index, key)
        dict.__setitem__(self, key, val)
    def __repr__(self):
        if not self:
            return '%s()' % self.__class__.__name__
        return '%s(%r)' % (self.__class__.__name__, self.items())
550
557
class _lrucachenode(object):
    """A node in a doubly linked list.

    Carries the key-value pair for one dictionary entry plus links to the
    neighboring nodes in the cache's circular list.
    """
    __slots__ = ('next', 'prev', 'key', 'value')

    def __init__(self):
        self.next = None
        self.prev = None

        # _notset marks a node that currently holds no entry
        self.key = _notset
        self.value = None

    def markempty(self):
        """Mark the node as emptied."""
        self.key = _notset
576
class lrucachedict(object):
    """Dict that caches most recent accesses and sets.

    The dict consists of an actual backing dict - indexed by original
    key - and a doubly linked circular list defining the order of entries in
    the cache.

    The head node is the newest entry in the cache. If the cache is full,
    we recycle head.prev and make it the new head. Cache accesses result in
    the node being moved to before the existing head and being marked as the
    new head node.
    """
    def __init__(self, max):
        self._cache = {}

        # Start with a single (empty) node linked to itself; further
        # nodes are added lazily up to ``max``.
        self._head = head = _lrucachenode()
        head.prev = head
        head.next = head
        self._size = 1
        self._capacity = max

    def __len__(self):
        return len(self._cache)

    def __contains__(self, k):
        return k in self._cache

    def __iter__(self):
        # We don't have to iterate in cache order, but why not.
        n = self._head
        for i in range(len(self._cache)):
            yield n.key
            n = n.next

    def __getitem__(self, k):
        node = self._cache[k]
        self._movetohead(node)
        return node.value

    def __setitem__(self, k, v):
        node = self._cache.get(k)
        # Replace existing value and mark as newest.
        if node is not None:
            node.value = v
            self._movetohead(node)
            return

        if self._size < self._capacity:
            node = self._addcapacity()
        else:
            # Grab the last/oldest item.
            node = self._head.prev

        # At capacity. Kill the old entry.
        if node.key is not _notset:
            del self._cache[node.key]

        node.key = k
        node.value = v
        self._cache[k] = node
        # And mark it as newest entry. No need to adjust order since it
        # is already self._head.prev.
        self._head = node

    def __delitem__(self, k):
        node = self._cache.pop(k)
        node.markempty()

        # Temporarily mark as newest item before re-adjusting head to make
        # this node the oldest item.
        self._movetohead(node)
        self._head = node.next

    # Additional dict methods.

    def get(self, k, default=None):
        """Return the cached value for ``k``, or ``default`` if absent.

        Goes through ``__getitem__`` so the *value* is returned (the
        previous implementation leaked the internal ``_lrucachenode``)
        and so a hit refreshes the entry's LRU position, consistent with
        ``d[k]`` access.
        """
        try:
            return self[k]
        except KeyError:
            return default

    def clear(self):
        # Empty every node in place (the ring is reused), then drop the
        # backing dict entries.
        n = self._head
        while n.key is not _notset:
            n.markempty()
            n = n.next

        self._cache.clear()

    def copy(self):
        result = lrucachedict(self._capacity)
        n = self._head.prev
        # Iterate in oldest-to-newest order, so the copy has the right ordering
        for i in range(len(self._cache)):
            result[n.key] = n.value
            n = n.prev
        return result

    def _movetohead(self, node):
        """Mark a node as the newest, making it the new head.

        When a node is accessed, it becomes the freshest entry in the LRU
        list, which is denoted by self._head.

        Visually, let's make ``N`` the new head node (* denotes head):

            previous/oldest <-> head <-> next/next newest

            ----<->--- A* ---<->-----
            |                       |
            E <-> D <-> N <-> C <-> B

        To:

            ----<->--- N* ---<->-----
            |                       |
            E <-> D <-> C <-> B <-> A

        This requires the following moves:

           C.next = D  (node.prev.next = node.next)
           D.prev = C  (node.next.prev = node.prev)
           E.next = N  (head.prev.next = node)
           N.prev = E  (node.prev = head.prev)
           N.next = A  (node.next = head)
           A.prev = N  (head.prev = node)
        """
        head = self._head
        # C.next = D
        node.prev.next = node.next
        # D.prev = C
        node.next.prev = node.prev
        # N.prev = E
        node.prev = head.prev
        # N.next = A
        # It is tempting to do just "head" here, however if node is
        # adjacent to head, this will do bad things.
        node.next = head.prev.next
        # E.next = N
        node.next.prev = node
        # A.prev = N
        node.prev.next = node

        self._head = node

    def _addcapacity(self):
        """Add a node to the circular linked list.

        The new node is inserted before the head node.
        """
        head = self._head
        node = _lrucachenode()
        head.prev.next = node
        node.prev = head.prev
        node.next = head
        head.prev = node
        self._size += 1
        return node
728
735
def lrucachefunc(func, maxsize=20):
    '''cache most recent results of function calls

    ``maxsize`` bounds the number of retained results; the default of 20
    preserves the historical behavior of this helper. The wrapped
    function's positional arguments are used as the cache key, so they
    must be hashable.
    '''
    cache = {}
    order = collections.deque()
    if func.__code__.co_argcount == 1:
        # Fast path: key directly on the single argument instead of an
        # args tuple.
        def f(arg):
            if arg not in cache:
                if len(cache) > maxsize:
                    # Evict the least recently used entry.
                    del cache[order.popleft()]
                cache[arg] = func(arg)
            else:
                # Cache hit: refresh this key's position in the LRU order.
                order.remove(arg)
            order.append(arg)
            return cache[arg]
    else:
        def f(*args):
            if args not in cache:
                if len(cache) > maxsize:
                    del cache[order.popleft()]
                cache[args] = func(*args)
            else:
                order.remove(args)
            order.append(args)
            return cache[args]

    return f
755
762
class propertycache(object):
    """Non-data descriptor caching the wrapped function's result.

    On first access the decorated function is called and its result is
    stored in the instance ``__dict__`` under the same name, so later
    lookups never reach this descriptor again.
    """
    def __init__(self, func):
        self.func = func
        self.name = func.__name__

    def __get__(self, obj, type=None):
        computed = self.func(obj)
        self.cachevalue(obj, computed)
        return computed

    def cachevalue(self, obj, value):
        # __dict__ assignment required to bypass __setattr__ (eg: repoview)
        obj.__dict__[self.name] = value
768
775
def pipefilter(s, cmd):
    '''filter string S through command CMD, returning its output'''
    proc = subprocess.Popen(cmd, shell=True, close_fds=closefds,
                            stdin=subprocess.PIPE, stdout=subprocess.PIPE)
    stdout, stderr = proc.communicate(s)
    # stderr is not captured (no PIPE); only the child's stdout matters.
    return stdout
775
782
def tempfilter(s, cmd):
    '''filter string S through a pair of temporary files with CMD.
    CMD is used as a template to create the real command to be run,
    with the strings INFILE and OUTFILE replaced by the real names of
    the temporary files generated.'''
    inname, outname = None, None
    try:
        # Write the input to a temp file the command can read.
        infd, inname = tempfile.mkstemp(prefix='hg-filter-in-')
        fp = os.fdopen(infd, 'wb')
        fp.write(s)
        fp.close()
        # Create (and immediately close) the output file so only its
        # name is handed to the command.
        outfd, outname = tempfile.mkstemp(prefix='hg-filter-out-')
        os.close(outfd)
        cmd = cmd.replace('INFILE', inname)
        cmd = cmd.replace('OUTFILE', outname)
        code = os.system(cmd)
        # NOTE(review): on OpenVMS an odd status apparently denotes
        # success (same special-case as system() below).
        if sys.platform == 'OpenVMS' and code & 1:
            code = 0
        if code:
            raise Abort(_("command '%s' failed: %s") %
                        (cmd, explainexit(code)))
        return readfile(outname)
    finally:
        # Best-effort cleanup: remove each temp file independently so a
        # failure on one does not leak the other.
        try:
            if inname:
                os.unlink(inname)
        except OSError:
            pass
        try:
            if outname:
                os.unlink(outname)
        except OSError:
            pass
809
816
# Maps a filter-command prefix to the helper implementing it; consulted
# by filter() below, which falls back to pipefilter for bare commands.
filtertable = {
    'tempfile:': tempfilter,
    'pipe:': pipefilter,
}
814
821
def filter(s, cmd):
    "filter a string through a command that transforms its input to its output"
    for prefix, filterfn in filtertable.iteritems():
        if not cmd.startswith(prefix):
            continue
        # Strip the recognized prefix (and surrounding whitespace) and
        # dispatch to the matching filter implementation.
        return filterfn(s, cmd[len(prefix):].lstrip())
    # No explicit prefix: default to piping through the command.
    return pipefilter(s, cmd)
821
828
def binary(s):
    """return true if a string is binary data"""
    if not s:
        return False
    # Presence of a NUL byte is the heuristic for binary content.
    return '\0' in s
825
832
def increasingchunks(source, min=1024, max=65536):
    '''return no less than min bytes per chunk while data remains,
    doubling min after each chunk until it reaches max'''
    def log2(x):
        # floor(log2(x)) for x > 0; 0 for x == 0
        if not x:
            return 0
        bits = 0
        while x:
            x >>= 1
            bits += 1
        return bits - 1

    pending = []
    pendinglen = 0
    for piece in source:
        pending.append(piece)
        pendinglen += len(piece)
        if pendinglen < min:
            # Not enough buffered yet; keep accumulating.
            continue
        if min < max:
            # Double the threshold, but jump straight to (a power of two
            # near) what we actually collected if that is larger, capped
            # at max.
            min = min << 1
            grown = 1 << log2(pendinglen)
            if grown > min:
                min = grown
            if min > max:
                min = max
        yield ''.join(pending)
        pending = []
        pendinglen = 0
    if pending:
        # Flush whatever is left, even if smaller than min.
        yield ''.join(pending)
856
863
# Module-level alias: the canonical abort exception lives in the error module.
Abort = error.Abort
858
865
def always(fn):
    """Return True regardless of fn (constant-true predicate)."""
    return True
861
868
def never(fn):
    """Return False regardless of fn (constant-false predicate)."""
    return False
864
871
def nogc(func):
    """disable garbage collector

    Python's garbage collector triggers a GC each time a certain number of
    container objects (the number being defined by gc.get_threshold()) are
    allocated even when marked not to be tracked by the collector. Tracking has
    no effect on when GCs are triggered, only on what objects the GC looks
    into. As a workaround, disable GC while building complex (huge)
    containers.

    This garbage collector issue have been fixed in 2.7.
    """
    def wrapper(*args, **kwargs):
        # Remember whether collection was on so we only re-enable it if
        # we were the ones who suspended it.
        restore = gc.isenabled()
        gc.disable()
        try:
            return func(*args, **kwargs)
        finally:
            if restore:
                gc.enable()
    return wrapper
886
893
def pathto(root, n1, n2):
    '''return the relative path from one place to another.
    root should use os.sep to separate directories
    n1 should use os.sep to separate directories
    n2 should use "/" to separate directories
    returns an os.sep-separated path.

    If n1 is a relative path, it's assumed it's
    relative to root.
    n2 should always be relative to root.
    '''
    if not n1:
        # Nothing to be relative to: just localize n2.
        return localpath(n2)
    if os.path.isabs(n1):
        if os.path.splitdrive(root)[0] != os.path.splitdrive(n1)[0]:
            # Different drives (Windows): no relative path exists, so
            # anchor n2 under root directly.
            return os.path.join(root, localpath(n2))
        # Make n2 absolute as well so the components line up with n1.
        n2 = '/'.join((pconvert(root), n2))
    a, b = splitpath(n1), n2.split('/')
    a.reverse()
    b.reverse()
    # Pop the shared leading components (now at the tails) off both.
    while a and b and a[-1] == b[-1]:
        a.pop()
        b.pop()
    b.reverse()
    # Climb out of what remains of n1, then descend into n2.
    return os.sep.join((['..'] * len(a)) + b) or '.'
912
919
def mainfrozen():
    """return True if we are a frozen executable.

    The code supports py2exe (most common, Windows only) and tools/freeze
    (portable, not much used).
    """
    if safehasattr(sys, "frozen"):  # new py2exe
        return True
    if safehasattr(sys, "importers"):  # old py2exe
        return True
    return imp.is_frozen("__main__")  # tools/freeze
922
929
# the location of data files matching the source code
if mainfrozen() and getattr(sys, 'frozen', None) != 'macosx_app':
    # executable version (py2exe) doesn't support __file__
    datapath = os.path.dirname(sys.executable)
else:
    datapath = os.path.dirname(__file__)

# Let the i18n machinery find translations next to the data files.
i18n.setdatapath(datapath)

# Cached path of the 'hg' executable; lazily populated by hgexecutable().
_hgexecutable = None
933
940
def hgexecutable():
    """return location of the 'hg' executable.

    Defaults to $HG or 'hg' in the search path.

    The result is computed once and memoized in the module-level
    _hgexecutable; later calls return the cached value.
    """
    if _hgexecutable is None:
        hg = os.environ.get('HG')
        mainmod = sys.modules['__main__']
        if hg:
            # Explicit override via environment wins.
            _sethgexecutable(hg)
        elif mainfrozen():
            if getattr(sys, 'frozen', None) == 'macosx_app':
                # Env variable set by py2app
                _sethgexecutable(os.environ['EXECUTABLEPATH'])
            else:
                # py2exe/frozen: sys.executable is the hg binary itself.
                _sethgexecutable(sys.executable)
        elif os.path.basename(getattr(mainmod, '__file__', '')) == 'hg':
            # Running from the 'hg' script directly.
            _sethgexecutable(mainmod.__file__)
        else:
            # Fall back to a PATH search, then to argv[0]'s basename.
            exe = findexe('hg') or os.path.basename(sys.argv[0])
            _sethgexecutable(exe)
    return _hgexecutable
956
963
def _sethgexecutable(path):
    """set location of the 'hg' executable"""
    # Stored in the module-level cache read by hgexecutable().
    global _hgexecutable
    _hgexecutable = path
961
968
962 def _isstdout(f):
969 def _isstdout(f):
963 fileno = getattr(f, 'fileno', None)
970 fileno = getattr(f, 'fileno', None)
964 return fileno and fileno() == sys.__stdout__.fileno()
971 return fileno and fileno() == sys.__stdout__.fileno()
965
972
def system(cmd, environ=None, cwd=None, onerr=None, errprefix=None, out=None):
    '''enhanced shell command execution.
    run with environment maybe modified, maybe in different dir.

    if command fails and onerr is None, return status, else raise onerr
    object as exception.

    if out is specified, it is assumed to be a file-like object that has a
    write() method. stdout and stderr will be redirected to out.'''
    if environ is None:
        environ = {}
    try:
        # Flush our buffered output before the child writes to the same
        # descriptor, so output is not interleaved out of order.
        sys.stdout.flush()
    except Exception:
        pass
    def py2shell(val):
        'convert python object into string that is useful to shell'
        if val is None or val is False:
            return '0'
        if val is True:
            return '1'
        return str(val)
    origcmd = cmd
    cmd = quotecommand(cmd)
    if sys.platform == 'plan9' and (sys.version_info[0] == 2
                                    and sys.version_info[1] < 7):
        # subprocess kludge to work around issues in half-baked Python
        # ports, notably bichued/python:
        if not cwd is None:
            os.chdir(cwd)
        rc = os.system(cmd)
    else:
        # Layer the caller's environment overrides on top of ours, with
        # values normalized to shell-friendly strings.
        env = dict(os.environ)
        env.update((k, py2shell(v)) for k, v in environ.iteritems())
        # Tell child hg invocations which binary to use.
        env['HG'] = hgexecutable()
        if out is None or _isstdout(out):
            # Child may write straight to our stdout; no redirection.
            rc = subprocess.call(cmd, shell=True, close_fds=closefds,
                                 env=env, cwd=cwd)
        else:
            # Capture stdout+stderr and stream them line by line into
            # the caller-supplied file-like object.
            proc = subprocess.Popen(cmd, shell=True, close_fds=closefds,
                                    env=env, cwd=cwd, stdout=subprocess.PIPE,
                                    stderr=subprocess.STDOUT)
            while True:
                line = proc.stdout.readline()
                if not line:
                    break
                out.write(line)
            proc.wait()
            rc = proc.returncode
    # NOTE(review): on OpenVMS an odd status apparently indicates
    # success (same special-case as tempfilter above).
    if sys.platform == 'OpenVMS' and rc & 1:
        rc = 0
    if rc and onerr:
        errmsg = '%s %s' % (os.path.basename(origcmd.split(None, 1)[0]),
                            explainexit(rc)[0])
        if errprefix:
            errmsg = '%s: %s' % (errprefix, errmsg)
        raise onerr(errmsg)
    return rc
1024
1031
def checksignature(func):
    '''wrap a function with code to check for calling errors'''
    def check(*args, **kwargs):
        try:
            return func(*args, **kwargs)
        except TypeError:
            # Only translate TypeErrors raised by the call frame itself
            # (traceback depth 1, i.e. a bad signature); TypeErrors from
            # deeper inside the callee propagate unchanged.
            frames = traceback.extract_tb(sys.exc_info()[2])
            if len(frames) == 1:
                raise error.SignatureError
            raise

    return check
1036
1043
def copyfile(src, dest, hardlink=False, copystat=False, checkambig=False):
    '''copy a file, preserving mode and optionally other stat info like
    atime/mtime

    checkambig argument is used with filestat, and is useful only if
    destination file is guarded by any lock (e.g. repo.lock or
    repo.wlock).

    copystat and checkambig should be exclusive.
    '''
    assert not (copystat and checkambig)
    oldstat = None
    if os.path.lexists(dest):
        if checkambig:
            # Remember the destination's stat so we can detect an
            # mtime-ambiguous copy below.
            oldstat = checkambig and filestat(dest)
        unlink(dest)
    # hardlinks are problematic on CIFS, quietly ignore this flag
    # until we find a way to work around it cleanly (issue4546)
    if False and hardlink:
        try:
            oslink(src, dest)
            return
        except (IOError, OSError):
            pass # fall back to normal copy
    if os.path.islink(src):
        os.symlink(os.readlink(src), dest)
        # copytime is ignored for symlinks, but in general copytime isn't needed
        # for them anyway
    else:
        try:
            shutil.copyfile(src, dest)
            if copystat:
                # copystat also copies mode
                shutil.copystat(src, dest)
            else:
                shutil.copymode(src, dest)
                if oldstat and oldstat.stat:
                    newstat = filestat(dest)
                    if newstat.isambig(oldstat):
                        # stat of copied file is ambiguous to original one
                        # Nudge mtime forward (mod 2**31) to disambiguate.
                        advanced = (oldstat.stat.st_mtime + 1) & 0x7fffffff
                        os.utime(dest, (advanced, advanced))
        except shutil.Error as inst:
            raise Abort(str(inst))
1081
1088
def copyfiles(src, dst, hardlink=None, progress=lambda t, pos: None):
    """Copy a directory tree using hardlinks if possible.

    Returns a (hardlink, num) pair: whether hardlinking was still in
    effect at the end, and how many files were copied/linked.
    """
    num = 0

    if hardlink is None:
        # Hardlinks only work within one filesystem; compare devices.
        hardlink = (os.stat(src).st_dev ==
                    os.stat(os.path.dirname(dst)).st_dev)
    if hardlink:
        topic = _('linking')
    else:
        topic = _('copying')

    if os.path.isdir(src):
        os.mkdir(dst)
        for name, kind in osutil.listdir(src):
            srcname = os.path.join(src, name)
            dstname = os.path.join(dst, name)
            def nprog(t, pos):
                # Offset the child's progress by what we've done so far.
                if pos is not None:
                    return progress(t, pos + num)
            hardlink, n = copyfiles(srcname, dstname, hardlink, progress=nprog)
            num += n
    else:
        if hardlink:
            try:
                oslink(src, dst)
            except (IOError, OSError):
                # Linking failed; fall back to copying for the rest of
                # the tree as well.
                hardlink = False
                shutil.copy(src, dst)
        else:
            shutil.copy(src, dst)
        num += 1
        progress(topic, num)
    progress(topic, None)

    return hardlink, num
1118
1125
# Base filenames (sans extension, any case) that Windows reserves for
# devices, plus the characters that may not appear in Windows filenames.
# Consulted by checkwinfilename() below.
_winreservednames = '''con prn aux nul
com1 com2 com3 com4 com5 com6 com7 com8 com9
lpt1 lpt2 lpt3 lpt4 lpt5 lpt6 lpt7 lpt8 lpt9'''.split()
_winreservedchars = ':*?"<>|'
def checkwinfilename(path):
    r'''Check that the base-relative path is a valid filename on Windows.
    Returns None if the path is ok, or a UI string describing the problem.

    >>> checkwinfilename("just/a/normal/path")
    >>> checkwinfilename("foo/bar/con.xml")
    "filename contains 'con', which is reserved on Windows"
    >>> checkwinfilename("foo/con.xml/bar")
    "filename contains 'con', which is reserved on Windows"
    >>> checkwinfilename("foo/bar/xml.con")
    >>> checkwinfilename("foo/bar/AUX/bla.txt")
    "filename contains 'AUX', which is reserved on Windows"
    >>> checkwinfilename("foo/bar/bla:.txt")
    "filename contains ':', which is reserved on Windows"
    >>> checkwinfilename("foo/bar/b\07la.txt")
    "filename contains '\\x07', which is invalid on Windows"
    >>> checkwinfilename("foo/bar/bla ")
    "filename ends with ' ', which is not allowed on Windows"
    >>> checkwinfilename("../bar")
    >>> checkwinfilename("foo\\")
    "filename ends with '\\', which is invalid on Windows"
    >>> checkwinfilename("foo\\/bar")
    "directory name ends with '\\', which is invalid on Windows"
    '''
    if path.endswith('\\'):
        return _("filename ends with '\\', which is invalid on Windows")
    if '\\/' in path:
        return _("directory name ends with '\\', which is invalid on Windows")
    # examine each component, treating both separators alike
    for part in path.replace('\\', '/').split('/'):
        if not part:
            continue
        for ch in part:
            if ch in _winreservedchars:
                return _("filename contains '%s', which is reserved "
                         "on Windows") % ch
            # control characters (<= 0x1f) are never valid
            if ord(ch) <= 31:
                return _("filename contains %r, which is invalid "
                         "on Windows") % ch
        # device names like 'con' are reserved even with an extension
        stem = part.split('.')[0]
        if stem and stem.lower() in _winreservednames:
            return _("filename contains '%s', which is reserved "
                     "on Windows") % stem
        last = part[-1]
        # trailing dot or space is rejected, except for '.' and '..'
        if last in '. ' and part not in '..':
            return _("filename ends with '%s', which is not allowed "
                     "on Windows") % last
1169
1176
# Pick the filename validator for this platform: Windows needs the
# reserved-name/character checks above, other platforms supply their own.
checkosfilename = (checkwinfilename if os.name == 'nt'
                   else platform.checkosfilename)
1174
1181
def makelock(info, pathname):
    """Create a lock at pathname whose content is info.

    Prefers a symlink (creation is atomic and the target carries the
    info); falls back to an exclusively-created regular file where
    symlinks are unavailable.
    """
    try:
        return os.symlink(info, pathname)
    except OSError as err:
        # an already-held lock is an error; any other symlink failure
        # falls through to the regular-file strategy below
        if err.errno == errno.EEXIST:
            raise
    except AttributeError: # no symlink in os
        pass

    flags = os.O_CREAT | os.O_WRONLY | os.O_EXCL
    fd = os.open(pathname, flags)
    os.write(fd, info)
    os.close(fd)
1187
1194
def readlock(pathname):
    """Return the info string stored in the lock at pathname.

    Understands both lock flavors written by makelock: a symlink whose
    target is the info, or a regular file containing it.
    """
    try:
        return os.readlink(pathname)
    except OSError as why:
        # EINVAL: not a symlink; ENOSYS: symlinks unsupported here --
        # in both cases fall through and read a regular lock file
        if why.errno not in (errno.EINVAL, errno.ENOSYS):
            raise
    except AttributeError: # no symlink in os
        pass
    fp = posixfile(pathname)
    data = fp.read()
    fp.close()
    return data
1200
1207
def fstat(fp):
    """Return os.stat info for a file object.

    Works even for file-like objects without a fileno() method by
    falling back to stat'ing the object's name.
    """
    try:
        return os.fstat(fp.fileno())
    except AttributeError:
        return os.stat(fp.name)
1207
1214
1208 # File system features
1215 # File system features
1209
1216
def checkcase(path):
    """Return True if the given path is on a case-sensitive filesystem.

    Requires a path (like /foo/.hg) whose final directory component
    changes under case folding; otherwise sensitivity cannot be probed
    and True is returned.
    """
    stat_orig = os.lstat(path)
    dirname, basename = os.path.split(path)
    folded = basename.upper()
    if folded == basename:
        folded = basename.lower()
    if folded == basename:
        # name has no case to fold: no evidence against case sensitivity
        return True
    try:
        stat_folded = os.lstat(os.path.join(dirname, folded))
    except OSError:
        # the case-folded variant doesn't exist: case matters here
        return True
    # same inode under both spellings means the fs folds case
    if stat_folded == stat_orig:
        return False
    return True
1232
1239
1233 try:
1240 try:
1234 import re2
1241 import re2
1235 _re2 = None
1242 _re2 = None
1236 except ImportError:
1243 except ImportError:
1237 _re2 = False
1244 _re2 = False
1238
1245
class _re(object):
    """Facade over the regexp engine that prefers re2 when usable.

    re2 is much faster but supports only a subset of Python regexp
    features; compile() transparently falls back to the stdlib engine.
    """
    def _checkre2(self):
        # probe re2 once and cache the verdict in the module-global _re2
        global _re2
        try:
            # check if match works, see issue3964
            _re2 = bool(re2.match(r'\[([^\[]+)\]', '[ui]'))
        except ImportError:
            _re2 = False

    def compile(self, pat, flags=0):
        '''Compile a regular expression, using re2 if possible

        For best performance, use only re2-compatible regexp features. The
        only flags from the re module that are re2-compatible are
        IGNORECASE and MULTILINE.'''
        if _re2 is None:
            self._checkre2()
        if _re2 and (flags & ~(remod.IGNORECASE | remod.MULTILINE)) == 0:
            # re2 takes inline flags rather than a flags argument
            if flags & remod.IGNORECASE:
                pat = '(?i)' + pat
            if flags & remod.MULTILINE:
                pat = '(?m)' + pat
            try:
                return re2.compile(pat)
            except re2.error:
                pass
        return remod.compile(pat, flags)

    @propertycache
    def escape(self):
        '''Return the version of escape corresponding to self.compile.

        This is imperfect because whether re2 or re is used for a particular
        function depends on the flags, etc, but it's the best we can do.
        '''
        global _re2
        if _re2 is None:
            self._checkre2()
        return re2.escape if _re2 else remod.escape

re = _re()
1283
1290
# cache of directory -> {normcased name: on-disk name}, shared across calls
_fspathcache = {}
def fspath(name, root):
    '''Get name in the case stored in the filesystem

    The name should be relative to root, and be normcase-ed for efficiency.

    Note that this function is unnecessary, and should not be
    called, for case-sensitive filesystems (simply because it's expensive).

    The root should be normcase-ed, too.
    '''
    def _makefspathcacheentry(dir):
        # map normcased entry name -> actual on-disk name for one directory
        return dict((normcase(n), n) for n in os.listdir(dir))

    seps = os.sep
    if os.altsep:
        seps = seps + os.altsep
    # Protect backslashes so they survive inside the regex character
    # classes below. Fix: str.replace returns a new string -- the result
    # was previously discarded, leaving '\' unescaped in the pattern so
    # the [^...] class treated '\/' as an escaped '/' on Windows.
    seps = seps.replace('\\', '\\\\')
    pattern = remod.compile(r'([^%s]+)|([%s]+)' % (seps, seps))
    dir = os.path.normpath(root)
    result = []
    for part, sep in pattern.findall(name):
        if sep:
            # runs of separators are passed through untouched
            result.append(sep)
            continue

        if dir not in _fspathcache:
            _fspathcache[dir] = _makefspathcacheentry(dir)
        contents = _fspathcache[dir]

        found = contents.get(part)
        if not found:
            # retry "once per directory" per "dirstate.walk" which
            # may take place for each patches of "hg qpush", for example
            _fspathcache[dir] = contents = _makefspathcacheentry(dir)
            found = contents.get(part)

        result.append(found or part)
        dir = os.path.join(dir, part)

    return ''.join(result)
1326
1333
def checknlink(testfile):
    """Check whether hardlink count reporting works properly.

    Creates two scratch files next to testfile, hardlinks them, and
    verifies the reported link count; the scratch files are always
    removed. Returns False on any failure.
    """
    # testfile may be open, so probe with separate files to work around
    # issue2543 (and so testfile can't get lost on Samba shares)
    probe1 = testfile + ".hgtmp1"
    if os.path.lexists(probe1):
        return False
    try:
        posixfile(probe1, 'w').close()
    except IOError:
        return False

    probe2 = testfile + ".hgtmp2"
    fh = None
    try:
        oslink(probe1, probe2)
        # nlinks() may behave differently for files on Windows shares if
        # the file is open.
        fh = posixfile(probe2)
        return nlinks(probe2) > 1
    except OSError:
        return False
    finally:
        if fh is not None:
            fh.close()
        for scratch in (probe1, probe2):
            try:
                os.unlink(scratch)
            except OSError:
                pass
1358
1365
def endswithsep(path):
    '''Check path ends with os.sep or os.altsep.

    Note: when os.altsep is unset this returns None (falsy) rather
    than False; callers should rely only on truthiness.
    '''
    if path.endswith(os.sep):
        return True
    return os.altsep and path.endswith(os.altsep)
1362
1369
def splitpath(path):
    '''Split path by os.sep (and only os.sep).

    os.altsep is deliberately not honored: this is just a spelled-out
    alternative to "path.split(os.sep)". Run os.path.normpath() on the
    path first if it may contain altsep or redundant separators.'''
    return path.split(os.sep)
1370
1377
def gui():
    '''Are we running in a GUI?'''
    if sys.platform != 'darwin':
        # Windows is assumed graphical; elsewhere require an X display
        return os.name == "nt" or os.environ.get("DISPLAY")
    if 'SSH_CONNECTION' in os.environ:
        # handle SSH access to a box where the user is logged in
        return False
    if getattr(osutil, 'isgui', None):
        # check if a CoreGraphics session is available
        return osutil.isgui()
    # pure build; use a safe default
    return True
1385
1392
def mktempcopy(name, emptyok=False, createmode=None):
    """Create a temporary file with the same contents from name

    The permission bits are copied from the original file.

    If the temporary file is going to be truncated immediately, you
    can use emptyok=True as an optimization.

    Returns the name of the temporary file.
    """
    dirname, filename = os.path.split(name)
    fd, temp = tempfile.mkstemp(prefix='.%s-' % filename, dir=dirname)
    os.close(fd)
    # Temporary files are created with mode 0600, which is usually not
    # what we want. If the original file already exists, just copy
    # its mode. Otherwise, manually obey umask.
    copymode(name, temp, createmode)
    if emptyok:
        return temp
    try:
        try:
            ifp = posixfile(name, "rb")
        except IOError as inst:
            if inst.errno == errno.ENOENT:
                # nothing to copy: the fresh empty temp file will do
                return temp
            if not getattr(inst, 'filename', None):
                inst.filename = name
            raise
        ofp = posixfile(temp, "wb")
        for chunk in filechunkiter(ifp):
            ofp.write(chunk)
        ifp.close()
        ofp.close()
    except: # re-raises
        # never leave a half-written temp file behind
        try:
            os.unlink(temp)
        except OSError:
            pass
        raise
    return temp
1424
1431
class filestat(object):
    """help to exactly detect change of a file

    'stat' attribute is result of 'os.stat()' if specified 'path'
    exists. Otherwise, it is None. This can avoid preparative
    'exists()' examination on client side of this class.
    """
    def __init__(self, path):
        try:
            self.stat = os.stat(path)
        except OSError as err:
            if err.errno != errno.ENOENT:
                raise
            # path doesn't exist: record that explicitly
            self.stat = None

    __hash__ = object.__hash__

    def __eq__(self, old):
        try:
            # if ambiguity between stat of new and old file is
            # avoided, comparison of size, ctime and mtime is enough
            # to exactly detect change of a file regardless of platform
            return (self.stat.st_size == old.stat.st_size and
                    self.stat.st_ctime == old.stat.st_ctime and
                    self.stat.st_mtime == old.stat.st_mtime)
        except AttributeError:
            # Fix: one or both sides lack a stat. Two stats of a
            # *missing* file describe the same (absent) state and must
            # compare equal; previously this always returned False, so
            # an unchanged missing file looked modified. Non-filestat
            # operands (no 'stat' attribute) still compare unequal.
            return (self.stat is None and
                    getattr(old, 'stat', False) is None)

    def isambig(self, old):
        """Examine whether new (= self) stat is ambiguous against old one

        "S[N]" below means stat of a file at N-th change:

        - S[n-1].ctime < S[n].ctime: can detect change of a file
        - S[n-1].ctime == S[n].ctime
          - S[n-1].ctime < S[n].mtime: means natural advancing (*1)
          - S[n-1].ctime == S[n].mtime: is ambiguous (*2)
          - S[n-1].ctime > S[n].mtime: never occurs naturally (don't care)
        - S[n-1].ctime > S[n].ctime: never occurs naturally (don't care)

        Case (*2) above means that a file was changed twice or more at
        same time in sec (= S[n-1].ctime), and comparison of timestamp
        is ambiguous.

        Base idea to avoid such ambiguity is "advance mtime 1 sec, if
        timestamp is ambiguous".

        But advancing mtime only in case (*2) doesn't work as
        expected, because naturally advanced S[n].mtime in case (*1)
        might be equal to manually advanced S[n-1 or earlier].mtime.

        Therefore, all "S[n-1].ctime == S[n].ctime" cases should be
        treated as ambiguous regardless of mtime, to avoid overlooking
        by confliction between such mtime.

        Advancing mtime "if isambig(oldstat)" ensures "S[n-1].mtime !=
        S[n].mtime", even if size of a file isn't changed.
        """
        try:
            return (self.stat.st_ctime == old.stat.st_ctime)
        except AttributeError:
            return False

    def __ne__(self, other):
        return not self == other
1490
1497
class atomictempfile(object):
    '''writable file object that atomically updates a file

    All writes go to a temporary copy of the original file; close()
    renames that copy over the original name, making the changes
    visible atomically. If the object is destroyed without being
    closed, all writes are discarded.

    The constructor's checkambig argument is used with filestat, and
    is useful only if the target file is guarded by some lock
    (e.g. repo.lock or repo.wlock).
    '''
    def __init__(self, name, mode='w+b', createmode=None, checkambig=False):
        self.__name = name # permanent name
        self._tempname = mktempcopy(name, emptyok=('w' in mode),
                                    createmode=createmode)
        self._fp = posixfile(self._tempname, mode)
        self._checkambig = checkambig

        # delegate file operations straight to the underlying temp file
        self.read = self._fp.read
        self.write = self._fp.write
        self.seek = self._fp.seek
        self.tell = self._fp.tell
        self.fileno = self._fp.fileno

    def close(self):
        """Rename the temporary copy over the permanent name.

        With checkambig, nudge mtime forward by one second whenever the
        new stat would be indistinguishable from the old one.
        """
        if not self._fp.closed:
            self._fp.close()
        target = localpath(self.__name)
        # snapshot the old stat before the rename clobbers it
        oldstat = filestat(target) if self._checkambig else None
        rename(self._tempname, target)
        if oldstat is not None and oldstat.stat is not None:
            newstat = filestat(target)
            if newstat.isambig(oldstat):
                # stat of changed file is ambiguous to original one
                advanced = (oldstat.stat.st_mtime + 1) & 0x7fffffff
                os.utime(target, (advanced, advanced))

    def discard(self):
        """Throw the temporary copy away without touching the target."""
        if not self._fp.closed:
            try:
                os.unlink(self._tempname)
            except OSError:
                pass
            self._fp.close()

    def __del__(self):
        if safehasattr(self, '_fp'): # constructor actually did something
            self.discard()

    def __enter__(self):
        return self

    def __exit__(self, exctype, excvalue, traceback):
        # keep the changes only on a clean exit
        if exctype is not None:
            self.discard()
        else:
            self.close()
1553
1560
def makedirs(name, mode=None, notindexed=False):
    """recursive directory creation with parent mode inheritance

    Newly created directories are marked as "not to be indexed by
    the content indexing service", if ``notindexed`` is specified
    for "write" mode access.
    """
    try:
        makedir(name, notindexed)
    except OSError as exc:
        if exc.errno == errno.EEXIST:
            # already present: nothing to create, nothing to chmod
            return
        if exc.errno != errno.ENOENT or not name:
            raise
        parent = os.path.dirname(os.path.abspath(name))
        if parent == name:
            # reached the filesystem root without finding anything
            raise
        makedirs(parent, mode, notindexed)
        try:
            makedir(name, notindexed)
        except OSError as exc:
            # Catch EEXIST to handle races with concurrent creators
            if exc.errno == errno.EEXIST:
                return
            raise
    if mode is not None:
        os.chmod(name, mode)
1581
1588
def readfile(path):
    """Return the entire content of the file at path, in binary mode."""
    with open(path, 'rb') as fh:
        return fh.read()
1585
1592
def writefile(path, text):
    """Replace the content of the file at path with text (binary mode)."""
    with open(path, 'wb') as fh:
        fh.write(text)
1589
1596
def appendfile(path, text):
    """Append text to the file at path, creating it if missing."""
    with open(path, 'ab') as fh:
        fh.write(text)
1593
1600
class chunkbuffer(object):
    """Allow arbitrary sized chunks of data to be efficiently read from an
    iterator over chunks of arbitrary size."""

    def __init__(self, in_iter):
        """in_iter is the iterator that's iterating over the input chunks.
        targetsize is how big a buffer to try to maintain."""
        def splitbig(chunks):
            # cap chunk size at 256k so no single queued chunk is huge
            for chunk in chunks:
                if len(chunk) > 2**20:
                    pos = 0
                    while pos < len(chunk):
                        end = pos + 2**18
                        yield chunk[pos:end]
                        pos = end
                else:
                    yield chunk
        self.iter = splitbig(in_iter)
        self._queue = collections.deque()
        # read position within the chunk at the head of the queue
        self._chunkoffset = 0

    def read(self, l=None):
        """Read L bytes of data from the iterator of chunks of data.
        Returns less than L bytes if the iterator runs dry.

        If size parameter is omitted, read everything"""
        if l is None:
            return ''.join(self.iter)

        remaining = l
        out = []
        queue = self._queue
        while remaining > 0:
            # refill the queue with roughly 256k of lookahead
            if not queue:
                budget = 2**18
                for chunk in self.iter:
                    queue.append(chunk)
                    budget -= len(chunk)
                    if budget <= 0:
                        break
                if not queue:
                    break

            # Peek rather than popleft/appendleft: for partial reads we
            # advance an offset into the head chunk instead of re-slicing
            # and re-queueing it, avoiding extra deque mutations and a
            # new str for the remainder.
            head = queue[0]
            headlen = len(head)
            offset = self._chunkoffset

            # consume the entire head chunk as-is
            if offset == 0 and remaining >= headlen:
                remaining -= headlen
                queue.popleft()
                out.append(head)
                # self._chunkoffset stays 0
                continue

            unread = headlen - offset

            # consume everything left in the head chunk
            if remaining >= unread:
                remaining -= unread
                queue.popleft()
                # offset == 0 was handled above, so this truly slices
                out.append(head[offset:])
                self._chunkoffset = 0

            # take only part of the head chunk and remember where we are
            else:
                out.append(head[offset:offset + remaining])
                self._chunkoffset += remaining
                remaining -= unread

        return ''.join(out)
1674
1681
def filechunkiter(f, size=65536, limit=None):
    """Create a generator that produces the data in the file size
    (default 65536) bytes at a time, up to optional limit (default is
    to read all data). Chunks may be less than size bytes if the
    chunk is the last chunk in the file, or the file is a socket or
    some other type of file that sometimes reads less data than is
    requested."""
    assert size >= 0
    assert limit is None or limit >= 0
    while True:
        nbytes = size if limit is None else min(limit, size)
        # short-circuit keeps us from calling f.read(0), whose meaning
        # varies between file-like objects
        data = nbytes and f.read(nbytes)
        if not data:
            break
        if limit:
            limit -= len(data)
        yield data
1695
1702
def makedate(timestamp=None):
    """Return a (unixtime, offset) tuple for ``timestamp`` (or for the
    current time when ``timestamp`` is None), where offset is the local
    timezone's distance from UTC in seconds.

    Raises Abort on negative timestamps (usually a misconfigured clock).
    """
    if timestamp is None:
        timestamp = time.time()
    if timestamp < 0:
        raise Abort(_("negative timestamp: %d") % timestamp,
                    hint=_("check your clock"))
    # derive the UTC offset by comparing the naive UTC and local renderings
    # of the same instant
    utc = datetime.datetime.utcfromtimestamp(timestamp)
    local = datetime.datetime.fromtimestamp(timestamp)
    delta = utc - local
    return timestamp, delta.days * 86400 + delta.seconds
1708
1715
def datestr(date=None, format='%a %b %d %H:%M:%S %Y %1%2'):
    """represent a (unixtime, offset) tuple as a localized time.
    unixtime is seconds since the epoch, and offset is the time zone's
    number of seconds away from UTC.

    %1 and %2 expand to the signed hour and minute parts of the offset;
    %z is shorthand for %1%2.

    >>> datestr((0, 0))
    'Thu Jan 01 00:00:00 1970 +0000'
    >>> datestr((42, 0))
    'Thu Jan 01 00:00:42 1970 +0000'
    >>> datestr((-42, 0))
    'Wed Dec 31 23:59:18 1969 +0000'
    >>> datestr((0x7fffffff, 0))
    'Tue Jan 19 03:14:07 2038 +0000'
    >>> datestr((-0x80000000, 0))
    'Fri Dec 13 20:45:52 1901 +0000'
    """
    t, tz = date or makedate()
    if any(spec in format for spec in ("%1", "%2", "%z")):
        # offsets are stored as seconds *behind* UTC, hence the sign flip
        sign = "-" if tz > 0 else "+"
        q, r = divmod(abs(tz) // 60, 60)
        format = (format.replace("%z", "%1%2")
                        .replace("%1", "%c%02d" % (sign, q))
                        .replace("%2", "%02d" % r))
    # clamp to the signed 32-bit range strftime can always handle
    d = min(max(t - tz, -0x80000000), 0x7fffffff)
    # Never use time.gmtime() and datetime.datetime.fromtimestamp()
    # because they use the gmtime() system call which is buggy on Windows
    # for negative values.
    when = datetime.datetime(1970, 1, 1) + datetime.timedelta(seconds=d)
    return when.strftime(format)
1744
1751
def shortdate(date=None):
    """turn (timestamp, tzoff) tuple into ISO 8601 date (YYYY-MM-DD).

    When date is None, datestr falls back to the current local time.
    """
    return datestr(date, format='%Y-%m-%d')
1748
1755
def parsetimezone(s):
    """find a trailing timezone, if any, in string, and return a
    (offset, remainder) pair.

    The offset is in seconds *behind* UTC (matching makedate); it is
    None when no timezone could be recognized.
    """
    if s.endswith(("GMT", "UTC")):
        return 0, s[:-3].rstrip()

    # Unix-style timezones [+-]hhmm
    if len(s) >= 5 and s[-5] in "+-" and s[-4:].isdigit():
        sign = 1 if s[-5] == "+" else -1
        hours, minutes = int(s[-4:-2]), int(s[-2:])
        return -sign * (hours * 60 + minutes) * 60, s[:-5].rstrip()

    # ISO8601 trailing Z (must follow a digit so a lone "Z" won't match)
    if s.endswith("Z") and s[-2:-1].isdigit():
        return 0, s[:-1]

    # ISO8601-style [+-]hh:mm
    if (len(s) >= 6 and s[-6] in "+-" and s[-3] == ":" and
            s[-5:-3].isdigit() and s[-2:].isdigit()):
        sign = 1 if s[-6] == "+" else -1
        hours, minutes = int(s[-5:-3]), int(s[-2:])
        return -sign * (hours * 60 + minutes) * 60, s[:-6]

    return None, s
1776
1783
def strdate(string, format, defaults=None):
    """parse a localized time string and return a (unixtime, offset) tuple.
    if the string cannot be parsed, ValueError is raised.

    ``defaults`` maps element specifiers ("S", "M", "HI", "d", "mb", "yY")
    to a (biased, now) pair of fallback strings, used to fill out elements
    that ``format`` does not mention.  It previously defaulted to a shared
    mutable list literal, which was both the mutable-default antipattern and
    the wrong type (it is indexed with string keys below); it now defaults
    to a fresh empty dict per call.
    """
    if defaults is None:
        defaults = {}
    # NOTE: unixtime = localunixtime + offset
    offset, date = parsetimezone(string)

    # add missing elements from defaults
    usenow = False # default to using biased defaults
    for part in ("S", "M", "HI", "d", "mb", "yY"): # decreasing specificity
        found = [True for p in part if ("%"+p) in format]
        if not found:
            date += "@" + defaults[part][usenow]
            format += "@%" + part[0]
        else:
            # We've found a specific time element, less specific time
            # elements are relative to today
            usenow = True

    timetuple = time.strptime(date, format)
    # timegm() treats the tuple as UTC, giving us the "local" unixtime
    localunixtime = int(calendar.timegm(timetuple))
    if offset is None:
        # no explicit zone in the string: derive the offset from the
        # local timezone via mktime()
        unixtime = int(time.mktime(timetuple))
        offset = unixtime - localunixtime
    else:
        unixtime = localunixtime + offset
    return unixtime, offset
1804
1811
def parsedate(date, formats=None, bias=None):
    """parse a localized date/time and return a (unixtime, offset) tuple.

    The date may be a "unixtime offset" string or in one of the specified
    formats. If the date already is a (unixtime, offset) tuple, it is returned.

    ``bias`` maps element specifiers (e.g. "d", "mb") to replacement
    default strings, letting callers round unspecified elements up or
    down (see matchdate's lower/upper helpers).

    >>> parsedate(' today ') == parsedate(\
                                  datetime.date.today().strftime('%b %d'))
    True
    >>> parsedate( 'yesterday ') == parsedate((datetime.date.today() -\
                                               datetime.timedelta(days=1)\
                                              ).strftime('%b %d'))
    True
    >>> now, tz = makedate()
    >>> strnow, strtz = parsedate('now')
    >>> (strnow - now) < 1
    True
    >>> tz == strtz
    True
    """
    if bias is None:
        bias = {}
    if not date:
        return 0, 0
    if isinstance(date, tuple) and len(date) == 2:
        # already parsed; pass through unchanged
        return date
    if not formats:
        formats = defaultdateformats
    date = date.strip()

    # symbolic dates, in both English and the current locale
    if date == 'now' or date == _('now'):
        return makedate()
    if date == 'today' or date == _('today'):
        date = datetime.date.today().strftime('%b %d')
    elif date == 'yesterday' or date == _('yesterday'):
        date = (datetime.date.today() -
                datetime.timedelta(days=1)).strftime('%b %d')

    try:
        # fast path: internal "unixtime offset" representation
        when, offset = map(int, date.split(' '))
    except ValueError:
        # fill out defaults
        now = makedate()
        defaults = {}
        for part in ("d", "mb", "yY", "HI", "M", "S"):
            # this piece is for rounding the specific end of unknowns
            b = bias.get(part)
            if b is None:
                if part[0] in "HMS":
                    b = "00"
                else:
                    b = "0"

            # this piece is for matching the generic end to today's date
            n = datestr(now, "%" + part[0])

            defaults[part] = (b, n)

        # try each candidate format until one parses; the for/else Abort
        # fires only when every format failed
        for format in formats:
            try:
                when, offset = strdate(date, format, defaults)
            except (ValueError, OverflowError):
                pass
            else:
                break
        else:
            raise Abort(_('invalid date: %r') % date)
    # validate explicit (probably user-specified) date and
    # time zone offset. values must fit in signed 32 bits for
    # current 32-bit linux runtimes. timezones go from UTC-12
    # to UTC+14
    if when < -0x80000000 or when > 0x7fffffff:
        raise Abort(_('date exceeds 32 bits: %d') % when)
    if offset < -50400 or offset > 43200:
        raise Abort(_('impossible time zone offset: %d') % offset)
    return when, offset
1881
1888
def matchdate(date):
    """Return a function that matches a given date match specifier

    Formats include:

    '{date}' match a given date to the accuracy provided

    '<{date}' on or before a given date

    '>{date}' on or after a given date

    >>> p1 = parsedate("10:29:59")
    >>> p2 = parsedate("10:30:00")
    >>> p3 = parsedate("10:30:59")
    >>> p4 = parsedate("10:31:00")
    >>> p5 = parsedate("Sep 15 10:30:00 1999")
    >>> f = matchdate("10:30")
    >>> f(p1[0])
    False
    >>> f(p2[0])
    True
    >>> f(p3[0])
    True
    >>> f(p4[0])
    False
    >>> f(p5[0])
    False
    """

    def lower(date):
        # bias unspecified elements toward the earliest possible instant
        d = {'mb': "1", 'd': "1"}
        return parsedate(date, extendeddateformats, d)[0]

    def upper(date):
        # bias unspecified elements toward the latest possible instant;
        # try month lengths 31/30/29 and fall back to 28 (February)
        d = {'mb': "12", 'HI': "23", 'M': "59", 'S': "59"}
        for days in ("31", "30", "29"):
            try:
                d["d"] = days
                return parsedate(date, extendeddateformats, d)[0]
            except Abort:
                pass
        d["d"] = "28"
        return parsedate(date, extendeddateformats, d)[0]

    date = date.strip()

    if not date:
        raise Abort(_("dates cannot consist entirely of whitespace"))
    elif date[0] == "<":
        if not date[1:]:
            raise Abort(_("invalid day spec, use '<DATE'"))
        when = upper(date[1:])
        return lambda x: x <= when
    elif date[0] == ">":
        if not date[1:]:
            raise Abort(_("invalid day spec, use '>DATE'"))
        when = lower(date[1:])
        return lambda x: x >= when
    elif date[0] == "-":
        # '-N': everything within the last N days
        try:
            days = int(date[1:])
        except ValueError:
            raise Abort(_("invalid day spec: %s") % date[1:])
        if days < 0:
            raise Abort(_('%s must be nonnegative (see "hg help dates")')
                        % date[1:])
        when = makedate()[0] - days * 3600 * 24
        return lambda x: x >= when
    elif " to " in date:
        # explicit inclusive range
        a, b = date.split(" to ")
        start, stop = lower(a), upper(b)
        return lambda x: x >= start and x <= stop
    else:
        # a bare date matches the whole span it describes
        start, stop = lower(date), upper(date)
        return lambda x: x >= start and x <= stop
1957
1964
def stringmatcher(pattern):
    """
    accepts a string, possibly starting with 're:' or 'literal:' prefix.
    returns the matcher name, pattern, and matcher function.
    missing or unknown prefixes are treated as literal matches.

    helper for tests:
    >>> def test(pattern, *tests):
    ...     kind, pattern, matcher = stringmatcher(pattern)
    ...     return (kind, pattern, [bool(matcher(t)) for t in tests])

    exact matching (no prefix):
    >>> test('abcdefg', 'abc', 'def', 'abcdefg')
    ('literal', 'abcdefg', [False, False, True])

    regex matching ('re:' prefix)
    >>> test('re:a.+b', 'nomatch', 'fooadef', 'fooadefbar')
    ('re', 'a.+b', [False, False, True])

    force exact matches ('literal:' prefix)
    >>> test('literal:re:foobar', 'foobar', 're:foobar')
    ('literal', 're:foobar', [False, True])

    unknown prefixes are ignored and treated as literals
    >>> test('foo:bar', 'foo', 'bar', 'foo:bar')
    ('literal', 'foo:bar', [False, False, True])
    """
    if pattern.startswith('re:'):
        body = pattern[3:]
        try:
            compiled = remod.compile(body)
        except remod.error as e:
            raise error.ParseError(_('invalid regular expression: %s')
                                   % e)
        return 're', body, compiled.search
    if pattern.startswith('literal:'):
        pattern = pattern[8:]
    # unknown (or no) prefix: exact-match on the full string
    return 'literal', pattern, pattern.__eq__
1996
2003
def shortuser(user):
    """Return a short representation of a user name or email address."""
    # drop the domain part of an address
    at = user.find('@')
    if at >= 0:
        user = user[:at]
    # keep only what follows an opening angle bracket ("Name <addr")
    lt = user.find('<')
    if lt >= 0:
        user = user[lt + 1:]
    # keep the first word, then the part before the first dot
    for sep in (' ', '.'):
        pos = user.find(sep)
        if pos >= 0:
            user = user[:pos]
    return user
2012
2019
def emailuser(user):
    """Return the user portion of an email address."""
    at = user.find('@')
    if at >= 0:
        # strip the domain
        user = user[:at]
    lt = user.find('<')
    if lt >= 0:
        # strip any "Real Name <" prefix
        user = user[lt + 1:]
    return user
2022
2029
def email(author):
    '''get email of author.'''
    # slice between '<' and '>'; with no brackets both bounds degrade to
    # the whole string (find('<') + 1 == 0, stop == None)
    end = author.find('>')
    stop = None if end == -1 else end
    return author[author.find('<') + 1:stop]
2029
2036
def ellipsis(text, maxlength=400):
    """Trim string to at most maxlength (default: 400) columns in display."""
    # delegates to encoding.trim with a '...' suffix on truncation;
    # presumably trims by terminal columns, not characters -- confirm
    # against encoding.py
    return encoding.trim(text, maxlength, ellipsis='...')
2033
2040
def unitcountfn(*unittable):
    '''return a function that renders a readable count of some quantity

    Each entry of unittable is a (multiplier, divisor, format) triple; the
    first entry whose threshold (divisor * multiplier) the count reaches is
    used, and the last entry serves as the catch-all for small counts.
    '''
    def go(count):
        for multiplier, divisor, fmt in unittable:
            if count >= divisor * multiplier:
                return fmt % (count / float(divisor))
        # below every threshold: render with the smallest unit
        return unittable[-1][2] % count

    return go
2044
2051
# human-readable byte counts: entries are (multiplier, divisor, format)
# in decreasing unit order, chosen so values render with 3 significant
# digits (e.g. '1.23 GB', '12.3 MB', '123 KB'); the final entry is the
# catch-all for values under 1 KB
bytecount = unitcountfn(
    (100, 1 << 30, _('%.0f GB')),
    (10, 1 << 30, _('%.1f GB')),
    (1, 1 << 30, _('%.2f GB')),
    (100, 1 << 20, _('%.0f MB')),
    (10, 1 << 20, _('%.1f MB')),
    (1, 1 << 20, _('%.2f MB')),
    (100, 1 << 10, _('%.0f KB')),
    (10, 1 << 10, _('%.1f KB')),
    (1, 1 << 10, _('%.2f KB')),
    (1, 1, _('%.0f bytes')),
    )
2057
2064
def uirepr(s):
    """repr() of s with doubled backslashes collapsed, keeping Windows
    paths readable in user-facing output."""
    raw = repr(s)
    # Avoid double backslash in Windows path repr()
    return raw.replace('\\\\', '\\')
2061
2068
2062 # delay import of textwrap
2069 # delay import of textwrap
def MBTextWrapper(**kwargs):
    # Factory wrapper: textwrap is assumed imported lazily elsewhere; on
    # first call this defines the subclass, rebinds the module-level name
    # MBTextWrapper to the class itself, and returns an instance -- later
    # calls construct instances directly without redefining the class.
    class tw(textwrap.TextWrapper):
        """
        Extend TextWrapper for width-awareness.

        Neither number of 'bytes' in any encoding nor 'characters' is
        appropriate to calculate terminal columns for specified string.

        Original TextWrapper implementation uses built-in 'len()' directly,
        so overriding is needed to use width information of each characters.

        In addition, characters classified into 'ambiguous' width are
        treated as wide in East Asian area, but as narrow in other.

        This requires use decision to determine width of such characters.
        """
        def _cutdown(self, ucstr, space_left):
            # Split ucstr at the largest prefix whose display width fits
            # in space_left; returns (prefix, remainder).
            l = 0
            colwidth = encoding.ucolwidth
            for i in xrange(len(ucstr)):
                l += colwidth(ucstr[i])
                if space_left < l:
                    return (ucstr[:i], ucstr[i:])
            return ucstr, ''

        # overriding of base class
        def _handle_long_word(self, reversed_chunks, cur_line, cur_len, width):
            # Like the base implementation, but cuts by display columns
            # (via _cutdown) instead of character count.
            space_left = max(width - cur_len, 1)

            if self.break_long_words:
                cut, res = self._cutdown(reversed_chunks[-1], space_left)
                cur_line.append(cut)
                reversed_chunks[-1] = res
            elif not cur_line:
                cur_line.append(reversed_chunks.pop())

        # this overriding code is imported from TextWrapper of Python 2.6
        # to calculate columns of string by 'encoding.ucolwidth()'
        def _wrap_chunks(self, chunks):
            colwidth = encoding.ucolwidth

            lines = []
            if self.width <= 0:
                raise ValueError("invalid width %r (must be > 0)" % self.width)

            # Arrange in reverse order so items can be efficiently popped
            # from a stack of chucks.
            chunks.reverse()

            while chunks:

                # Start the list of chunks that will make up the current line.
                # cur_len is just the length of all the chunks in cur_line.
                cur_line = []
                cur_len = 0

                # Figure out which static string will prefix this line.
                if lines:
                    indent = self.subsequent_indent
                else:
                    indent = self.initial_indent

                # Maximum width for this line.
                width = self.width - len(indent)

                # First chunk on line is whitespace -- drop it, unless this
                # is the very beginning of the text (i.e. no lines started yet).
                if self.drop_whitespace and chunks[-1].strip() == '' and lines:
                    del chunks[-1]

                while chunks:
                    l = colwidth(chunks[-1])

                    # Can at least squeeze this chunk onto the current line.
                    if cur_len + l <= width:
                        cur_line.append(chunks.pop())
                        cur_len += l

                    # Nope, this line is full.
                    else:
                        break

                # The current line is full, and the next chunk is too big to
                # fit on *any* line (not just this one).
                if chunks and colwidth(chunks[-1]) > width:
                    self._handle_long_word(chunks, cur_line, cur_len, width)

                # If the last chunk on this line is all whitespace, drop it.
                if (self.drop_whitespace and
                    cur_line and cur_line[-1].strip() == ''):
                    del cur_line[-1]

                # Convert current line back to a string and store it in list
                # of all lines (return value).
                if cur_line:
                    lines.append(indent + ''.join(cur_line))

            return lines

    # memoize the class so subsequent calls skip class creation entirely
    global MBTextWrapper
    MBTextWrapper = tw
    return tw(**kwargs)
2165
2172
def wrap(line, width, initindent='', hangindent=''):
    """Wrap ``line`` to ``width`` display columns, decoding/encoding with
    the local encoding and honoring first-line and hanging indents."""
    widest = max(len(hangindent), len(initindent))
    if width <= widest:
        # adjust for weird terminal size
        width = max(78, widest + 1)
    decode = lambda s: s.decode(encoding.encoding, encoding.encodingmode)
    line, initindent, hangindent = map(decode, (line, initindent, hangindent))
    wrapper = MBTextWrapper(width=width,
                            initial_indent=initindent,
                            subsequent_indent=hangindent)
    return wrapper.fill(line).encode(encoding.encoding)
2178
2185
def iterlines(iterator):
    """Flatten an iterator of text chunks into individual lines."""
    for block in iterator:
        for ln in block.splitlines():
            yield ln
2183
2190
def expandpath(path):
    """Expand environment variables and a leading ~user in path."""
    withvars = os.path.expandvars(path)
    return os.path.expanduser(withvars)
2186
2193
def hgcmd():
    """Return the command used to execute current hg

    This is different from hgexecutable() because on Windows we want
    to avoid things opening new shell windows like batch files, so we
    get either the python call or current executable.
    """
    if not mainfrozen():
        return gethgcmd()
    # frozen binary: py2app publishes the real binary via EXECUTABLEPATH
    if getattr(sys, 'frozen', None) == 'macosx_app':
        return [os.environ['EXECUTABLEPATH']]
    return [sys.executable]
2201
2208
def rundetached(args, condfn):
    """Execute the argument list in a detached process.

    condfn is a callable which is called repeatedly and should return
    True once the child process is known to have started successfully.
    At this point, the child process PID is returned. If the child
    process fails to start or finishes before condfn() evaluates to
    True, return -1.
    """
    # Windows case is easier because the child process is either
    # successfully starting and validating the condition or exiting
    # on failure. We just poll on its PID. On Unix, if the child
    # process fails to start, it will be left in a zombie state until
    # the parent wait on it, which we cannot do since we expect a long
    # running process on success. Instead we listen for SIGCHLD telling
    # us our child process terminated.
    terminated = set()
    def handler(signum, frame):
        # NOTE(review): os.wait() returns a (pid, status) tuple, so the
        # 'pid in terminated' test below compares an int against tuples
        # and can never match; termination is effectively detected via
        # testpid(). Confirm whether os.wait()[0] was intended here.
        terminated.add(os.wait())
    prevhandler = None
    SIGCHLD = getattr(signal, 'SIGCHLD', None)
    if SIGCHLD is not None:
        # only hook SIGCHLD on platforms that define it (not Windows)
        prevhandler = signal.signal(SIGCHLD, handler)
    try:
        pid = spawndetached(args)
        while not condfn():
            # re-test condfn() after the liveness check to close the race
            # where the child satisfied the condition and then exited
            if ((pid in terminated or not testpid(pid))
                and not condfn()):
                return -1
            time.sleep(0.1)
        return pid
    finally:
        if prevhandler is not None:
            # restore the SIGCHLD disposition that was in place before us
            signal.signal(signal.SIGCHLD, prevhandler)
2236
2243
def interpolate(prefix, mapping, s, fn=None, escape_prefix=False):
    """Return the result of interpolating items in the mapping into string s.

    prefix is a single character string, or a two character string with
    a backslash as the first character if the prefix needs to be escaped in
    a regular expression.

    fn is an optional function that will be applied to the replacement text
    just before replacement.

    escape_prefix is an optional flag that allows using doubled prefix for
    its escaping.
    """
    fn = fn or (lambda s: s)
    patterns = '|'.join(mapping.keys())
    if escape_prefix:
        patterns += '|' + prefix
        if len(prefix) > 1:
            # strip the leading backslash used for regexp escaping
            prefix_char = prefix[1:]
        else:
            prefix_char = prefix
        # work on a copy: the escape entry is an implementation detail and
        # must not leak back into the caller's mapping
        mapping = mapping.copy()
        mapping[prefix_char] = prefix_char
    r = remod.compile(r'%s(%s)' % (prefix, patterns))
    return r.sub(lambda x: fn(mapping[x.group()[1:]]), s)
2261
2268
def getport(port):
    """Return the port for a given network service.

    If port is an integer, it's returned as is. If it's a string, it's
    looked up using socket.getservbyname(). If there's no matching
    service, error.Abort is raised.
    """
    try:
        return int(port)
    except ValueError:
        # not numeric; treat it as a service name instead
        try:
            return socket.getservbyname(port)
        except socket.error:
            raise Abort(
                _("no port number associated with service '%s'") % port)
2278
2285
# canonical spellings accepted for boolean configuration values
_booleans = {'1': True, 'yes': True, 'true': True, 'on': True, 'always': True,
             '0': False, 'no': False, 'false': False, 'off': False,
             'never': False}

def parsebool(s):
    """Parse s into a boolean.

    If s is not a valid boolean, returns None.
    """
    normalized = s.lower()
    return _booleans.get(normalized, None)
2289
2296
2290 _hexdig = '0123456789ABCDEFabcdef'
2297 _hexdig = '0123456789ABCDEFabcdef'
2291 _hextochr = dict((a + b, chr(int(a + b, 16)))
2298 _hextochr = dict((a + b, chr(int(a + b, 16)))
2292 for a in _hexdig for b in _hexdig)
2299 for a in _hexdig for b in _hexdig)
2293
2300
2294 def _urlunquote(s):
2301 def _urlunquote(s):
2295 """Decode HTTP/HTML % encoding.
2302 """Decode HTTP/HTML % encoding.
2296
2303
2297 >>> _urlunquote('abc%20def')
2304 >>> _urlunquote('abc%20def')
2298 'abc def'
2305 'abc def'
2299 """
2306 """
2300 res = s.split('%')
2307 res = s.split('%')
2301 # fastpath
2308 # fastpath
2302 if len(res) == 1:
2309 if len(res) == 1:
2303 return s
2310 return s
2304 s = res[0]
2311 s = res[0]
2305 for item in res[1:]:
2312 for item in res[1:]:
2306 try:
2313 try:
2307 s += _hextochr[item[:2]] + item[2:]
2314 s += _hextochr[item[:2]] + item[2:]
2308 except KeyError:
2315 except KeyError:
2309 s += '%' + item
2316 s += '%' + item
2310 except UnicodeDecodeError:
2317 except UnicodeDecodeError:
2311 s += unichr(int(item[:2], 16)) + item[2:]
2318 s += unichr(int(item[:2], 16)) + item[2:]
2312 return s
2319 return s
2313
2320
class url(object):
    r"""Reliable URL parser.

    This parses URLs and provides attributes for the following
    components:

    <scheme>://<user>:<passwd>@<host>:<port>/<path>?<query>#<fragment>

    Missing components are set to None. The only exception is
    fragment, which is set to '' if present but empty.

    If parsefragment is False, fragment is included in query. If
    parsequery is False, query is included in path. If both are
    False, both fragment and query are included in path.

    See http://www.ietf.org/rfc/rfc2396.txt for more information.

    Note that for backward compatibility reasons, bundle URLs do not
    take host names. That means 'bundle://../' has a path of '../'.

    Examples:

    >>> url('http://www.ietf.org/rfc/rfc2396.txt')
    <url scheme: 'http', host: 'www.ietf.org', path: 'rfc/rfc2396.txt'>
    >>> url('ssh://[::1]:2200//home/joe/repo')
    <url scheme: 'ssh', host: '[::1]', port: '2200', path: '/home/joe/repo'>
    >>> url('file:///home/joe/repo')
    <url scheme: 'file', path: '/home/joe/repo'>
    >>> url('file:///c:/temp/foo/')
    <url scheme: 'file', path: 'c:/temp/foo/'>
    >>> url('bundle:foo')
    <url scheme: 'bundle', path: 'foo'>
    >>> url('bundle://../foo')
    <url scheme: 'bundle', path: '../foo'>
    >>> url(r'c:\foo\bar')
    <url path: 'c:\\foo\\bar'>
    >>> url(r'\\blah\blah\blah')
    <url path: '\\\\blah\\blah\\blah'>
    >>> url(r'\\blah\blah\blah#baz')
    <url path: '\\\\blah\\blah\\blah', fragment: 'baz'>
    >>> url(r'file:///C:\users\me')
    <url scheme: 'file', path: 'C:\\users\\me'>

    Authentication credentials:

    >>> url('ssh://joe:xyz@x/repo')
    <url scheme: 'ssh', user: 'joe', passwd: 'xyz', host: 'x', path: 'repo'>
    >>> url('ssh://joe@x/repo')
    <url scheme: 'ssh', user: 'joe', host: 'x', path: 'repo'>

    Query strings and fragments:

    >>> url('http://host/a?b#c')
    <url scheme: 'http', host: 'host', path: 'a', query: 'b', fragment: 'c'>
    >>> url('http://host/a?b#c', parsequery=False, parsefragment=False)
    <url scheme: 'http', host: 'host', path: 'a?b#c'>
    """

    _safechars = "!~*'()+"
    _safepchars = "/!~*'()+:\\"
    _matchscheme = remod.compile(r'^[a-zA-Z0-9+.\-]+:').match

    def __init__(self, path, parsequery=True, parsefragment=True):
        # We slowly chomp away at path until we have only the path left
        self.scheme = self.user = self.passwd = self.host = None
        self.port = self.path = self.query = self.fragment = None
        self._localpath = True
        self._hostport = ''
        self._origpath = path

        if parsefragment and '#' in path:
            path, self.fragment = path.split('#', 1)
            if not path:
                path = None

        # special case for Windows drive letters and UNC paths
        if hasdriveletter(path) or path.startswith(r'\\'):
            self.path = path
            return

        # For compatibility reasons, we can't handle bundle paths as
        # normal URLS
        if path.startswith('bundle:'):
            self.scheme = 'bundle'
            path = path[7:]
            if path.startswith('//'):
                path = path[2:]
            self.path = path
            return

        if self._matchscheme(path):
            parts = path.split(':', 1)
            if parts[0]:
                self.scheme, path = parts
                self._localpath = False

        if not path:
            path = None
            if self._localpath:
                self.path = ''
                return
        else:
            if self._localpath:
                # no scheme: everything left is a plain filesystem path
                self.path = path
                return

            if parsequery and '?' in path:
                path, self.query = path.split('?', 1)
                if not path:
                    path = None
                if not self.query:
                    self.query = None

            # // is required to specify a host/authority
            if path and path.startswith('//'):
                parts = path[2:].split('/', 1)
                if len(parts) > 1:
                    self.host, path = parts
                else:
                    self.host = parts[0]
                    path = None
                if not self.host:
                    self.host = None
                    # path of file:///d is /d
                    # path of file:///d:/ is d:/, not /d:/
                    if path and not hasdriveletter(path):
                        path = '/' + path

            if self.host and '@' in self.host:
                # split credentials off the authority; rsplit so '@' may
                # legitimately appear inside the user/password part
                self.user, self.host = self.host.rsplit('@', 1)
                if ':' in self.user:
                    self.user, self.passwd = self.user.split(':', 1)
                if not self.host:
                    self.host = None

            # Don't split on colons in IPv6 addresses without ports
            if (self.host and ':' in self.host and
                not (self.host.startswith('[') and self.host.endswith(']'))):
                self._hostport = self.host
                self.host, self.port = self.host.rsplit(':', 1)
                if not self.host:
                    self.host = None

            if (self.host and self.scheme == 'file' and
                self.host not in ('localhost', '127.0.0.1', '[::1]')):
                raise Abort(_('file:// URLs can only refer to localhost'))

        self.path = path

        # leave the query string escaped
        for a in ('user', 'passwd', 'host', 'port',
                  'path', 'fragment'):
            v = getattr(self, a)
            if v is not None:
                setattr(self, a, _urlunquote(v))

    def __repr__(self):
        """Debug representation listing only the components that are set."""
        attrs = []
        for a in ('scheme', 'user', 'passwd', 'host', 'port', 'path',
                  'query', 'fragment'):
            v = getattr(self, a)
            if v is not None:
                attrs.append('%s: %r' % (a, v))
        return '<url %s>' % ', '.join(attrs)

    def __str__(self):
        r"""Join the URL's components back into a URL string.

        Examples:

        >>> str(url('http://user:pw@host:80/c:/bob?fo:oo#ba:ar'))
        'http://user:pw@host:80/c:/bob?fo:oo#ba:ar'
        >>> str(url('http://user:pw@host:80/?foo=bar&baz=42'))
        'http://user:pw@host:80/?foo=bar&baz=42'
        >>> str(url('http://user:pw@host:80/?foo=bar%3dbaz'))
        'http://user:pw@host:80/?foo=bar%3dbaz'
        >>> str(url('ssh://user:pw@[::1]:2200//home/joe#'))
        'ssh://user:pw@[::1]:2200//home/joe#'
        >>> str(url('http://localhost:80//'))
        'http://localhost:80//'
        >>> str(url('http://localhost:80/'))
        'http://localhost:80/'
        >>> str(url('http://localhost:80'))
        'http://localhost:80/'
        >>> str(url('bundle:foo'))
        'bundle:foo'
        >>> str(url('bundle://../foo'))
        'bundle:../foo'
        >>> str(url('path'))
        'path'
        >>> str(url('file:///tmp/foo/bar'))
        'file:///tmp/foo/bar'
        >>> str(url('file:///c:/tmp/foo/bar'))
        'file:///c:/tmp/foo/bar'
        >>> print url(r'bundle:foo\bar')
        bundle:foo\bar
        >>> print url(r'file:///D:\data\hg')
        file:///D:\data\hg
        """
        if self._localpath:
            s = self.path
            if self.scheme == 'bundle':
                s = 'bundle:' + s
            if self.fragment:
                s += '#' + self.fragment
            return s

        s = self.scheme + ':'
        if self.user or self.passwd or self.host:
            s += '//'
        elif self.scheme and (not self.path or self.path.startswith('/')
                              or hasdriveletter(self.path)):
            s += '//'
            if hasdriveletter(self.path):
                s += '/'
        if self.user:
            s += urlreq.quote(self.user, safe=self._safechars)
        if self.passwd:
            s += ':' + urlreq.quote(self.passwd, safe=self._safechars)
        if self.user or self.passwd:
            s += '@'
        if self.host:
            if not (self.host.startswith('[') and self.host.endswith(']')):
                s += urlreq.quote(self.host)
            else:
                # IPv6 literal: brackets and colons must survive as-is
                s += self.host
        if self.port:
            s += ':' + urlreq.quote(self.port)
        if self.host:
            s += '/'
        if self.path:
            # TODO: similar to the query string, we should not unescape the
            # path when we store it, the path might contain '%2f' = '/',
            # which we should *not* escape.
            s += urlreq.quote(self.path, safe=self._safepchars)
        if self.query:
            # we store the query in escaped form.
            s += '?' + self.query
        if self.fragment is not None:
            s += '#' + urlreq.quote(self.fragment, safe=self._safepchars)
        return s

    def authinfo(self):
        """Return (url-without-credentials, urllib2 authinfo tuple or None)."""
        user, passwd = self.user, self.passwd
        try:
            # temporarily strip credentials to render the bare URL
            self.user, self.passwd = None, None
            s = str(self)
        finally:
            self.user, self.passwd = user, passwd
        if not self.user:
            return (s, None)
        # authinfo[1] is passed to urllib2 password manager, and its
        # URIs must not contain credentials. The host is passed in the
        # URIs list because Python < 2.4.3 uses only that to search for
        # a password.
        return (s, (None, (s, self.host),
                    self.user, self.passwd or ''))

    def isabs(self):
        """Report whether this URL cannot be joined onto a base path."""
        if self.scheme and self.scheme != 'file':
            return True # remote URL
        if hasdriveletter(self.path):
            return True # absolute for our purposes - can't be joined()
        if self.path.startswith(r'\\'):
            return True # Windows UNC path
        if self.path.startswith('/'):
            return True # POSIX-style
        return False

    def localpath(self):
        """Return the filesystem path this URL denotes, when it is local."""
        if self.scheme == 'file' or self.scheme == 'bundle':
            path = self.path or '/'
            # For Windows, we need to promote hosts containing drive
            # letters to paths with drive letters.
            if hasdriveletter(self._hostport):
                path = self._hostport + '/' + self.path
            elif (self.host is not None and self.path
                  and not hasdriveletter(path)):
                path = '/' + path
            return path
        return self._origpath

    def islocal(self):
        '''whether localpath will return something that posixfile can open'''
        return (not self.scheme or self.scheme == 'file'
                or self.scheme == 'bundle')
2600
2607
def hasscheme(path):
    """Report whether path carries an explicit URL scheme."""
    parsed = url(path)
    return bool(parsed.scheme)
2603
2610
def hasdriveletter(path):
    """True when path begins with a Windows drive letter like 'c:'."""
    return path and path[0:1].isalpha() and path[1:2] == ':'
2606
2613
def urllocalpath(path):
    """Return the local filesystem path for path parsed as a raw URL."""
    u = url(path, parsequery=False, parsefragment=False)
    return u.localpath()
2609
2616
def hidepassword(u):
    '''hide user credential in a url string'''
    parsed = url(u)
    if parsed.passwd:
        parsed.passwd = '***'
    return str(parsed)
2616
2623
def removeauth(u):
    '''remove all authentication information from a url string'''
    parsed = url(u)
    parsed.user = parsed.passwd = None
    return str(parsed)
2622
2629
def isatty(fp):
    '''Tell whether fp is attached to a terminal.

    Objects without an isatty() method are never considered ttys.
    '''
    try:
        return fp.isatty()
    except AttributeError:
        return False
2628
2635
# duration formatter used by timed() below; presumably unitcountfn picks
# the first (factor, divisor, format) triple whose unit fits the value so
# output keeps roughly three significant digits — confirm against
# unitcountfn's definition earlier in this file
timecount = unitcountfn(
    (1, 1e3, _('%.0f s')),
    (100, 1, _('%.1f s')),
    (10, 1, _('%.2f s')),
    (1, 1, _('%.3f s')),
    (100, 0.001, _('%.1f ms')),
    (10, 0.001, _('%.2f ms')),
    (1, 0.001, _('%.3f ms')),
    (100, 0.000001, _('%.1f us')),
    (10, 0.000001, _('%.2f us')),
    (1, 0.000001, _('%.3f us')),
    (100, 0.000000001, _('%.1f ns')),
    (10, 0.000000001, _('%.2f ns')),
    (1, 0.000000001, _('%.3f ns')),
    )
2644
2651
# current indentation depth of nested @timed reports (in spaces)
_timenesting = [0]

def timed(func):
    '''Report the execution time of a function call to stderr.

    During development, use as a decorator when you need to measure
    the cost of a function, e.g. as follows:

    @util.timed
    def foo(a, b, c):
        pass
    '''

    def wrapper(*args, **kwargs):
        begin = time.time()
        step = 2
        _timenesting[0] += step
        try:
            return func(*args, **kwargs)
        finally:
            spent = time.time() - begin
            _timenesting[0] -= step
            sys.stderr.write('%s%s: %s\n' %
                             (' ' * _timenesting[0], func.__name__,
                              timecount(spent)))
    return wrapper
2671
2678
# recognized size suffixes and their byte multipliers; order matters:
# longer suffixes like 'kb' must still win because 'b' is checked last
_sizeunits = (('m', 2**20), ('k', 2**10), ('g', 2**30),
              ('kb', 2**10), ('mb', 2**20), ('gb', 2**30), ('b', 1))

def sizetoint(s):
    '''Convert a space specifier to a byte count.

    >>> sizetoint('30')
    30
    >>> sizetoint('2.2kb')
    2252
    >>> sizetoint('6M')
    6291456
    '''
    spec = s.strip().lower()
    try:
        for suffix, multiplier in _sizeunits:
            if spec.endswith(suffix):
                return int(float(spec[:-len(suffix)]) * multiplier)
        return int(spec)
    except ValueError:
        raise error.ParseError(_("couldn't parse size: %s") % s)
2693
2700
2694 class hooks(object):
2701 class hooks(object):
2695 '''A collection of hook functions that can be used to extend a
2702 '''A collection of hook functions that can be used to extend a
2696 function's behavior. Hooks are called in lexicographic order,
2703 function's behavior. Hooks are called in lexicographic order,
2697 based on the names of their sources.'''
2704 based on the names of their sources.'''
2698
2705
2699 def __init__(self):
2706 def __init__(self):
2700 self._hooks = []
2707 self._hooks = []
2701
2708
2702 def add(self, source, hook):
2709 def add(self, source, hook):
2703 self._hooks.append((source, hook))
2710 self._hooks.append((source, hook))
2704
2711
2705 def __call__(self, *args):
2712 def __call__(self, *args):
2706 self._hooks.sort(key=lambda x: x[0])
2713 self._hooks.sort(key=lambda x: x[0])
2707 results = []
2714 results = []
2708 for source, hook in self._hooks:
2715 for source, hook in self._hooks:
2709 results.append(hook(*args))
2716 results.append(hook(*args))
2710 return results
2717 return results
2711
2718
2712 def getstackframes(skip=0, line=' %-*s in %s\n', fileline='%s:%s'):
2719 def getstackframes(skip=0, line=' %-*s in %s\n', fileline='%s:%s'):
2713 '''Yields lines for a nicely formatted stacktrace.
2720 '''Yields lines for a nicely formatted stacktrace.
2714 Skips the 'skip' last entries.
2721 Skips the 'skip' last entries.
2715 Each file+linenumber is formatted according to fileline.
2722 Each file+linenumber is formatted according to fileline.
2716 Each line is formatted according to line.
2723 Each line is formatted according to line.
2717 If line is None, it yields:
2724 If line is None, it yields:
2718 length of longest filepath+line number,
2725 length of longest filepath+line number,
2719 filepath+linenumber,
2726 filepath+linenumber,
2720 function
2727 function
2721
2728
2722 Not be used in production code but very convenient while developing.
2729 Not be used in production code but very convenient while developing.
2723 '''
2730 '''
2724 entries = [(fileline % (fn, ln), func)
2731 entries = [(fileline % (fn, ln), func)
2725 for fn, ln, func, _text in traceback.extract_stack()[:-skip - 1]]
2732 for fn, ln, func, _text in traceback.extract_stack()[:-skip - 1]]
2726 if entries:
2733 if entries:
2727 fnmax = max(len(entry[0]) for entry in entries)
2734 fnmax = max(len(entry[0]) for entry in entries)
2728 for fnln, func in entries:
2735 for fnln, func in entries:
2729 if line is None:
2736 if line is None:
2730 yield (fnmax, fnln, func)
2737 yield (fnmax, fnln, func)
2731 else:
2738 else:
2732 yield line % (fnmax, fnln, func)
2739 yield line % (fnmax, fnln, func)
2733
2740
2734 def debugstacktrace(msg='stacktrace', skip=0, f=sys.stderr, otherf=sys.stdout):
2741 def debugstacktrace(msg='stacktrace', skip=0, f=sys.stderr, otherf=sys.stdout):
2735 '''Writes a message to f (stderr) with a nicely formatted stacktrace.
2742 '''Writes a message to f (stderr) with a nicely formatted stacktrace.
2736 Skips the 'skip' last entries. By default it will flush stdout first.
2743 Skips the 'skip' last entries. By default it will flush stdout first.
2737 It can be used everywhere and intentionally does not require an ui object.
2744 It can be used everywhere and intentionally does not require an ui object.
2738 Not be used in production code but very convenient while developing.
2745 Not be used in production code but very convenient while developing.
2739 '''
2746 '''
2740 if otherf:
2747 if otherf:
2741 otherf.flush()
2748 otherf.flush()
2742 f.write('%s at:\n' % msg)
2749 f.write('%s at:\n' % msg)
2743 for line in getstackframes(skip + 1):
2750 for line in getstackframes(skip + 1):
2744 f.write(line)
2751 f.write(line)
2745 f.flush()
2752 f.flush()
2746
2753
2747 class dirs(object):
2754 class dirs(object):
2748 '''a multiset of directory names from a dirstate or manifest'''
2755 '''a multiset of directory names from a dirstate or manifest'''
2749
2756
2750 def __init__(self, map, skip=None):
2757 def __init__(self, map, skip=None):
2751 self._dirs = {}
2758 self._dirs = {}
2752 addpath = self.addpath
2759 addpath = self.addpath
2753 if safehasattr(map, 'iteritems') and skip is not None:
2760 if safehasattr(map, 'iteritems') and skip is not None:
2754 for f, s in map.iteritems():
2761 for f, s in map.iteritems():
2755 if s[0] != skip:
2762 if s[0] != skip:
2756 addpath(f)
2763 addpath(f)
2757 else:
2764 else:
2758 for f in map:
2765 for f in map:
2759 addpath(f)
2766 addpath(f)
2760
2767
2761 def addpath(self, path):
2768 def addpath(self, path):
2762 dirs = self._dirs
2769 dirs = self._dirs
2763 for base in finddirs(path):
2770 for base in finddirs(path):
2764 if base in dirs:
2771 if base in dirs:
2765 dirs[base] += 1
2772 dirs[base] += 1
2766 return
2773 return
2767 dirs[base] = 1
2774 dirs[base] = 1
2768
2775
2769 def delpath(self, path):
2776 def delpath(self, path):
2770 dirs = self._dirs
2777 dirs = self._dirs
2771 for base in finddirs(path):
2778 for base in finddirs(path):
2772 if dirs[base] > 1:
2779 if dirs[base] > 1:
2773 dirs[base] -= 1
2780 dirs[base] -= 1
2774 return
2781 return
2775 del dirs[base]
2782 del dirs[base]
2776
2783
2777 def __iter__(self):
2784 def __iter__(self):
2778 return self._dirs.iterkeys()
2785 return self._dirs.iterkeys()
2779
2786
2780 def __contains__(self, d):
2787 def __contains__(self, d):
2781 return d in self._dirs
2788 return d in self._dirs
2782
2789
2783 if safehasattr(parsers, 'dirs'):
2790 if safehasattr(parsers, 'dirs'):
2784 dirs = parsers.dirs
2791 dirs = parsers.dirs
2785
2792
2786 def finddirs(path):
2793 def finddirs(path):
2787 pos = path.rfind('/')
2794 pos = path.rfind('/')
2788 while pos != -1:
2795 while pos != -1:
2789 yield path[:pos]
2796 yield path[:pos]
2790 pos = path.rfind('/', 0, pos)
2797 pos = path.rfind('/', 0, pos)
2791
2798
2792 # compression utility
2799 # compression utility
2793
2800
2794 class nocompress(object):
2801 class nocompress(object):
2795 def compress(self, x):
2802 def compress(self, x):
2796 return x
2803 return x
2797 def flush(self):
2804 def flush(self):
2798 return ""
2805 return ""
2799
2806
2800 compressors = {
2807 compressors = {
2801 None: nocompress,
2808 None: nocompress,
2802 # lambda to prevent early import
2809 # lambda to prevent early import
2803 'BZ': lambda: bz2.BZ2Compressor(),
2810 'BZ': lambda: bz2.BZ2Compressor(),
2804 'GZ': lambda: zlib.compressobj(),
2811 'GZ': lambda: zlib.compressobj(),
2805 }
2812 }
2806 # also support the old form by courtesies
2813 # also support the old form by courtesies
2807 compressors['UN'] = compressors[None]
2814 compressors['UN'] = compressors[None]
2808
2815
2809 def _makedecompressor(decompcls):
2816 def _makedecompressor(decompcls):
2810 def generator(f):
2817 def generator(f):
2811 d = decompcls()
2818 d = decompcls()
2812 for chunk in filechunkiter(f):
2819 for chunk in filechunkiter(f):
2813 yield d.decompress(chunk)
2820 yield d.decompress(chunk)
2814 def func(fh):
2821 def func(fh):
2815 return chunkbuffer(generator(fh))
2822 return chunkbuffer(generator(fh))
2816 return func
2823 return func
2817
2824
2818 class ctxmanager(object):
2825 class ctxmanager(object):
2819 '''A context manager for use in 'with' blocks to allow multiple
2826 '''A context manager for use in 'with' blocks to allow multiple
2820 contexts to be entered at once. This is both safer and more
2827 contexts to be entered at once. This is both safer and more
2821 flexible than contextlib.nested.
2828 flexible than contextlib.nested.
2822
2829
2823 Once Mercurial supports Python 2.7+, this will become mostly
2830 Once Mercurial supports Python 2.7+, this will become mostly
2824 unnecessary.
2831 unnecessary.
2825 '''
2832 '''
2826
2833
2827 def __init__(self, *args):
2834 def __init__(self, *args):
2828 '''Accepts a list of no-argument functions that return context
2835 '''Accepts a list of no-argument functions that return context
2829 managers. These will be invoked at __call__ time.'''
2836 managers. These will be invoked at __call__ time.'''
2830 self._pending = args
2837 self._pending = args
2831 self._atexit = []
2838 self._atexit = []
2832
2839
2833 def __enter__(self):
2840 def __enter__(self):
2834 return self
2841 return self
2835
2842
2836 def enter(self):
2843 def enter(self):
2837 '''Create and enter context managers in the order in which they were
2844 '''Create and enter context managers in the order in which they were
2838 passed to the constructor.'''
2845 passed to the constructor.'''
2839 values = []
2846 values = []
2840 for func in self._pending:
2847 for func in self._pending:
2841 obj = func()
2848 obj = func()
2842 values.append(obj.__enter__())
2849 values.append(obj.__enter__())
2843 self._atexit.append(obj.__exit__)
2850 self._atexit.append(obj.__exit__)
2844 del self._pending
2851 del self._pending
2845 return values
2852 return values
2846
2853
2847 def atexit(self, func, *args, **kwargs):
2854 def atexit(self, func, *args, **kwargs):
2848 '''Add a function to call when this context manager exits. The
2855 '''Add a function to call when this context manager exits. The
2849 ordering of multiple atexit calls is unspecified, save that
2856 ordering of multiple atexit calls is unspecified, save that
2850 they will happen before any __exit__ functions.'''
2857 they will happen before any __exit__ functions.'''
2851 def wrapper(exc_type, exc_val, exc_tb):
2858 def wrapper(exc_type, exc_val, exc_tb):
2852 func(*args, **kwargs)
2859 func(*args, **kwargs)
2853 self._atexit.append(wrapper)
2860 self._atexit.append(wrapper)
2854 return func
2861 return func
2855
2862
2856 def __exit__(self, exc_type, exc_val, exc_tb):
2863 def __exit__(self, exc_type, exc_val, exc_tb):
2857 '''Context managers are exited in the reverse order from which
2864 '''Context managers are exited in the reverse order from which
2858 they were created.'''
2865 they were created.'''
2859 received = exc_type is not None
2866 received = exc_type is not None
2860 suppressed = False
2867 suppressed = False
2861 pending = None
2868 pending = None
2862 self._atexit.reverse()
2869 self._atexit.reverse()
2863 for exitfunc in self._atexit:
2870 for exitfunc in self._atexit:
2864 try:
2871 try:
2865 if exitfunc(exc_type, exc_val, exc_tb):
2872 if exitfunc(exc_type, exc_val, exc_tb):
2866 suppressed = True
2873 suppressed = True
2867 exc_type = None
2874 exc_type = None
2868 exc_val = None
2875 exc_val = None
2869 exc_tb = None
2876 exc_tb = None
2870 except BaseException:
2877 except BaseException:
2871 pending = sys.exc_info()
2878 pending = sys.exc_info()
2872 exc_type, exc_val, exc_tb = pending = sys.exc_info()
2879 exc_type, exc_val, exc_tb = pending = sys.exc_info()
2873 del self._atexit
2880 del self._atexit
2874 if pending:
2881 if pending:
2875 raise exc_val
2882 raise exc_val
2876 return received and suppressed
2883 return received and suppressed
2877
2884
2878 def _bz2():
2885 def _bz2():
2879 d = bz2.BZ2Decompressor()
2886 d = bz2.BZ2Decompressor()
2880 # Bzip2 stream start with BZ, but we stripped it.
2887 # Bzip2 stream start with BZ, but we stripped it.
2881 # we put it back for good measure.
2888 # we put it back for good measure.
2882 d.decompress('BZ')
2889 d.decompress('BZ')
2883 return d
2890 return d
2884
2891
2885 decompressors = {None: lambda fh: fh,
2892 decompressors = {None: lambda fh: fh,
2886 '_truncatedBZ': _makedecompressor(_bz2),
2893 '_truncatedBZ': _makedecompressor(_bz2),
2887 'BZ': _makedecompressor(lambda: bz2.BZ2Decompressor()),
2894 'BZ': _makedecompressor(lambda: bz2.BZ2Decompressor()),
2888 'GZ': _makedecompressor(lambda: zlib.decompressobj()),
2895 'GZ': _makedecompressor(lambda: zlib.decompressobj()),
2889 }
2896 }
2890 # also support the old form by courtesies
2897 # also support the old form by courtesies
2891 decompressors['UN'] = decompressors[None]
2898 decompressors['UN'] = decompressors[None]
2892
2899
2893 # convenient shortcut
2900 # convenient shortcut
2894 dst = debugstacktrace
2901 dst = debugstacktrace
@@ -1,260 +1,288 b''
1 This runs with TZ="GMT"
1 This runs with TZ="GMT"
2
2
3 $ hg init
3 $ hg init
4 $ echo "test-parse-date" > a
4 $ echo "test-parse-date" > a
5 $ hg add a
5 $ hg add a
6 $ hg ci -d "2006-02-01 13:00:30" -m "rev 0"
6 $ hg ci -d "2006-02-01 13:00:30" -m "rev 0"
7 $ echo "hi!" >> a
7 $ echo "hi!" >> a
8 $ hg ci -d "2006-02-01 13:00:30 -0500" -m "rev 1"
8 $ hg ci -d "2006-02-01 13:00:30 -0500" -m "rev 1"
9 $ hg tag -d "2006-04-15 13:30" "Hi"
9 $ hg tag -d "2006-04-15 13:30" "Hi"
10 $ hg backout --merge -d "2006-04-15 13:30 +0200" -m "rev 3" 1
10 $ hg backout --merge -d "2006-04-15 13:30 +0200" -m "rev 3" 1
11 reverting a
11 reverting a
12 created new head
12 created new head
13 changeset 3:107ce1ee2b43 backs out changeset 1:25a1420a55f8
13 changeset 3:107ce1ee2b43 backs out changeset 1:25a1420a55f8
14 merging with changeset 3:107ce1ee2b43
14 merging with changeset 3:107ce1ee2b43
15 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
15 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
16 (branch merge, don't forget to commit)
16 (branch merge, don't forget to commit)
17 $ hg ci -d "1150000000 14400" -m "rev 4 (merge)"
17 $ hg ci -d "1150000000 14400" -m "rev 4 (merge)"
18 $ echo "fail" >> a
18 $ echo "fail" >> a
19 $ hg ci -d "should fail" -m "fail"
19 $ hg ci -d "should fail" -m "fail"
20 abort: invalid date: 'should fail'
20 abort: invalid date: 'should fail'
21 [255]
21 [255]
22 $ hg ci -d "100000000000000000 1400" -m "fail"
22 $ hg ci -d "100000000000000000 1400" -m "fail"
23 abort: date exceeds 32 bits: 100000000000000000
23 abort: date exceeds 32 bits: 100000000000000000
24 [255]
24 [255]
25 $ hg ci -d "100000 1400000" -m "fail"
25 $ hg ci -d "100000 1400000" -m "fail"
26 abort: impossible time zone offset: 1400000
26 abort: impossible time zone offset: 1400000
27 [255]
27 [255]
28
28
29 Check with local timezone other than GMT and with DST
29 Check with local timezone other than GMT and with DST
30
30
31 $ TZ="PST+8PDT+7,M4.1.0/02:00:00,M10.5.0/02:00:00"
31 $ TZ="PST+8PDT+7,M4.1.0/02:00:00,M10.5.0/02:00:00"
32 $ export TZ
32 $ export TZ
33
33
34 PST=UTC-8 / PDT=UTC-7
34 PST=UTC-8 / PDT=UTC-7
35 Summer time begins on April's first Sunday at 2:00am,
35 Summer time begins on April's first Sunday at 2:00am,
36 and ends on October's last Sunday at 2:00am.
36 and ends on October's last Sunday at 2:00am.
37
37
38 $ hg debugrebuildstate
38 $ hg debugrebuildstate
39 $ echo "a" > a
39 $ echo "a" > a
40 $ hg ci -d "2006-07-15 13:30" -m "summer@UTC-7"
40 $ hg ci -d "2006-07-15 13:30" -m "summer@UTC-7"
41 $ hg debugrebuildstate
41 $ hg debugrebuildstate
42 $ echo "b" > a
42 $ echo "b" > a
43 $ hg ci -d "2006-07-15 13:30 +0500" -m "summer@UTC+5"
43 $ hg ci -d "2006-07-15 13:30 +0500" -m "summer@UTC+5"
44 $ hg debugrebuildstate
44 $ hg debugrebuildstate
45 $ echo "c" > a
45 $ echo "c" > a
46 $ hg ci -d "2006-01-15 13:30" -m "winter@UTC-8"
46 $ hg ci -d "2006-01-15 13:30" -m "winter@UTC-8"
47 $ hg debugrebuildstate
47 $ hg debugrebuildstate
48 $ echo "d" > a
48 $ echo "d" > a
49 $ hg ci -d "2006-01-15 13:30 +0500" -m "winter@UTC+5"
49 $ hg ci -d "2006-01-15 13:30 +0500" -m "winter@UTC+5"
50 $ hg log --template '{date|date}\n'
50 $ hg log --template '{date|date}\n'
51 Sun Jan 15 13:30:00 2006 +0500
51 Sun Jan 15 13:30:00 2006 +0500
52 Sun Jan 15 13:30:00 2006 -0800
52 Sun Jan 15 13:30:00 2006 -0800
53 Sat Jul 15 13:30:00 2006 +0500
53 Sat Jul 15 13:30:00 2006 +0500
54 Sat Jul 15 13:30:00 2006 -0700
54 Sat Jul 15 13:30:00 2006 -0700
55 Sun Jun 11 00:26:40 2006 -0400
55 Sun Jun 11 00:26:40 2006 -0400
56 Sat Apr 15 13:30:00 2006 +0200
56 Sat Apr 15 13:30:00 2006 +0200
57 Sat Apr 15 13:30:00 2006 +0000
57 Sat Apr 15 13:30:00 2006 +0000
58 Wed Feb 01 13:00:30 2006 -0500
58 Wed Feb 01 13:00:30 2006 -0500
59 Wed Feb 01 13:00:30 2006 +0000
59 Wed Feb 01 13:00:30 2006 +0000
60
60
61 Test issue1014 (fractional timezones)
61 Test issue1014 (fractional timezones)
62
62
63 $ hg debugdate "1000000000 -16200" # 0430
63 $ hg debugdate "1000000000 -16200" # 0430
64 internal: 1000000000 -16200
64 internal: 1000000000 -16200
65 standard: Sun Sep 09 06:16:40 2001 +0430
65 standard: Sun Sep 09 06:16:40 2001 +0430
66 $ hg debugdate "1000000000 -15300" # 0415
66 $ hg debugdate "1000000000 -15300" # 0415
67 internal: 1000000000 -15300
67 internal: 1000000000 -15300
68 standard: Sun Sep 09 06:01:40 2001 +0415
68 standard: Sun Sep 09 06:01:40 2001 +0415
69 $ hg debugdate "1000000000 -14400" # 0400
69 $ hg debugdate "1000000000 -14400" # 0400
70 internal: 1000000000 -14400
70 internal: 1000000000 -14400
71 standard: Sun Sep 09 05:46:40 2001 +0400
71 standard: Sun Sep 09 05:46:40 2001 +0400
72 $ hg debugdate "1000000000 0" # GMT
72 $ hg debugdate "1000000000 0" # GMT
73 internal: 1000000000 0
73 internal: 1000000000 0
74 standard: Sun Sep 09 01:46:40 2001 +0000
74 standard: Sun Sep 09 01:46:40 2001 +0000
75 $ hg debugdate "1000000000 14400" # -0400
75 $ hg debugdate "1000000000 14400" # -0400
76 internal: 1000000000 14400
76 internal: 1000000000 14400
77 standard: Sat Sep 08 21:46:40 2001 -0400
77 standard: Sat Sep 08 21:46:40 2001 -0400
78 $ hg debugdate "1000000000 15300" # -0415
78 $ hg debugdate "1000000000 15300" # -0415
79 internal: 1000000000 15300
79 internal: 1000000000 15300
80 standard: Sat Sep 08 21:31:40 2001 -0415
80 standard: Sat Sep 08 21:31:40 2001 -0415
81 $ hg debugdate "1000000000 16200" # -0430
81 $ hg debugdate "1000000000 16200" # -0430
82 internal: 1000000000 16200
82 internal: 1000000000 16200
83 standard: Sat Sep 08 21:16:40 2001 -0430
83 standard: Sat Sep 08 21:16:40 2001 -0430
84 $ hg debugdate "Sat Sep 08 21:16:40 2001 +0430"
84 $ hg debugdate "Sat Sep 08 21:16:40 2001 +0430"
85 internal: 999967600 -16200
85 internal: 999967600 -16200
86 standard: Sat Sep 08 21:16:40 2001 +0430
86 standard: Sat Sep 08 21:16:40 2001 +0430
87 $ hg debugdate "Sat Sep 08 21:16:40 2001 -0430"
87 $ hg debugdate "Sat Sep 08 21:16:40 2001 -0430"
88 internal: 1000000000 16200
88 internal: 1000000000 16200
89 standard: Sat Sep 08 21:16:40 2001 -0430
89 standard: Sat Sep 08 21:16:40 2001 -0430
90
90
91 Test 12-hours times
91 Test 12-hours times
92
92
93 $ hg debugdate "2006-02-01 1:00:30PM +0000"
93 $ hg debugdate "2006-02-01 1:00:30PM +0000"
94 internal: 1138798830 0
94 internal: 1138798830 0
95 standard: Wed Feb 01 13:00:30 2006 +0000
95 standard: Wed Feb 01 13:00:30 2006 +0000
96 $ hg debugdate "1:00:30PM" > /dev/null
96 $ hg debugdate "1:00:30PM" > /dev/null
97
97
98 Normal range
98 Normal range
99
99
100 $ hg log -d -1
100 $ hg log -d -1
101
101
102 Negative range
102 Negative range
103
103
104 $ hg log -d "--2"
104 $ hg log -d "--2"
105 abort: -2 must be nonnegative (see "hg help dates")
105 abort: -2 must be nonnegative (see "hg help dates")
106 [255]
106 [255]
107
107
108 Whitespace only
108 Whitespace only
109
109
110 $ hg log -d " "
110 $ hg log -d " "
111 abort: dates cannot consist entirely of whitespace
111 abort: dates cannot consist entirely of whitespace
112 [255]
112 [255]
113
113
114 Test date formats with '>' or '<' accompanied by space characters
114 Test date formats with '>' or '<' accompanied by space characters
115
115
116 $ hg log -d '>' --template '{date|date}\n'
116 $ hg log -d '>' --template '{date|date}\n'
117 abort: invalid day spec, use '>DATE'
117 abort: invalid day spec, use '>DATE'
118 [255]
118 [255]
119 $ hg log -d '<' --template '{date|date}\n'
119 $ hg log -d '<' --template '{date|date}\n'
120 abort: invalid day spec, use '<DATE'
120 abort: invalid day spec, use '<DATE'
121 [255]
121 [255]
122
122
123 $ hg log -d ' >' --template '{date|date}\n'
123 $ hg log -d ' >' --template '{date|date}\n'
124 abort: invalid day spec, use '>DATE'
124 abort: invalid day spec, use '>DATE'
125 [255]
125 [255]
126 $ hg log -d ' <' --template '{date|date}\n'
126 $ hg log -d ' <' --template '{date|date}\n'
127 abort: invalid day spec, use '<DATE'
127 abort: invalid day spec, use '<DATE'
128 [255]
128 [255]
129
129
130 $ hg log -d '> ' --template '{date|date}\n'
130 $ hg log -d '> ' --template '{date|date}\n'
131 abort: invalid day spec, use '>DATE'
131 abort: invalid day spec, use '>DATE'
132 [255]
132 [255]
133 $ hg log -d '< ' --template '{date|date}\n'
133 $ hg log -d '< ' --template '{date|date}\n'
134 abort: invalid day spec, use '<DATE'
134 abort: invalid day spec, use '<DATE'
135 [255]
135 [255]
136
136
137 $ hg log -d ' > ' --template '{date|date}\n'
137 $ hg log -d ' > ' --template '{date|date}\n'
138 abort: invalid day spec, use '>DATE'
138 abort: invalid day spec, use '>DATE'
139 [255]
139 [255]
140 $ hg log -d ' < ' --template '{date|date}\n'
140 $ hg log -d ' < ' --template '{date|date}\n'
141 abort: invalid day spec, use '<DATE'
141 abort: invalid day spec, use '<DATE'
142 [255]
142 [255]
143
143
144 $ hg log -d '>02/01' --template '{date|date}\n'
144 $ hg log -d '>02/01' --template '{date|date}\n'
145 $ hg log -d '<02/01' --template '{date|date}\n'
145 $ hg log -d '<02/01' --template '{date|date}\n'
146 Sun Jan 15 13:30:00 2006 +0500
146 Sun Jan 15 13:30:00 2006 +0500
147 Sun Jan 15 13:30:00 2006 -0800
147 Sun Jan 15 13:30:00 2006 -0800
148 Sat Jul 15 13:30:00 2006 +0500
148 Sat Jul 15 13:30:00 2006 +0500
149 Sat Jul 15 13:30:00 2006 -0700
149 Sat Jul 15 13:30:00 2006 -0700
150 Sun Jun 11 00:26:40 2006 -0400
150 Sun Jun 11 00:26:40 2006 -0400
151 Sat Apr 15 13:30:00 2006 +0200
151 Sat Apr 15 13:30:00 2006 +0200
152 Sat Apr 15 13:30:00 2006 +0000
152 Sat Apr 15 13:30:00 2006 +0000
153 Wed Feb 01 13:00:30 2006 -0500
153 Wed Feb 01 13:00:30 2006 -0500
154 Wed Feb 01 13:00:30 2006 +0000
154 Wed Feb 01 13:00:30 2006 +0000
155
155
156 $ hg log -d ' >02/01' --template '{date|date}\n'
156 $ hg log -d ' >02/01' --template '{date|date}\n'
157 $ hg log -d ' <02/01' --template '{date|date}\n'
157 $ hg log -d ' <02/01' --template '{date|date}\n'
158 Sun Jan 15 13:30:00 2006 +0500
158 Sun Jan 15 13:30:00 2006 +0500
159 Sun Jan 15 13:30:00 2006 -0800
159 Sun Jan 15 13:30:00 2006 -0800
160 Sat Jul 15 13:30:00 2006 +0500
160 Sat Jul 15 13:30:00 2006 +0500
161 Sat Jul 15 13:30:00 2006 -0700
161 Sat Jul 15 13:30:00 2006 -0700
162 Sun Jun 11 00:26:40 2006 -0400
162 Sun Jun 11 00:26:40 2006 -0400
163 Sat Apr 15 13:30:00 2006 +0200
163 Sat Apr 15 13:30:00 2006 +0200
164 Sat Apr 15 13:30:00 2006 +0000
164 Sat Apr 15 13:30:00 2006 +0000
165 Wed Feb 01 13:00:30 2006 -0500
165 Wed Feb 01 13:00:30 2006 -0500
166 Wed Feb 01 13:00:30 2006 +0000
166 Wed Feb 01 13:00:30 2006 +0000
167
167
168 $ hg log -d '> 02/01' --template '{date|date}\n'
168 $ hg log -d '> 02/01' --template '{date|date}\n'
169 $ hg log -d '< 02/01' --template '{date|date}\n'
169 $ hg log -d '< 02/01' --template '{date|date}\n'
170 Sun Jan 15 13:30:00 2006 +0500
170 Sun Jan 15 13:30:00 2006 +0500
171 Sun Jan 15 13:30:00 2006 -0800
171 Sun Jan 15 13:30:00 2006 -0800
172 Sat Jul 15 13:30:00 2006 +0500
172 Sat Jul 15 13:30:00 2006 +0500
173 Sat Jul 15 13:30:00 2006 -0700
173 Sat Jul 15 13:30:00 2006 -0700
174 Sun Jun 11 00:26:40 2006 -0400
174 Sun Jun 11 00:26:40 2006 -0400
175 Sat Apr 15 13:30:00 2006 +0200
175 Sat Apr 15 13:30:00 2006 +0200
176 Sat Apr 15 13:30:00 2006 +0000
176 Sat Apr 15 13:30:00 2006 +0000
177 Wed Feb 01 13:00:30 2006 -0500
177 Wed Feb 01 13:00:30 2006 -0500
178 Wed Feb 01 13:00:30 2006 +0000
178 Wed Feb 01 13:00:30 2006 +0000
179
179
180 $ hg log -d ' > 02/01' --template '{date|date}\n'
180 $ hg log -d ' > 02/01' --template '{date|date}\n'
181 $ hg log -d ' < 02/01' --template '{date|date}\n'
181 $ hg log -d ' < 02/01' --template '{date|date}\n'
182 Sun Jan 15 13:30:00 2006 +0500
182 Sun Jan 15 13:30:00 2006 +0500
183 Sun Jan 15 13:30:00 2006 -0800
183 Sun Jan 15 13:30:00 2006 -0800
184 Sat Jul 15 13:30:00 2006 +0500
184 Sat Jul 15 13:30:00 2006 +0500
185 Sat Jul 15 13:30:00 2006 -0700
185 Sat Jul 15 13:30:00 2006 -0700
186 Sun Jun 11 00:26:40 2006 -0400
186 Sun Jun 11 00:26:40 2006 -0400
187 Sat Apr 15 13:30:00 2006 +0200
187 Sat Apr 15 13:30:00 2006 +0200
188 Sat Apr 15 13:30:00 2006 +0000
188 Sat Apr 15 13:30:00 2006 +0000
189 Wed Feb 01 13:00:30 2006 -0500
189 Wed Feb 01 13:00:30 2006 -0500
190 Wed Feb 01 13:00:30 2006 +0000
190 Wed Feb 01 13:00:30 2006 +0000
191
191
192 $ hg log -d '>02/01 ' --template '{date|date}\n'
192 $ hg log -d '>02/01 ' --template '{date|date}\n'
193 $ hg log -d '<02/01 ' --template '{date|date}\n'
193 $ hg log -d '<02/01 ' --template '{date|date}\n'
194 Sun Jan 15 13:30:00 2006 +0500
194 Sun Jan 15 13:30:00 2006 +0500
195 Sun Jan 15 13:30:00 2006 -0800
195 Sun Jan 15 13:30:00 2006 -0800
196 Sat Jul 15 13:30:00 2006 +0500
196 Sat Jul 15 13:30:00 2006 +0500
197 Sat Jul 15 13:30:00 2006 -0700
197 Sat Jul 15 13:30:00 2006 -0700
198 Sun Jun 11 00:26:40 2006 -0400
198 Sun Jun 11 00:26:40 2006 -0400
199 Sat Apr 15 13:30:00 2006 +0200
199 Sat Apr 15 13:30:00 2006 +0200
200 Sat Apr 15 13:30:00 2006 +0000
200 Sat Apr 15 13:30:00 2006 +0000
201 Wed Feb 01 13:00:30 2006 -0500
201 Wed Feb 01 13:00:30 2006 -0500
202 Wed Feb 01 13:00:30 2006 +0000
202 Wed Feb 01 13:00:30 2006 +0000
203
203
204 $ hg log -d ' >02/01 ' --template '{date|date}\n'
204 $ hg log -d ' >02/01 ' --template '{date|date}\n'
205 $ hg log -d ' <02/01 ' --template '{date|date}\n'
205 $ hg log -d ' <02/01 ' --template '{date|date}\n'
206 Sun Jan 15 13:30:00 2006 +0500
206 Sun Jan 15 13:30:00 2006 +0500
207 Sun Jan 15 13:30:00 2006 -0800
207 Sun Jan 15 13:30:00 2006 -0800
208 Sat Jul 15 13:30:00 2006 +0500
208 Sat Jul 15 13:30:00 2006 +0500
209 Sat Jul 15 13:30:00 2006 -0700
209 Sat Jul 15 13:30:00 2006 -0700
210 Sun Jun 11 00:26:40 2006 -0400
210 Sun Jun 11 00:26:40 2006 -0400
211 Sat Apr 15 13:30:00 2006 +0200
211 Sat Apr 15 13:30:00 2006 +0200
212 Sat Apr 15 13:30:00 2006 +0000
212 Sat Apr 15 13:30:00 2006 +0000
213 Wed Feb 01 13:00:30 2006 -0500
213 Wed Feb 01 13:00:30 2006 -0500
214 Wed Feb 01 13:00:30 2006 +0000
214 Wed Feb 01 13:00:30 2006 +0000
215
215
216 $ hg log -d '> 02/01 ' --template '{date|date}\n'
216 $ hg log -d '> 02/01 ' --template '{date|date}\n'
217 $ hg log -d '< 02/01 ' --template '{date|date}\n'
217 $ hg log -d '< 02/01 ' --template '{date|date}\n'
218 Sun Jan 15 13:30:00 2006 +0500
218 Sun Jan 15 13:30:00 2006 +0500
219 Sun Jan 15 13:30:00 2006 -0800
219 Sun Jan 15 13:30:00 2006 -0800
220 Sat Jul 15 13:30:00 2006 +0500
220 Sat Jul 15 13:30:00 2006 +0500
221 Sat Jul 15 13:30:00 2006 -0700
221 Sat Jul 15 13:30:00 2006 -0700
222 Sun Jun 11 00:26:40 2006 -0400
222 Sun Jun 11 00:26:40 2006 -0400
223 Sat Apr 15 13:30:00 2006 +0200
223 Sat Apr 15 13:30:00 2006 +0200
224 Sat Apr 15 13:30:00 2006 +0000
224 Sat Apr 15 13:30:00 2006 +0000
225 Wed Feb 01 13:00:30 2006 -0500
225 Wed Feb 01 13:00:30 2006 -0500
226 Wed Feb 01 13:00:30 2006 +0000
226 Wed Feb 01 13:00:30 2006 +0000
227
227
228 $ hg log -d ' > 02/01 ' --template '{date|date}\n'
228 $ hg log -d ' > 02/01 ' --template '{date|date}\n'
229 $ hg log -d ' < 02/01 ' --template '{date|date}\n'
229 $ hg log -d ' < 02/01 ' --template '{date|date}\n'
230 Sun Jan 15 13:30:00 2006 +0500
230 Sun Jan 15 13:30:00 2006 +0500
231 Sun Jan 15 13:30:00 2006 -0800
231 Sun Jan 15 13:30:00 2006 -0800
232 Sat Jul 15 13:30:00 2006 +0500
232 Sat Jul 15 13:30:00 2006 +0500
233 Sat Jul 15 13:30:00 2006 -0700
233 Sat Jul 15 13:30:00 2006 -0700
234 Sun Jun 11 00:26:40 2006 -0400
234 Sun Jun 11 00:26:40 2006 -0400
235 Sat Apr 15 13:30:00 2006 +0200
235 Sat Apr 15 13:30:00 2006 +0200
236 Sat Apr 15 13:30:00 2006 +0000
236 Sat Apr 15 13:30:00 2006 +0000
237 Wed Feb 01 13:00:30 2006 -0500
237 Wed Feb 01 13:00:30 2006 -0500
238 Wed Feb 01 13:00:30 2006 +0000
238 Wed Feb 01 13:00:30 2006 +0000
239
239
240 Test issue 3764 (interpreting 'today' and 'yesterday')
240 Test issue 3764 (interpreting 'today' and 'yesterday')
241 $ echo "hello" >> a
241 $ echo "hello" >> a
242 >>> import datetime
242 >>> import datetime
243 >>> today = datetime.date.today().strftime("%b %d")
243 >>> today = datetime.date.today().strftime("%b %d")
244 >>> yesterday = (datetime.date.today() - datetime.timedelta(days=1)).strftime("%b %d")
244 >>> yesterday = (datetime.date.today() - datetime.timedelta(days=1)).strftime("%b %d")
245 >>> dates = open('dates', 'w')
245 >>> dates = open('dates', 'w')
246 >>> dates.write(today + '\n')
246 >>> dates.write(today + '\n')
247 >>> dates.write(yesterday + '\n')
247 >>> dates.write(yesterday + '\n')
248 >>> dates.close()
248 >>> dates.close()
249 $ hg ci -d "`sed -n '1p' dates`" -m "today is a good day to code"
249 $ hg ci -d "`sed -n '1p' dates`" -m "today is a good day to code"
250 $ hg log -d today --template '{desc}\n'
250 $ hg log -d today --template '{desc}\n'
251 today is a good day to code
251 today is a good day to code
252 $ echo "goodbye" >> a
252 $ echo "goodbye" >> a
253 $ hg ci -d "`sed -n '2p' dates`" -m "the time traveler's code"
253 $ hg ci -d "`sed -n '2p' dates`" -m "the time traveler's code"
254 $ hg log -d yesterday --template '{desc}\n'
254 $ hg log -d yesterday --template '{desc}\n'
255 the time traveler's code
255 the time traveler's code
256 $ echo "foo" >> a
256 $ echo "foo" >> a
257 $ hg commit -d now -m 'Explicitly committed now.'
257 $ hg commit -d now -m 'Explicitly committed now.'
258 $ hg log -d today --template '{desc}\n'
258 $ hg log -d today --template '{desc}\n'
259 Explicitly committed now.
259 Explicitly committed now.
260 today is a good day to code
260 today is a good day to code
261
262 Test parsing various ISO8601 forms
263
264 $ hg debugdate "2016-07-27T12:10:21"
265 internal: 1469646621 * (glob)
266 standard: Wed Jul 27 12:10:21 2016 -0700
267 $ hg debugdate "2016-07-27T12:10:21Z"
268 internal: 1469621421 0
269 standard: Wed Jul 27 12:10:21 2016 +0000
270 $ hg debugdate "2016-07-27T12:10:21+00:00"
271 internal: 1469621421 0
272 standard: Wed Jul 27 12:10:21 2016 +0000
273 $ hg debugdate "2016-07-27T121021Z"
274 internal: 1469621421 0
275 standard: Wed Jul 27 12:10:21 2016 +0000
276
277 $ hg debugdate "2016-07-27 12:10:21"
278 internal: 1469646621 * (glob)
279 standard: Wed Jul 27 12:10:21 2016 -0700
280 $ hg debugdate "2016-07-27 12:10:21Z"
281 internal: 1469621421 0
282 standard: Wed Jul 27 12:10:21 2016 +0000
283 $ hg debugdate "2016-07-27 12:10:21+00:00"
284 internal: 1469621421 0
285 standard: Wed Jul 27 12:10:21 2016 +0000
286 $ hg debugdate "2016-07-27 121021Z"
287 internal: 1469621421 0
288 standard: Wed Jul 27 12:10:21 2016 +0000
General Comments 0
You need to be logged in to leave comments. Login now