##// END OF EJS Templates
util: simplify file I/O functions using context managers
Bryan O'Sullivan -
r27778:4d10600c default
parent child Browse files
Show More
@@ -1,2725 +1,2716
1 # util.py - Mercurial utility functions and platform specific implementations
1 # util.py - Mercurial utility functions and platform specific implementations
2 #
2 #
3 # Copyright 2005 K. Thananchayan <thananck@yahoo.com>
3 # Copyright 2005 K. Thananchayan <thananck@yahoo.com>
4 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
5 # Copyright 2006 Vadim Gelfer <vadim.gelfer@gmail.com>
5 # Copyright 2006 Vadim Gelfer <vadim.gelfer@gmail.com>
6 #
6 #
7 # This software may be used and distributed according to the terms of the
7 # This software may be used and distributed according to the terms of the
8 # GNU General Public License version 2 or any later version.
8 # GNU General Public License version 2 or any later version.
9
9
10 """Mercurial utility functions and platform specific implementations.
10 """Mercurial utility functions and platform specific implementations.
11
11
12 This contains helper routines that are independent of the SCM core and
12 This contains helper routines that are independent of the SCM core and
13 hide platform-specific details from the core.
13 hide platform-specific details from the core.
14 """
14 """
15
15
16 from __future__ import absolute_import
16 from __future__ import absolute_import
17
17
18 import bz2
18 import bz2
19 import calendar
19 import calendar
20 import collections
20 import collections
21 import datetime
21 import datetime
22 import errno
22 import errno
23 import gc
23 import gc
24 import hashlib
24 import hashlib
25 import imp
25 import imp
26 import os
26 import os
27 import re as remod
27 import re as remod
28 import shutil
28 import shutil
29 import signal
29 import signal
30 import socket
30 import socket
31 import subprocess
31 import subprocess
32 import sys
32 import sys
33 import tempfile
33 import tempfile
34 import textwrap
34 import textwrap
35 import time
35 import time
36 import traceback
36 import traceback
37 import urllib
37 import urllib
38 import zlib
38 import zlib
39
39
40 from . import (
40 from . import (
41 encoding,
41 encoding,
42 error,
42 error,
43 i18n,
43 i18n,
44 osutil,
44 osutil,
45 parsers,
45 parsers,
46 )
46 )
47
47
48 if os.name == 'nt':
48 if os.name == 'nt':
49 from . import windows as platform
49 from . import windows as platform
50 else:
50 else:
51 from . import posix as platform
51 from . import posix as platform
52
52
53 md5 = hashlib.md5
53 md5 = hashlib.md5
54 sha1 = hashlib.sha1
54 sha1 = hashlib.sha1
55 sha512 = hashlib.sha512
55 sha512 = hashlib.sha512
56 _ = i18n._
56 _ = i18n._
57
57
58 cachestat = platform.cachestat
58 cachestat = platform.cachestat
59 checkexec = platform.checkexec
59 checkexec = platform.checkexec
60 checklink = platform.checklink
60 checklink = platform.checklink
61 copymode = platform.copymode
61 copymode = platform.copymode
62 executablepath = platform.executablepath
62 executablepath = platform.executablepath
63 expandglobs = platform.expandglobs
63 expandglobs = platform.expandglobs
64 explainexit = platform.explainexit
64 explainexit = platform.explainexit
65 findexe = platform.findexe
65 findexe = platform.findexe
66 gethgcmd = platform.gethgcmd
66 gethgcmd = platform.gethgcmd
67 getuser = platform.getuser
67 getuser = platform.getuser
68 groupmembers = platform.groupmembers
68 groupmembers = platform.groupmembers
69 groupname = platform.groupname
69 groupname = platform.groupname
70 hidewindow = platform.hidewindow
70 hidewindow = platform.hidewindow
71 isexec = platform.isexec
71 isexec = platform.isexec
72 isowner = platform.isowner
72 isowner = platform.isowner
73 localpath = platform.localpath
73 localpath = platform.localpath
74 lookupreg = platform.lookupreg
74 lookupreg = platform.lookupreg
75 makedir = platform.makedir
75 makedir = platform.makedir
76 nlinks = platform.nlinks
76 nlinks = platform.nlinks
77 normpath = platform.normpath
77 normpath = platform.normpath
78 normcase = platform.normcase
78 normcase = platform.normcase
79 normcasespec = platform.normcasespec
79 normcasespec = platform.normcasespec
80 normcasefallback = platform.normcasefallback
80 normcasefallback = platform.normcasefallback
81 openhardlinks = platform.openhardlinks
81 openhardlinks = platform.openhardlinks
82 oslink = platform.oslink
82 oslink = platform.oslink
83 parsepatchoutput = platform.parsepatchoutput
83 parsepatchoutput = platform.parsepatchoutput
84 pconvert = platform.pconvert
84 pconvert = platform.pconvert
85 poll = platform.poll
85 poll = platform.poll
86 popen = platform.popen
86 popen = platform.popen
87 posixfile = platform.posixfile
87 posixfile = platform.posixfile
88 quotecommand = platform.quotecommand
88 quotecommand = platform.quotecommand
89 readpipe = platform.readpipe
89 readpipe = platform.readpipe
90 rename = platform.rename
90 rename = platform.rename
91 removedirs = platform.removedirs
91 removedirs = platform.removedirs
92 samedevice = platform.samedevice
92 samedevice = platform.samedevice
93 samefile = platform.samefile
93 samefile = platform.samefile
94 samestat = platform.samestat
94 samestat = platform.samestat
95 setbinary = platform.setbinary
95 setbinary = platform.setbinary
96 setflags = platform.setflags
96 setflags = platform.setflags
97 setsignalhandler = platform.setsignalhandler
97 setsignalhandler = platform.setsignalhandler
98 shellquote = platform.shellquote
98 shellquote = platform.shellquote
99 spawndetached = platform.spawndetached
99 spawndetached = platform.spawndetached
100 split = platform.split
100 split = platform.split
101 sshargs = platform.sshargs
101 sshargs = platform.sshargs
102 statfiles = getattr(osutil, 'statfiles', platform.statfiles)
102 statfiles = getattr(osutil, 'statfiles', platform.statfiles)
103 statisexec = platform.statisexec
103 statisexec = platform.statisexec
104 statislink = platform.statislink
104 statislink = platform.statislink
105 termwidth = platform.termwidth
105 termwidth = platform.termwidth
106 testpid = platform.testpid
106 testpid = platform.testpid
107 umask = platform.umask
107 umask = platform.umask
108 unlink = platform.unlink
108 unlink = platform.unlink
109 unlinkpath = platform.unlinkpath
109 unlinkpath = platform.unlinkpath
110 username = platform.username
110 username = platform.username
111
111
# Python compatibility

# unique sentinel object: lets safehasattr/lrucache code distinguish
# "attribute absent" from a legitimate None value
_notset = object()

# disable Python's problematic floating point timestamps (issue4836)
# (Python hypocritically says you shouldn't change this behavior in
# libraries, and sure enough Mercurial is not a library.)
os.stat_float_times(False)
120
120
def safehasattr(thing, attr):
    """Report whether 'thing' exposes attribute 'attr'.

    Implemented via getattr with a unique sentinel rather than hasattr.
    """
    probed = getattr(thing, attr, _notset)
    return probed is not _notset
123
123
# digest name -> hashlib constructor for every algorithm supported here
DIGESTS = {
    'md5': md5,
    'sha1': sha1,
    'sha512': sha512,
}
# List of digest types from strongest to weakest
DIGESTS_BY_STRENGTH = ['sha512', 'sha1', 'md5']

# sanity check: every entry in the strength ranking must be a known digest
for k in DIGESTS_BY_STRENGTH:
    assert k in DIGESTS
134
134
class digester(object):
    """helper to compute digests.

    This helper can be used to compute one or more digests given their name.

    >>> d = digester(['md5', 'sha1'])
    >>> d.update('foo')
    >>> [k for k in sorted(d)]
    ['md5', 'sha1']
    >>> d['md5']
    'acbd18db4cc2f85cedef654fccc4a4d8'
    >>> d['sha1']
    '0beec7b5ea3f0fdbc95d0dd47f3c5bc275da8a33'
    >>> digester.preferred(['md5', 'sha1'])
    'sha1'
    """

    def __init__(self, digests, s=''):
        # digests: iterable of digest names from DIGESTS
        # s: optional initial data fed to all digests
        self._hashes = {}
        for k in digests:
            if k not in DIGESTS:
                raise Abort(_('unknown digest type: %s') % k)
            self._hashes[k] = DIGESTS[k]()
        if s:
            self.update(s)

    def update(self, data):
        # feed the same chunk to every configured hash object
        for h in self._hashes.values():
            h.update(data)

    def __getitem__(self, key):
        if key not in DIGESTS:
            # bug fix: the message previously interpolated the undefined
            # name 'k' (a NameError on this error path) instead of 'key'
            raise Abort(_('unknown digest type: %s') % key)
        return self._hashes[key].hexdigest()

    def __iter__(self):
        return iter(self._hashes)

    @staticmethod
    def preferred(supported):
        """returns the strongest digest type in both supported and DIGESTS."""

        for k in DIGESTS_BY_STRENGTH:
            if k in supported:
                return k
        return None
181
181
class digestchecker(object):
    """file handle wrapper that additionally checks content against a given
    size and digests.

    d = digestchecker(fh, size, {'md5': '...'})

    When multiple digests are given, all of them are validated.
    """

    def __init__(self, fh, size, digests):
        self._fh = fh
        self._size = size
        self._got = 0
        self._digests = dict(digests)
        self._digester = digester(self._digests.keys())

    def read(self, length=-1):
        # forward the read while counting and hashing everything returned
        data = self._fh.read(length)
        self._digester.update(data)
        self._got += len(data)
        return data

    def validate(self):
        """Raise Abort when the byte count or any expected digest differs."""
        if self._got != self._size:
            raise Abort(_('size mismatch: expected %d, got %d') %
                        (self._size, self._got))
        for name, expected in self._digests.items():
            actual = self._digester[name]
            if expected != actual:
                # i18n: first parameter is a digest name
                raise Abort(_('%s mismatch: expected %s, got %s') %
                            (name, expected, actual))
213
213
# 'buffer' compatibility shim: Python 3 removed the builtin, so provide a
# slicing-based stand-in (memoryview avoids a copy on Python 3).
try:
    buffer = buffer
except NameError:
    if sys.version_info[0] < 3:
        def buffer(sliceable, offset=0):
            # plain slice copy on Python 2 without the builtin
            return sliceable[offset:]
    else:
        def buffer(sliceable, offset=0):
            # zero-copy view on Python 3
            return memoryview(sliceable)[offset:]

# subprocess close_fds default: only safe/cheap on POSIX
closefds = os.name == 'posix'

# read granularity used by bufferedinputpipe._fillbuffer
_chunksize = 4096
227
227
class bufferedinputpipe(object):
    """a manually buffered input pipe

    Python will not let us use buffered IO and lazy reading with 'polling' at
    the same time. We cannot probe the buffer state and select will not detect
    that data are ready to read if they are already buffered.

    This class let us work around that by implementing its own buffering
    (allowing efficient readline) while offering a way to know if the buffer is
    empty from the output (allowing collaboration of the buffer with polling).

    This class lives in the 'util' module because it makes use of the 'os'
    module from the python stdlib.
    """

    def __init__(self, input):
        # input: the raw pipe-like object being wrapped
        self._input = input
        self._buffer = []    # buffered chunks, oldest first
        self._eof = False    # set once os.read returns no data
        self._lenbuf = 0     # total number of bytes held in _buffer

    @property
    def hasbuffer(self):
        """True if any data is currently buffered

        This will be used externally a pre-step for polling IO. If there is
        already data then no polling should be set in place."""
        return bool(self._buffer)

    @property
    def closed(self):
        # delegate to the wrapped pipe
        return self._input.closed

    def fileno(self):
        # expose the underlying fd so callers can select/poll on it
        return self._input.fileno()

    def close(self):
        return self._input.close()

    def read(self, size):
        """return up to 'size' bytes, filling the buffer from the pipe as
        needed (may return less at end of file)"""
        while (not self._eof) and (self._lenbuf < size):
            self._fillbuffer()
        return self._frombuffer(size)

    def readline(self, *args, **kwargs):
        """return the next line (newline included), or the remaining data
        when the pipe hits end of file before a newline shows up"""
        if 1 < len(self._buffer):
            # this should not happen because both read and readline end with a
            # _frombuffer call that collapse it.
            self._buffer = [''.join(self._buffer)]
            self._lenbuf = len(self._buffer[0])
        # lfi: index of the first newline in the newest chunk, or -1
        lfi = -1
        if self._buffer:
            lfi = self._buffer[-1].find('\n')
        # keep reading until a newline appears or the pipe is exhausted
        while (not self._eof) and lfi < 0:
            self._fillbuffer()
            if self._buffer:
                lfi = self._buffer[-1].find('\n')
        size = lfi + 1
        if lfi < 0: # end of file
            size = self._lenbuf
        elif 1 < len(self._buffer):
            # we need to take previous chunks into account
            size += self._lenbuf - len(self._buffer[-1])
        return self._frombuffer(size)

    def _frombuffer(self, size):
        """return at most 'size' data from the buffer

        The data are removed from the buffer."""
        if size == 0 or not self._buffer:
            return ''
        buf = self._buffer[0]
        if 1 < len(self._buffer):
            # collapse all chunks into a single string before slicing
            buf = ''.join(self._buffer)

        data = buf[:size]
        buf = buf[len(data):]
        if buf:
            # keep the unread remainder as the sole buffered chunk
            self._buffer = [buf]
            self._lenbuf = len(buf)
        else:
            self._buffer = []
            self._lenbuf = 0
        return data

    def _fillbuffer(self):
        """read data to the buffer"""
        # read the raw fd directly to bypass any Python-level buffering
        data = os.read(self._input.fileno(), _chunksize)
        if not data:
            self._eof = True
        else:
            self._lenbuf += len(data)
            self._buffer.append(data)
321
321
def popen2(cmd, env=None, newlines=False):
    """Spawn cmd through the shell; return its (stdin, stdout) pipes."""
    # Setting bufsize to -1 lets the system decide the buffer size.
    # The default for bufsize is 0, meaning unbuffered. This leads to
    # poor performance on Mac OS X: http://bugs.python.org/issue4194
    proc = subprocess.Popen(cmd,
                            shell=True,
                            bufsize=-1,
                            close_fds=closefds,
                            stdin=subprocess.PIPE,
                            stdout=subprocess.PIPE,
                            universal_newlines=newlines,
                            env=env)
    return proc.stdin, proc.stdout
332
332
def popen3(cmd, env=None, newlines=False):
    """Like popen4, but expose only the (stdin, stdout, stderr) pipes."""
    pipes = popen4(cmd, env, newlines)
    # drop the trailing Popen object from popen4's 4-tuple
    return pipes[:3]
336
336
def popen4(cmd, env=None, newlines=False, bufsize=-1):
    """Spawn cmd through the shell; return (stdin, stdout, stderr, proc)."""
    # bufsize=-1 (system default buffering) avoids the unbuffered-pipe
    # slowdown; see popen2 for details.
    proc = subprocess.Popen(cmd,
                            shell=True,
                            bufsize=bufsize,
                            close_fds=closefds,
                            stdin=subprocess.PIPE,
                            stdout=subprocess.PIPE,
                            stderr=subprocess.PIPE,
                            universal_newlines=newlines,
                            env=env)
    return proc.stdin, proc.stdout, proc.stderr, proc
345
345
def version():
    """Return version information if available."""
    # the generated __version__ module only exists in built/installed trees
    try:
        from . import __version__
    except ImportError:
        return 'unknown'
    return __version__.version
353
353
def versiontuple(v=None, n=4):
    """Parses a Mercurial version string into an N-tuple.

    The version string to be parsed is specified with the ``v`` argument.
    If it isn't defined, the current Mercurial version string will be parsed.

    ``n`` can be 2, 3, or 4. Here is how some version strings map to
    returned values:

    >>> v = '3.6.1+190-df9b73d2d444'
    >>> versiontuple(v, 2)
    (3, 6)
    >>> versiontuple(v, 3)
    (3, 6, 1)
    >>> versiontuple(v, 4)
    (3, 6, 1, '190-df9b73d2d444')

    >>> versiontuple('3.6.1+190-df9b73d2d444+20151118')
    (3, 6, 1, '190-df9b73d2d444+20151118')

    >>> v = '3.6'
    >>> versiontuple(v, 2)
    (3, 6)
    >>> versiontuple(v, 3)
    (3, 6, None)
    >>> versiontuple(v, 4)
    (3, 6, None, None)
    """
    if not v:
        v = version()
    # split off the '+extra' suffix at the first '+' only
    vparts, plus, extra = v.partition('+')
    if not plus:
        extra = None

    vints = []
    for piece in vparts.split('.'):
        try:
            vints.append(int(piece))
        except ValueError:
            # stop at the first non-numeric component
            break
    # (3, 6) -> (3, 6, None)
    while len(vints) < 3:
        vints.append(None)

    if n == 2:
        return (vints[0], vints[1])
    if n == 3:
        return (vints[0], vints[1], vints[2])
    if n == 4:
        return (vints[0], vints[1], vints[2], extra)
406
406
# used by parsedate
# candidate strptime formats tried in order when parsing a user-supplied
# date string; listed roughly from most to least specific
defaultdateformats = (
    '%Y-%m-%d %H:%M:%S',
    '%Y-%m-%d %I:%M:%S%p',
    '%Y-%m-%d %H:%M',
    '%Y-%m-%d %I:%M%p',
    '%Y-%m-%d',
    '%m-%d',
    '%m/%d',
    '%m/%d/%y',
    '%m/%d/%Y',
    '%a %b %d %H:%M:%S %Y',
    '%a %b %d %I:%M:%S%p %Y',
    '%a, %d %b %Y %H:%M:%S', #  GNU coreutils "/bin/date --rfc-2822"
    '%b %d %H:%M:%S %Y',
    '%b %d %I:%M:%S%p %Y',
    '%b %d %H:%M:%S',
    '%b %d %I:%M:%S%p',
    '%b %d %H:%M',
    '%b %d %I:%M%p',
    '%b %d %Y',
    '%b %d',
    '%H:%M:%S',
    '%I:%M:%S%p',
    '%H:%M',
    '%I:%M%p',
)

# extra, coarser formats (year/month only) accepted in range expressions
extendeddateformats = defaultdateformats + (
    "%Y",
    "%Y-%m",
    "%b",
    "%b %Y",
    )
441
441
def cachefunc(func):
    '''cache the result of function calls'''
    # XXX doesn't handle keywords args
    # fix: 'func_code' is a Python 2-only attribute; '__code__' is the
    # portable spelling (available since Python 2.6 and on Python 3)
    argcount = func.__code__.co_argcount
    if argcount == 0:
        # nullary function: a one-element list is enough of a cache
        cache = []
        def f():
            if len(cache) == 0:
                cache.append(func())
            return cache[0]
        return f
    cache = {}
    if argcount == 1:
        # we gain a small amount of time because
        # we don't need to pack/unpack the list
        def f(arg):
            if arg not in cache:
                cache[arg] = func(arg)
            return cache[arg]
    else:
        def f(*args):
            if args not in cache:
                cache[args] = func(*args)
            return cache[args]

    return f
467
467
class sortdict(dict):
    '''a simple sorted dictionary

    Iteration order matches insertion order; re-setting an existing key
    moves it to the end.
    '''
    def __init__(self, data=None):
        self._list = []
        if data:
            self.update(data)
    def copy(self):
        return sortdict(self)
    def __setitem__(self, key, val):
        # re-inserting a key moves it to the end of the order
        if key in self:
            self._list.remove(key)
        self._list.append(key)
        dict.__setitem__(self, key, val)
    def __iter__(self):
        return self._list.__iter__()
    def update(self, src):
        if isinstance(src, dict):
            # fix: items() instead of the Python 2-only iteritems(), so the
            # class also works on Python 3; behavior is identical
            src = src.items()
        for k, v in src:
            self[k] = v
    def clear(self):
        dict.clear(self)
        self._list = []
    def items(self):
        return [(k, self[k]) for k in self._list]
    def __delitem__(self, key):
        dict.__delitem__(self, key)
        self._list.remove(key)
    def pop(self, key, *args, **kwargs):
        # fix: the popped value was previously discarded (pop returned
        # None); return it like dict.pop does. Callers ignoring the
        # return value are unaffected.
        value = dict.pop(self, key, *args, **kwargs)
        try:
            self._list.remove(key)
        except ValueError:
            # key was absent from the order list (e.g. pop with a default)
            pass
        return value
    def keys(self):
        return self._list
    def iterkeys(self):
        return self._list.__iter__()
    def iteritems(self):
        for k in self._list:
            yield k, self[k]
    def insert(self, index, key, val):
        # place key at an explicit position in the order
        self._list.insert(index, key)
        dict.__setitem__(self, key, val)
512
512
class _lrucachenode(object):
    """A node in a doubly linked list.

    Holds a reference to nodes on either side as well as a key-value
    pair for the dictionary entry.
    """
    __slots__ = ('next', 'prev', 'key', 'value')

    def __init__(self):
        # links are wired up by the owning lrucachedict
        self.prev = None
        self.next = None
        # _notset marks a node that holds no entry
        self.value = None
        self.key = _notset

    def markempty(self):
        """Mark the node as emptied."""
        self.key = _notset
531
531
532 class lrucachedict(object):
532 class lrucachedict(object):
533 """Dict that caches most recent accesses and sets.
533 """Dict that caches most recent accesses and sets.
534
534
535 The dict consists of an actual backing dict - indexed by original
535 The dict consists of an actual backing dict - indexed by original
536 key - and a doubly linked circular list defining the order of entries in
536 key - and a doubly linked circular list defining the order of entries in
537 the cache.
537 the cache.
538
538
539 The head node is the newest entry in the cache. If the cache is full,
539 The head node is the newest entry in the cache. If the cache is full,
540 we recycle head.prev and make it the new head. Cache accesses result in
540 we recycle head.prev and make it the new head. Cache accesses result in
541 the node being moved to before the existing head and being marked as the
541 the node being moved to before the existing head and being marked as the
542 new head node.
542 new head node.
543 """
543 """
    def __init__(self, max):
        # max: maximum number of entries before the oldest is recycled
        self._cache = {}

        # the circular list starts with a single self-referencing node and
        # grows lazily (see __setitem__) up to 'max' nodes
        self._head = head = _lrucachenode()
        head.prev = head
        head.next = head
        self._size = 1          # nodes allocated so far (<= capacity)
        self._capacity = max
552
552
    def __len__(self):
        # count live entries, not allocated list nodes
        return len(self._cache)
555
555
    def __contains__(self, k):
        # membership check via the backing dict; does not affect MRU order
        return k in self._cache
558
558
    def __iter__(self):
        # We don't have to iterate in cache order, but why not.
        # walks newest-to-oldest starting from the head node
        n = self._head
        for i in range(len(self._cache)):
            yield n.key
            n = n.next
565
565
    def __getitem__(self, k):
        # a successful lookup promotes the entry to most-recently-used
        node = self._cache[k]
        self._movetohead(node)
        return node.value
570
570
    def __setitem__(self, k, v):
        node = self._cache.get(k)
        # Replace existing value and mark as newest.
        if node is not None:
            node.value = v
            self._movetohead(node)
            return

        if self._size < self._capacity:
            # below capacity: allocate a fresh node for this entry
            node = self._addcapacity()
        else:
            # Grab the last/oldest item.
            node = self._head.prev

            # At capacity. Kill the old entry.
            if node.key is not _notset:
                del self._cache[node.key]

        node.key = k
        node.value = v
        self._cache[k] = node
        # And mark it as newest entry. No need to adjust order since it
        # is already self._head.prev.
        self._head = node
595
595
596 def __delitem__(self, k):
596 def __delitem__(self, k):
597 node = self._cache.pop(k)
597 node = self._cache.pop(k)
598 node.markempty()
598 node.markempty()
599
599
600 # Temporarily mark as newest item before re-adjusting head to make
600 # Temporarily mark as newest item before re-adjusting head to make
601 # this node the oldest item.
601 # this node the oldest item.
602 self._movetohead(node)
602 self._movetohead(node)
603 self._head = node.next
603 self._head = node.next
604
604
605 # Additional dict methods.
605 # Additional dict methods.
606
606
607 def get(self, k, default=None):
607 def get(self, k, default=None):
608 try:
608 try:
609 return self._cache[k]
609 return self._cache[k]
610 except KeyError:
610 except KeyError:
611 return default
611 return default
612
612
613 def clear(self):
613 def clear(self):
614 n = self._head
614 n = self._head
615 while n.key is not _notset:
615 while n.key is not _notset:
616 n.markempty()
616 n.markempty()
617 n = n.next
617 n = n.next
618
618
619 self._cache.clear()
619 self._cache.clear()
620
620
621 def copy(self):
621 def copy(self):
622 result = lrucachedict(self._capacity)
622 result = lrucachedict(self._capacity)
623 n = self._head.prev
623 n = self._head.prev
624 # Iterate in oldest-to-newest order, so the copy has the right ordering
624 # Iterate in oldest-to-newest order, so the copy has the right ordering
625 for i in range(len(self._cache)):
625 for i in range(len(self._cache)):
626 result[n.key] = n.value
626 result[n.key] = n.value
627 n = n.prev
627 n = n.prev
628 return result
628 return result
629
629
    def _movetohead(self, node):
        """Mark a node as the newest, making it the new head.

        When a node is accessed, it becomes the freshest entry in the LRU
        list, which is denoted by self._head.

        Visually, let's make ``N`` the new head node (* denotes head):

            previous/oldest <-> head <-> next/next newest

            ----<->--- A* ---<->-----
            |                       |
            E <-> D <-> N <-> C <-> B

        To:

            ----<->--- N* ---<->-----
            |                       |
            E <-> D <-> C <-> B <-> A

        This requires the following moves:

           C.next = D  (node.prev.next = node.next)
           D.prev = C  (node.next.prev = node.prev)
           E.next = N  (head.prev.next = node)
           N.prev = E  (node.prev = head.prev)
           N.next = A  (node.next = head)
           A.prev = N  (head.prev = node)
        """
        head = self._head
        # C.next = D
        node.prev.next = node.next
        # D.prev = C
        node.next.prev = node.prev
        # N.prev = E
        node.prev = head.prev
        # N.next = A
        # It is tempting to do just "head" here, however if node is
        # adjacent to head, this will do bad things.
        node.next = head.prev.next
        # E.next = N
        node.next.prev = node
        # A.prev = N
        node.prev.next = node

        self._head = node
676
676
677 def _addcapacity(self):
677 def _addcapacity(self):
678 """Add a node to the circular linked list.
678 """Add a node to the circular linked list.
679
679
680 The new node is inserted before the head node.
680 The new node is inserted before the head node.
681 """
681 """
682 head = self._head
682 head = self._head
683 node = _lrucachenode()
683 node = _lrucachenode()
684 head.prev.next = node
684 head.prev.next = node
685 node.prev = head.prev
685 node.prev = head.prev
686 node.next = head
686 node.next = head
687 head.prev = node
687 head.prev = node
688 self._size += 1
688 self._size += 1
689 return node
689 return node
690
690
def lrucachefunc(func):
    '''cache most recent results of function calls

    Keeps at most ~20 entries; the least recently used result is dropped
    when a new one would exceed that bound. Single-argument functions are
    keyed on the argument itself, others on the full argument tuple.
    '''
    cache = {}
    order = collections.deque()
    # func.__code__ has been the canonical spelling since Python 2.6 and
    # is the only one that exists on Python 3; func.func_code is a
    # Python-2-only alias.
    if func.__code__.co_argcount == 1:
        def f(arg):
            if arg not in cache:
                if len(cache) > 20:
                    # Evict the least recently used entry.
                    del cache[order.popleft()]
                cache[arg] = func(arg)
            else:
                order.remove(arg)
            order.append(arg)
            return cache[arg]
    else:
        def f(*args):
            if args not in cache:
                if len(cache) > 20:
                    del cache[order.popleft()]
                cache[args] = func(*args)
            else:
                order.remove(args)
            order.append(args)
            return cache[args]

    return f
717
717
class propertycache(object):
    """Descriptor computing a value on first access and caching it.

    The decorated function runs once per instance; its result is stored
    in the instance __dict__ under the same attribute name, so subsequent
    reads never reach the descriptor again.
    """
    def __init__(self, func):
        self.func = func
        self.name = func.__name__

    def __get__(self, obj, type=None):
        value = self.func(obj)
        self.cachevalue(obj, value)
        return value

    def cachevalue(self, obj, value):
        # __dict__ assignment required to bypass __setattr__ (eg: repoview)
        obj.__dict__[self.name] = value
730
730
def pipefilter(s, cmd):
    '''filter string S through command CMD, returning its output'''
    proc = subprocess.Popen(cmd, shell=True, close_fds=closefds,
                            stdin=subprocess.PIPE,
                            stdout=subprocess.PIPE)
    stdout, _stderr = proc.communicate(s)
    return stdout
737
737
def tempfilter(s, cmd):
    '''filter string S through a pair of temporary files with CMD.
    CMD is used as a template to create the real command to be run,
    with the strings INFILE and OUTFILE replaced by the real names of
    the temporary files generated.'''
    inname, outname = None, None
    try:
        infd, inname = tempfile.mkstemp(prefix='hg-filter-in-')
        fp = os.fdopen(infd, 'wb')
        try:
            fp.write(s)
        finally:
            # Close unconditionally: previously a failed write leaked the
            # descriptor (fp.close() was only reached on success).
            fp.close()
        outfd, outname = tempfile.mkstemp(prefix='hg-filter-out-')
        os.close(outfd)
        cmd = cmd.replace('INFILE', inname)
        cmd = cmd.replace('OUTFILE', outname)
        code = os.system(cmd)
        if sys.platform == 'OpenVMS' and code & 1:
            code = 0
        if code:
            raise Abort(_("command '%s' failed: %s") %
                        (cmd, explainexit(code)))
        return readfile(outname)
    finally:
        # Best-effort cleanup of both temp files; ignore races/missing.
        for name in (inname, outname):
            try:
                if name:
                    os.unlink(name)
            except OSError:
                pass
771
771
# Maps a command prefix to the filter strategy implementing it; commands
# with no recognized prefix fall back to pipefilter (see filter() below).
filtertable = {
    'tempfile:': tempfilter,
    'pipe:': pipefilter,
    }
776
776
def filter(s, cmd):
    "filter a string through a command that transforms its input to its output"
    for prefix, handler in filtertable.iteritems():
        if cmd.startswith(prefix):
            # Strip the scheme prefix and any following whitespace before
            # handing the remainder to the matching filter.
            return handler(s, cmd[len(prefix):].lstrip())
    return pipefilter(s, cmd)
783
783
def binary(s):
    """return true if a string is binary data"""
    # A NUL byte anywhere in a non-empty string marks it as binary.
    return bool(s) and '\0' in s
787
787
def increasingchunks(source, min=1024, max=65536):
    '''return no less than min bytes per chunk while data remains,
    doubling min after each chunk until it reaches max'''
    def log2(x):
        # Index of the highest set bit; 0 for x == 0.
        if not x:
            return 0
        bits = 0
        while x:
            x >>= 1
            bits += 1
        return bits - 1

    pending = []
    pendinglen = 0
    for chunk in source:
        pending.append(chunk)
        pendinglen += len(chunk)
        if pendinglen < min:
            continue
        if min < max:
            # Double the threshold, but jump straight to the largest
            # power of two not exceeding what we just accumulated if
            # that is bigger; never exceed max.
            min = min << 1
            nmin = 1 << log2(pendinglen)
            if nmin > min:
                min = nmin
            if min > max:
                min = max
        yield ''.join(pending)
        pending = []
        pendinglen = 0
    if pending:
        yield ''.join(pending)
818
818
# Re-export error.Abort at module level for convenience/back-compat.
Abort = error.Abort
820
820
def always(fn):
    """Matcher predicate that accepts every file name."""
    return True
823
823
def never(fn):
    """Matcher predicate that rejects every file name."""
    return False
826
826
def nogc(func):
    """disable garbage collector

    Python's garbage collector triggers a GC each time a certain number of
    container objects (the number being defined by gc.get_threshold()) are
    allocated even when marked not to be tracked by the collector. Tracking has
    no effect on when GCs are triggered, only on what objects the GC looks
    into. As a workaround, disable GC while building complex (huge)
    containers.

    This garbage collector issue have been fixed in 2.7.
    """
    def wrapper(*args, **kwargs):
        wasenabled = gc.isenabled()
        gc.disable()
        try:
            return func(*args, **kwargs)
        finally:
            # Only re-enable if the caller had GC on; never force it on.
            if wasenabled:
                gc.enable()
    return wrapper
848
848
def pathto(root, n1, n2):
    '''return the relative path from one place to another.
    root should use os.sep to separate directories
    n1 should use os.sep to separate directories
    n2 should use "/" to separate directories
    returns an os.sep-separated path.

    If n1 is a relative path, it's assumed it's
    relative to root.
    n2 should always be relative to root.
    '''
    if not n1:
        return localpath(n2)
    if os.path.isabs(n1):
        if os.path.splitdrive(root)[0] != os.path.splitdrive(n1)[0]:
            # Different drives: no relative path exists, fall back to
            # an absolute one rooted at root.
            return os.path.join(root, localpath(n2))
        n2 = '/'.join((pconvert(root), n2))
    comps1 = splitpath(n1)
    comps2 = n2.split('/')
    comps1.reverse()
    comps2.reverse()
    # Strip the common leading components (compared from the reversed
    # tails, i.e. the path heads).
    while comps1 and comps2 and comps1[-1] == comps2[-1]:
        comps1.pop()
        comps2.pop()
    comps2.reverse()
    # One '..' per remaining component of n1, then descend into n2.
    return os.sep.join(['..'] * len(comps1) + comps2) or '.'
874
874
def mainfrozen():
    """return True if we are a frozen executable.

    The code supports py2exe (most common, Windows only) and tools/freeze
    (portable, not much used).
    """
    if safehasattr(sys, "frozen"):  # new py2exe
        return True
    if safehasattr(sys, "importers"):  # old py2exe
        return True
    return imp.is_frozen("__main__")  # tools/freeze
884
884
# the location of data files matching the source code
if mainfrozen() and getattr(sys, 'frozen', None) != 'macosx_app':
    # executable version (py2exe) doesn't support __file__
    datapath = os.path.dirname(sys.executable)
else:
    datapath = os.path.dirname(__file__)

# Tell the i18n machinery where to find the translation catalogs.
i18n.setdatapath(datapath)
893
893
# Cached path of the 'hg' executable; resolved lazily by hgexecutable().
_hgexecutable = None
895
895
def hgexecutable():
    """return location of the 'hg' executable.

    Defaults to $HG or 'hg' in the search path.

    The result is computed once and cached in the module-level
    _hgexecutable; later calls return the cached value.
    """
    if _hgexecutable is None:
        hg = os.environ.get('HG')
        mainmod = sys.modules['__main__']
        if hg:
            _sethgexecutable(hg)
        elif mainfrozen():
            if getattr(sys, 'frozen', None) == 'macosx_app':
                # Env variable set by py2app
                _sethgexecutable(os.environ['EXECUTABLEPATH'])
            else:
                _sethgexecutable(sys.executable)
        elif os.path.basename(getattr(mainmod, '__file__', '')) == 'hg':
            # Running directly from an 'hg' script (e.g. the source tree).
            _sethgexecutable(mainmod.__file__)
        else:
            # Last resort: search PATH, falling back to argv[0]'s basename.
            exe = findexe('hg') or os.path.basename(sys.argv[0])
            _sethgexecutable(exe)
    return _hgexecutable
918
918
919 def _sethgexecutable(path):
919 def _sethgexecutable(path):
920 """set location of the 'hg' executable"""
920 """set location of the 'hg' executable"""
921 global _hgexecutable
921 global _hgexecutable
922 _hgexecutable = path
922 _hgexecutable = path
923
923
924 def _isstdout(f):
924 def _isstdout(f):
925 fileno = getattr(f, 'fileno', None)
925 fileno = getattr(f, 'fileno', None)
926 return fileno and fileno() == sys.__stdout__.fileno()
926 return fileno and fileno() == sys.__stdout__.fileno()
927
927
def system(cmd, environ=None, cwd=None, onerr=None, errprefix=None, out=None):
    '''enhanced shell command execution.
    run with environment maybe modified, maybe in different dir.

    if command fails and onerr is None, return status, else raise onerr
    object as exception.

    if out is specified, it is assumed to be a file-like object that has a
    write() method. stdout and stderr will be redirected to out.'''
    if environ is None:
        environ = {}
    try:
        # Flush our own stdout first so child output is not interleaved
        # ahead of anything we already buffered.
        sys.stdout.flush()
    except Exception:
        pass
    def py2shell(val):
        'convert python object into string that is useful to shell'
        if val is None or val is False:
            return '0'
        if val is True:
            return '1'
        return str(val)
    origcmd = cmd
    cmd = quotecommand(cmd)
    if sys.platform == 'plan9' and (sys.version_info[0] == 2
                                    and sys.version_info[1] < 7):
        # subprocess kludge to work around issues in half-baked Python
        # ports, notably bichued/python:
        if not cwd is None:
            os.chdir(cwd)
        rc = os.system(cmd)
    else:
        # Child environment: our environment plus the caller's overrides
        # (converted to shell-friendly strings) plus HG pointing at us.
        env = dict(os.environ)
        env.update((k, py2shell(v)) for k, v in environ.iteritems())
        env['HG'] = hgexecutable()
        if out is None or _isstdout(out):
            rc = subprocess.call(cmd, shell=True, close_fds=closefds,
                                 env=env, cwd=cwd)
        else:
            proc = subprocess.Popen(cmd, shell=True, close_fds=closefds,
                                    env=env, cwd=cwd, stdout=subprocess.PIPE,
                                    stderr=subprocess.STDOUT)
            # Relay the child's combined stdout/stderr line by line into
            # the caller-provided file-like object.
            while True:
                line = proc.stdout.readline()
                if not line:
                    break
                out.write(line)
            proc.wait()
            rc = proc.returncode
    if sys.platform == 'OpenVMS' and rc & 1:
        rc = 0
    if rc and onerr:
        errmsg = '%s %s' % (os.path.basename(origcmd.split(None, 1)[0]),
                            explainexit(rc)[0])
        if errprefix:
            errmsg = '%s: %s' % (errprefix, errmsg)
        raise onerr(errmsg)
    return rc
986
986
def checksignature(func):
    '''wrap a function with code to check for calling errors'''
    def check(*args, **kwargs):
        try:
            return func(*args, **kwargs)
        except TypeError:
            # A TypeError whose traceback has exactly one frame was raised
            # by the call itself (bad argument count/names); deeper
            # tracebacks come from func's own body and must propagate.
            if len(traceback.extract_tb(sys.exc_info()[2])) == 1:
                raise error.SignatureError
            raise

    return check
998
998
def copyfile(src, dest, hardlink=False, copystat=False):
    '''copy a file, preserving mode and optionally other stat info like
    atime/mtime'''
    if os.path.lexists(dest):
        unlink(dest)
    # hardlinks are problematic on CIFS, quietly ignore this flag
    # until we find a way to work around it cleanly (issue4546)
    if False and hardlink:
        try:
            oslink(src, dest)
            return
        except (IOError, OSError):
            pass # fall back to normal copy
    if os.path.islink(src):
        # Recreate the symlink itself rather than copying its target.
        # copytime is ignored for symlinks, but in general copytime isn't
        # needed for them anyway.
        os.symlink(os.readlink(src), dest)
        return
    try:
        shutil.copyfile(src, dest)
        if copystat:
            # copystat also copies mode
            shutil.copystat(src, dest)
        else:
            shutil.copymode(src, dest)
    except shutil.Error as inst:
        raise Abort(str(inst))
1026
1026
def copyfiles(src, dst, hardlink=None, progress=lambda t, pos: None):
    """Copy a directory tree using hardlinks if possible.

    Returns a (hardlink, count) pair: whether hardlinking was still in
    effect at the end, and how many files were processed.
    """
    num = 0

    if hardlink is None:
        # Only attempt hardlinks when src and dst's parent live on the
        # same device; cross-device links cannot work.
        hardlink = (os.stat(src).st_dev ==
                    os.stat(os.path.dirname(dst)).st_dev)
    if hardlink:
        topic = _('linking')
    else:
        topic = _('copying')

    if os.path.isdir(src):
        os.mkdir(dst)
        for name, kind in osutil.listdir(src):
            srcname = os.path.join(src, name)
            dstname = os.path.join(dst, name)
            def nprog(t, pos):
                # Offset the child's progress by the count already done.
                if pos is not None:
                    return progress(t, pos + num)
            hardlink, n = copyfiles(srcname, dstname, hardlink, progress=nprog)
            num += n
    else:
        if hardlink:
            try:
                oslink(src, dst)
            except (IOError, OSError):
                # Hardlinking failed; degrade to plain copies for this
                # and all remaining files.
                hardlink = False
                shutil.copy(src, dst)
        else:
            shutil.copy(src, dst)
        num += 1
        progress(topic, num)
    # A final None position tells the progress callback we are done.
    progress(topic, None)

    return hardlink, num
1063
1063
# File (base) names and characters that Windows refuses in paths; used by
# checkwinfilename() below. .split() ignores the layout whitespace.
_winreservednames = '''con prn aux nul
    com1 com2 com3 com4 com5 com6 com7 com8 com9
    lpt1 lpt2 lpt3 lpt4 lpt5 lpt6 lpt7 lpt8 lpt9'''.split()
_winreservedchars = ':*?"<>|'
def checkwinfilename(path):
    r'''Check that the base-relative path is a valid filename on Windows.
    Returns None if the path is ok, or a UI string describing the problem.

    >>> checkwinfilename("just/a/normal/path")
    >>> checkwinfilename("foo/bar/con.xml")
    "filename contains 'con', which is reserved on Windows"
    >>> checkwinfilename("foo/con.xml/bar")
    "filename contains 'con', which is reserved on Windows"
    >>> checkwinfilename("foo/bar/xml.con")
    >>> checkwinfilename("foo/bar/AUX/bla.txt")
    "filename contains 'AUX', which is reserved on Windows"
    >>> checkwinfilename("foo/bar/bla:.txt")
    "filename contains ':', which is reserved on Windows"
    >>> checkwinfilename("foo/bar/b\07la.txt")
    "filename contains '\\x07', which is invalid on Windows"
    >>> checkwinfilename("foo/bar/bla ")
    "filename ends with ' ', which is not allowed on Windows"
    >>> checkwinfilename("../bar")
    >>> checkwinfilename("foo\\")
    "filename ends with '\\', which is invalid on Windows"
    >>> checkwinfilename("foo\\/bar")
    "directory name ends with '\\', which is invalid on Windows"
    '''
    if path.endswith('\\'):
        return _("filename ends with '\\', which is invalid on Windows")
    if '\\/' in path:
        return _("directory name ends with '\\', which is invalid on Windows")
    # Treat backslashes as separators and validate each path component.
    for part in path.replace('\\', '/').split('/'):
        if not part:
            continue
        for ch in part:
            if ch in _winreservedchars:
                return _("filename contains '%s', which is reserved "
                         "on Windows") % ch
            if ord(ch) <= 31:
                return _("filename contains %r, which is invalid "
                         "on Windows") % ch
        # The portion before the first dot must not be a reserved device
        # name (con, prn, com1, ...), case-insensitively.
        stem = part.split('.')[0]
        if stem and stem.lower() in _winreservednames:
            return _("filename contains '%s', which is reserved "
                     "on Windows") % stem
        last = part[-1]
        if last in '. ' and part not in '..':
            return _("filename ends with '%s', which is not allowed "
                     "on Windows") % last
1114
# Pick the filename validator matching the local OS: the Windows rules
# above on NT, otherwise the platform module's implementation.
if os.name == 'nt':
    checkosfilename = checkwinfilename
else:
    checkosfilename = platform.checkosfilename
1119
1119
def makelock(info, pathname):
    """Create a lock file at *pathname* recording *info*.

    Prefers a symlink (atomic; info is stored as the link target). On
    platforms without os.symlink, falls back to exclusively creating a
    regular file containing info. An existing lock raises OSError(EEXIST).
    """
    try:
        return os.symlink(info, pathname)
    except OSError as why:
        if why.errno == errno.EEXIST:
            raise
    except AttributeError: # no symlink in os
        pass

    ld = os.open(pathname, os.O_CREAT | os.O_WRONLY | os.O_EXCL)
    try:
        os.write(ld, info)
    finally:
        # Close unconditionally: previously a failed write leaked the fd.
        os.close(ld)
1132
1132
def readlock(pathname):
    """Return the info stored in the lock file at *pathname*.

    Reads the symlink target when the lock is a symlink; otherwise reads
    the content of the regular lock file (the non-symlink fallback of
    makelock).
    """
    try:
        return os.readlink(pathname)
    except OSError as why:
        # EINVAL: exists but is not a symlink; ENOSYS: no symlink support.
        if why.errno not in (errno.EINVAL, errno.ENOSYS):
            raise
    except AttributeError: # no symlink in os
        pass
    fp = posixfile(pathname)
    try:
        return fp.read()
    finally:
        # Close unconditionally: previously a failed read leaked the file.
        fp.close()
1145
1145
def fstat(fp):
    '''stat file object that may not have fileno method.'''
    # prefer the real descriptor; objects without fileno are stat'ed by name
    try:
        fileno = fp.fileno
    except AttributeError:
        return os.stat(fp.name)
    return os.fstat(fileno())
1152
1152
1153 # File system features
1153 # File system features
1154
1154
def checkcase(path):
    """
    Return true if the given path is on a case-sensitive filesystem

    Requires a path (like /foo/.hg) ending with a foldable final
    directory component.
    """
    stat1 = os.lstat(path)
    dirpart, base = os.path.split(path)
    folded = base.upper()
    if folded == base:
        folded = base.lower()
        if folded == base:
            # final component has no case to fold
            return True # no evidence against case sensitivity
    alias = os.path.join(dirpart, folded)
    try:
        stat2 = os.lstat(alias)
    except OSError:
        # folded name does not exist: the filesystem distinguishes case
        return True
    # same inode under the folded name means case-insensitive
    return stat2 != stat1
1177
1177
# Attempt to use Google's re2 engine for faster regex matching.
# _re2 is tri-state: None = imported but not yet verified, False =
# unavailable; _re._checkre2 later sets it to True after a working match.
try:
    import re2
    _re2 = None
except ImportError:
    _re2 = False
1183
1183
class _re(object):
    # Facade over the regex engine: prefers the re2 library when it is
    # importable and the pattern/flags are compatible, otherwise falls
    # back to the stdlib re module (imported as remod).
    def _checkre2(self):
        global _re2
        try:
            # check if match works, see issue3964
            _re2 = bool(re2.match(r'\[([^\[]+)\]', '[ui]'))
        except ImportError:
            _re2 = False

    def compile(self, pat, flags=0):
        '''Compile a regular expression, using re2 if possible

        For best performance, use only re2-compatible regexp features. The
        only flags from the re module that are re2-compatible are
        IGNORECASE and MULTILINE.'''
        if _re2 is None:
            self._checkre2()
        if _re2 and (flags & ~(remod.IGNORECASE | remod.MULTILINE)) == 0:
            # re2 takes flags inline in the pattern rather than as arguments
            if flags & remod.IGNORECASE:
                pat = '(?i)' + pat
            if flags & remod.MULTILINE:
                pat = '(?m)' + pat
            try:
                return re2.compile(pat)
            except re2.error:
                # pattern uses a feature re2 does not support; fall back
                pass
        return remod.compile(pat, flags)

    @propertycache
    def escape(self):
        '''Return the version of escape corresponding to self.compile.

        This is imperfect because whether re2 or re is used for a particular
        function depends on the flags, etc, but it's the best we can do.
        '''
        global _re2
        if _re2 is None:
            self._checkre2()
        if _re2:
            return re2.escape
        else:
            return remod.escape

# module-level singleton used as the project's regex entry point
re = _re()
1228
1228
# cache of directory listings, keyed by directory path:
# {dir: {normcased name: name-as-stored}}
_fspathcache = {}
def fspath(name, root):
    '''Get name in the case stored in the filesystem

    The name should be relative to root, and be normcase-ed for efficiency.

    Note that this function is unnecessary, and should not be
    called, for case-sensitive filesystems (simply because it's expensive).

    The root should be normcase-ed, too.
    '''
    def _makefspathcacheentry(dir):
        # map normcased entry -> entry as stored on disk
        return dict((normcase(n), n) for n in os.listdir(dir))

    seps = os.sep
    if os.altsep:
        seps = seps + os.altsep
    # Protect backslashes for the character classes below. The previous
    # code called seps.replace() but discarded the result (str.replace is
    # not in-place), leaving a bare backslash from os.sep on Windows to
    # act as an escape character inside [...].
    seps = seps.replace('\\', '\\\\')
    pattern = remod.compile(r'([^%s]+)|([%s]+)' % (seps, seps))
    dir = os.path.normpath(root)
    result = []
    for part, sep in pattern.findall(name):
        if sep:
            # preserve runs of separators verbatim
            result.append(sep)
            continue

        if dir not in _fspathcache:
            _fspathcache[dir] = _makefspathcacheentry(dir)
        contents = _fspathcache[dir]

        found = contents.get(part)
        if not found:
            # retry "once per directory" per "dirstate.walk" which
            # may take place for each patches of "hg qpush", for example
            _fspathcache[dir] = contents = _makefspathcacheentry(dir)
            found = contents.get(part)

        result.append(found or part)
        dir = os.path.join(dir, part)

    return ''.join(result)
1271
1271
def checknlink(testfile):
    '''check whether hardlink count reporting works properly'''

    # testfile may be open, so we need a separate file for checking to
    # work around issue2543 (or testfile may get lost on Samba shares)
    f1 = testfile + ".hgtmp1"
    if os.path.lexists(f1):
        return False
    try:
        posixfile(f1, 'w').close()
    except IOError:
        # cannot even create the probe file: report hardlinks unusable
        return False

    f2 = testfile + ".hgtmp2"
    fd = None
    try:
        oslink(f1, f2)
        # nlinks() may behave differently for files on Windows shares if
        # the file is open.
        fd = posixfile(f2)
        return nlinks(f2) > 1
    except OSError:
        return False
    finally:
        # always close the probe handle and remove both temp files,
        # regardless of how the check went
        if fd is not None:
            fd.close()
        for f in (f1, f2):
            try:
                os.unlink(f)
            except OSError:
                pass
1303
1303
def endswithsep(path):
    '''Check path ends with os.sep or os.altsep.'''
    if path.endswith(os.sep):
        return True
    # falsy (None) when there is no altsep, matching the original's
    # short-circuit 'or ... and ...' expression
    return os.altsep and path.endswith(os.altsep)
1307
1307
def splitpath(path):
    '''Split path by os.sep.
    Note that this function does not use os.altsep because this is
    an alternative of simple "xxx.split(os.sep)".
    It is recommended to use os.path.normpath() before using this
    function if need.'''
    # plain split: empty components are preserved (e.g. leading '/')
    return path.split(os.sep)
1315
1315
def gui():
    '''Are we running in a GUI?'''
    if sys.platform == 'darwin':
        if 'SSH_CONNECTION' in os.environ:
            # handle SSH access to a box where the user is logged in
            return False
        elif getattr(osutil, 'isgui', None):
            # check if a CoreGraphics session is available
            return osutil.isgui()
        else:
            # pure build; use a safe default
            return True
    else:
        # Windows is always graphical; elsewhere treat a set DISPLAY as a
        # GUI (note: may return the truthy DISPLAY string rather than True)
        return os.name == "nt" or os.environ.get("DISPLAY")
1330
1330
def mktempcopy(name, emptyok=False, createmode=None):
    """Create a temporary file with the same contents from name

    The permission bits are copied from the original file.

    If the temporary file is going to be truncated immediately, you
    can use emptyok=True as an optimization.

    Returns the name of the temporary file.
    """
    d, fn = os.path.split(name)
    fd, temp = tempfile.mkstemp(prefix='.%s-' % fn, dir=d)
    os.close(fd)
    # Temporary files are created with mode 0600, which is usually not
    # what we want. If the original file already exists, just copy
    # its mode. Otherwise, manually obey umask.
    copymode(name, temp, createmode)
    if emptyok:
        return temp
    try:
        try:
            ifp = posixfile(name, "rb")
        except IOError as inst:
            if inst.errno == errno.ENOENT:
                # source does not exist: the caller gets an empty temp file
                return temp
            if not getattr(inst, 'filename', None):
                inst.filename = name
            raise
        ofp = posixfile(temp, "wb")
        for chunk in filechunkiter(ifp):
            ofp.write(chunk)
        ifp.close()
        ofp.close()
    except: # re-raises
        # on any failure, best-effort removal of the temp file before
        # propagating the original exception
        try: os.unlink(temp)
        except OSError: pass
        raise
    return temp
1369
1369
class atomictempfile(object):
    '''writable file object that atomically updates a file

    All writes will go to a temporary copy of the original file. Call
    close() when you are done writing, and atomictempfile will rename
    the temporary copy to the original name, making the changes
    visible. If the object is destroyed without being closed, all your
    writes are discarded.
    '''
    def __init__(self, name, mode='w+b', createmode=None):
        self.__name = name # permanent name
        self._tempname = mktempcopy(name, emptyok=('w' in mode),
                                    createmode=createmode)
        self._fp = posixfile(self._tempname, mode)

        # delegated methods
        self.write = self._fp.write
        self.seek = self._fp.seek
        self.tell = self._fp.tell
        self.fileno = self._fp.fileno

    def close(self):
        # rename only after a successful close, so readers never observe
        # a partially written file at the permanent name
        if not self._fp.closed:
            self._fp.close()
            rename(self._tempname, localpath(self.__name))

    def discard(self):
        # throw away all writes: remove the temp file, then close the handle
        if not self._fp.closed:
            try:
                os.unlink(self._tempname)
            except OSError:
                pass
            self._fp.close()

    def __del__(self):
        if safehasattr(self, '_fp'): # constructor actually did something
            self.discard()
1407
1407
def makedirs(name, mode=None, notindexed=False):
    """recursive directory creation with parent mode inheritance"""
    try:
        makedir(name, notindexed)
    except OSError as err:
        if err.errno == errno.EEXIST:
            # already exists: nothing to do
            return
        if err.errno != errno.ENOENT or not name:
            raise
        # missing parent: create ancestors first, then retry name
        parent = os.path.dirname(os.path.abspath(name))
        if parent == name:
            # reached the filesystem root without success
            raise
        makedirs(parent, mode, notindexed)
        makedir(name, notindexed)
    if mode is not None:
        os.chmod(name, mode)
1424
1424
def ensuredirs(name, mode=None, notindexed=False):
    """race-safe recursive directory creation

    Newly created directories are marked as "not to be indexed by
    the content indexing service", if ``notindexed`` is specified
    for "write" mode access.
    """
    if os.path.isdir(name):
        return
    # create ancestors bottom-up before attempting name itself
    parent = os.path.dirname(os.path.abspath(name))
    if parent != name:
        ensuredirs(parent, mode, notindexed)
    try:
        makedir(name, notindexed)
    except OSError as err:
        if err.errno == errno.EEXIST and os.path.isdir(name):
            # someone else seems to have won a directory creation race
            return
        raise
    if mode is not None:
        os.chmod(name, mode)
1446
1446
def readfile(path):
    '''Return the entire binary content of the file at path.'''
    with open(path, 'rb') as fp:
        data = fp.read()
    return data
1450
def writefile(path, text):
    '''Replace the content of the file at path with text (bytes).'''
    with open(path, 'wb') as fp:
        fp.write(text)
1454
def appendfile(path, text):
    '''Append text (bytes) to the file at path, creating it if needed.'''
    with open(path, 'ab') as fp:
        fp.write(text)
1458
class chunkbuffer(object):
    """Allow arbitrary sized chunks of data to be efficiently read from an
    iterator over chunks of arbitrary size."""

    def __init__(self, in_iter):
        """in_iter is the iterator that's iterating over the input chunks.
        targetsize is how big a buffer to try to maintain."""
        def splitbig(chunks):
            # break chunks larger than 1MB into 256KB pieces so one huge
            # chunk cannot dominate buffered memory
            for chunk in chunks:
                if len(chunk) > 2**20:
                    pos = 0
                    while pos < len(chunk):
                        end = pos + 2 ** 18
                        yield chunk[pos:end]
                        pos = end
                else:
                    yield chunk
        self.iter = splitbig(in_iter)
        self._queue = collections.deque()
        # offset of the first unconsumed byte inside self._queue[0]; lets
        # read() consume a chunk partially without re-slicing the deque
        self._chunkoffset = 0

    def read(self, l=None):
        """Read L bytes of data from the iterator of chunks of data.
        Returns less than L bytes if the iterator runs dry.

        If size parameter is omitted, read everything"""
        if l is None:
            return ''.join(self.iter)

        left = l
        buf = []
        queue = self._queue
        while left > 0:
            # refill the queue
            if not queue:
                target = 2**18
                for chunk in self.iter:
                    queue.append(chunk)
                    target -= len(chunk)
                    if target <= 0:
                        break
                if not queue:
                    # source iterator exhausted: return what we have
                    break

            # The easy way to do this would be to queue.popleft(), modify the
            # chunk (if necessary), then queue.appendleft(). However, for cases
            # where we read partial chunk content, this incurs 2 dequeue
            # mutations and creates a new str for the remaining chunk in the
            # queue. Our code below avoids this overhead.

            chunk = queue[0]
            chunkl = len(chunk)
            offset = self._chunkoffset

            # Use full chunk.
            if offset == 0 and left >= chunkl:
                left -= chunkl
                queue.popleft()
                buf.append(chunk)
                # self._chunkoffset remains at 0.
                continue

            chunkremaining = chunkl - offset

            # Use all of unconsumed part of chunk.
            if left >= chunkremaining:
                left -= chunkremaining
                queue.popleft()
                # offset == 0 is enabled by block above, so this won't merely
                # copy via ``chunk[0:]``.
                buf.append(chunk[offset:])
                self._chunkoffset = 0

            # Partial chunk needed.
            else:
                buf.append(chunk[offset:offset + left])
                self._chunkoffset += left
                left -= chunkremaining

        return ''.join(buf)
1548
1539
def filechunkiter(f, size=65536, limit=None):
    """Create a generator that produces the data in the file size
    (default 65536) bytes at a time, up to optional limit (default is
    to read all data). Chunks may be less than size bytes if the
    chunk is the last chunk in the file, or the file is a socket or
    some other type of file that sometimes reads less data than is
    requested."""
    assert size >= 0
    assert limit is None or limit >= 0
    while True:
        # cap the next read at whatever remains of the limit, if any
        nbytes = size if limit is None else min(limit, size)
        data = nbytes and f.read(nbytes)
        if not data:
            break
        if limit:
            limit -= len(data)
        yield data
1569
1560
def makedate(timestamp=None):
    '''Return a unix timestamp (or the current time) as a (unixtime,
    offset) tuple based off the local timezone.'''
    if timestamp is None:
        timestamp = time.time()
    if timestamp < 0:
        hint = _("check your clock")
        raise Abort(_("negative timestamp: %d") % timestamp, hint=hint)
    # offset is (UTC wall time - local wall time) in seconds
    utcwall = datetime.datetime.utcfromtimestamp(timestamp)
    localwall = datetime.datetime.fromtimestamp(timestamp)
    delta = utcwall - localwall
    tz = delta.days * 86400 + delta.seconds
    return timestamp, tz
1582
1573
def datestr(date=None, format='%a %b %d %H:%M:%S %Y %1%2'):
    """represent a (unixtime, offset) tuple as a localized time.
    unixtime is seconds since the epoch, and offset is the time zone's
    number of seconds away from UTC. if timezone is false, do not
    append time zone to string."""
    t, tz = date or makedate()
    if t < 0:
        t = 0 # time.gmtime(lt) fails on Windows for lt < -43200
        tz = 0
    if "%1" in format or "%2" in format or "%z" in format:
        # expand the hg-specific timezone placeholders (%1 = sign+hours,
        # %2 = minutes) before handing the format to strftime
        sign = "-" if tz > 0 else "+"
        minutes = abs(tz) // 60
        q, r = divmod(minutes, 60)
        format = format.replace("%z", "%1%2")
        format = format.replace("%1", "%c%02d" % (sign, q))
        format = format.replace("%2", "%02d" % r)
    try:
        t = time.gmtime(float(t) - tz)
    except ValueError:
        # time was out of range
        t = time.gmtime(sys.maxint)
    return time.strftime(format, t)
1606
1597
def shortdate(date=None):
    """turn (timestamp, tzoff) tuple into iso 8631 date."""
    # delegate to datestr with a fixed ISO-style format (no timezone part)
    return datestr(date, format='%Y-%m-%d')
1610
1601
def parsetimezone(tz):
    """parse a timezone string and return an offset integer"""
    # numeric form: sign, two hour digits, two minute digits (e.g. +0500)
    if tz[0] in "+-" and len(tz) == 5 and tz[1:].isdigit():
        sign = 1 if tz[0] == "+" else -1
        hours = int(tz[1:3])
        minutes = int(tz[3:5])
        # offsets east of UTC are stored as negative seconds
        return -sign * (hours * 60 + minutes) * 60
    if tz in ("GMT", "UTC"):
        return 0
    # unrecognized timezone string
    return None
1621
1612
def strdate(string, format, defaults=None):
    """parse a localized time string and return a (unixtime, offset) tuple.
    if the string cannot be parsed, ValueError is raised.

    defaults maps time-element groups ("S", "M", "HI", "d", "mb", "yY")
    to a (biased, now) pair of strings used to fill elements missing
    from format.
    """
    # NOTE: unixtime = localunixtime + offset
    if defaults is None:
        # fresh mapping instead of a shared mutable default argument
        # (the old default of [] could never be indexed by part anyway)
        defaults = {}
    offset, date = parsetimezone(string.split()[-1]), string
    if offset is not None:
        # last token was a timezone: strip it from the date text
        date = " ".join(string.split()[:-1])

    # add missing elements from defaults
    usenow = False # default to using biased defaults
    for part in ("S", "M", "HI", "d", "mb", "yY"): # decreasing specificity
        found = [True for p in part if ("%"+p) in format]
        if not found:
            date += "@" + defaults[part][usenow]
            format += "@%" + part[0]
        else:
            # We've found a specific time element, less specific time
            # elements are relative to today
            usenow = True

    timetuple = time.strptime(date, format)
    localunixtime = int(calendar.timegm(timetuple))
    if offset is None:
        # local timezone
        unixtime = int(time.mktime(timetuple))
        offset = unixtime - localunixtime
    else:
        unixtime = localunixtime + offset
    return unixtime, offset
1651
1642
def parsedate(date, formats=None, bias=None):
    """parse a localized date/time and return a (unixtime, offset) tuple.

    The date may be a "unixtime offset" string or in one of the specified
    formats. If the date already is a (unixtime, offset) tuple, it is returned.

    >>> parsedate(' today ') == parsedate(\
                                  datetime.date.today().strftime('%b %d'))
    True
    >>> parsedate( 'yesterday ') == parsedate((datetime.date.today() -\
                                               datetime.timedelta(days=1)\
                                              ).strftime('%b %d'))
    True
    >>> now, tz = makedate()
    >>> strnow, strtz = parsedate('now')
    >>> (strnow - now) < 1
    True
    >>> tz == strtz
    True
    """
    if bias is None:
        bias = {}
    if not date:
        return 0, 0
    # already parsed: pass (unixtime, offset) tuples through untouched
    if isinstance(date, tuple) and len(date) == 2:
        return date
    if not formats:
        formats = defaultdateformats
    date = date.strip()

    # symbolic dates; both the English and the localized spelling work
    if date == 'now' or date == _('now'):
        return makedate()
    if date == 'today' or date == _('today'):
        date = datetime.date.today().strftime('%b %d')
    elif date == 'yesterday' or date == _('yesterday'):
        date = (datetime.date.today() -
                datetime.timedelta(days=1)).strftime('%b %d')

    # fast path: a bare "unixtime offset" pair of integers
    try:
        when, offset = map(int, date.split(' '))
    except ValueError:
        # fill out defaults
        now = makedate()
        defaults = {}
        for part in ("d", "mb", "yY", "HI", "M", "S"):
            # this piece is for rounding the specific end of unknowns
            b = bias.get(part)
            if b is None:
                if part[0] in "HMS":
                    b = "00"
                else:
                    b = "0"

            # this piece is for matching the generic end to today's date
            n = datestr(now, "%" + part[0])

            defaults[part] = (b, n)

        # try each candidate format until one parses cleanly
        for format in formats:
            try:
                when, offset = strdate(date, format, defaults)
            except (ValueError, OverflowError):
                pass
            else:
                break
        else:
            raise Abort(_('invalid date: %r') % date)
    # validate explicit (probably user-specified) date and
    # time zone offset. values must fit in signed 32 bits for
    # current 32-bit linux runtimes. timezones go from UTC-12
    # to UTC+14
    if abs(when) > 0x7fffffff:
        raise Abort(_('date exceeds 32 bits: %d') % when)
    if when < 0:
        raise Abort(_('negative date value: %d') % when)
    if offset < -50400 or offset > 43200:
        raise Abort(_('impossible time zone offset: %d') % offset)
    return when, offset
1730
1721
def matchdate(date):
    """Return a function that matches a given date match specifier

    Formats include:

    '{date}' match a given date to the accuracy provided

    '<{date}' on or before a given date

    '>{date}' on or after a given date

    >>> p1 = parsedate("10:29:59")
    >>> p2 = parsedate("10:30:00")
    >>> p3 = parsedate("10:30:59")
    >>> p4 = parsedate("10:31:00")
    >>> p5 = parsedate("Sep 15 10:30:00 1999")
    >>> f = matchdate("10:30")
    >>> f(p1[0])
    False
    >>> f(p2[0])
    True
    >>> f(p3[0])
    True
    >>> f(p4[0])
    False
    >>> f(p5[0])
    False
    """

    def lower(date):
        # earliest timestamp the spec can mean: unspecified month/day
        # round down to 1 (time fields default to 00 via parsedate's bias)
        d = {'mb': "1", 'd': "1"}
        return parsedate(date, extendeddateformats, d)[0]

    def upper(date):
        # latest timestamp the spec can mean: round unspecified fields up;
        # try successively shorter month lengths until parsedate accepts
        # the day number for the month in question
        d = {'mb': "12", 'HI': "23", 'M': "59", 'S': "59"}
        for days in ("31", "30", "29"):
            try:
                d["d"] = days
                return parsedate(date, extendeddateformats, d)[0]
            except Abort:
                pass
        d["d"] = "28"
        return parsedate(date, extendeddateformats, d)[0]

    date = date.strip()

    if not date:
        raise Abort(_("dates cannot consist entirely of whitespace"))
    elif date[0] == "<":
        if not date[1:]:
            raise Abort(_("invalid day spec, use '<DATE'"))
        when = upper(date[1:])
        return lambda x: x <= when
    elif date[0] == ">":
        if not date[1:]:
            raise Abort(_("invalid day spec, use '>DATE'"))
        when = lower(date[1:])
        return lambda x: x >= when
    elif date[0] == "-":
        # '-N': anything newer than N days ago
        try:
            days = int(date[1:])
        except ValueError:
            raise Abort(_("invalid day spec: %s") % date[1:])
        if days < 0:
            raise Abort(_('%s must be nonnegative (see "hg help dates")')
                        % date[1:])
        when = makedate()[0] - days * 3600 * 24
        return lambda x: x >= when
    elif " to " in date:
        # explicit inclusive range: 'DATE to DATE'
        a, b = date.split(" to ")
        start, stop = lower(a), upper(b)
        return lambda x: x >= start and x <= stop
    else:
        # a single date matches the whole span it describes, to the
        # accuracy given (e.g. "10:30" covers 10:30:00..10:30:59)
        start, stop = lower(date), upper(date)
        return lambda x: x >= start and x <= stop
1806
1797
def stringmatcher(pattern):
    """
    accepts a string, possibly starting with 're:' or 'literal:' prefix.
    returns the matcher name, pattern, and matcher function.
    missing or unknown prefixes are treated as literal matches.

    helper for tests:
    >>> def test(pattern, *tests):
    ...     kind, pattern, matcher = stringmatcher(pattern)
    ...     return (kind, pattern, [bool(matcher(t)) for t in tests])

    exact matching (no prefix):
    >>> test('abcdefg', 'abc', 'def', 'abcdefg')
    ('literal', 'abcdefg', [False, False, True])

    regex matching ('re:' prefix)
    >>> test('re:a.+b', 'nomatch', 'fooadef', 'fooadefbar')
    ('re', 'a.+b', [False, False, True])

    force exact matches ('literal:' prefix)
    >>> test('literal:re:foobar', 'foobar', 're:foobar')
    ('literal', 're:foobar', [False, True])

    unknown prefixes are ignored and treated as literals
    >>> test('foo:bar', 'foo', 'bar', 'foo:bar')
    ('literal', 'foo:bar', [False, False, True])
    """
    if pattern.startswith('re:'):
        # regex matcher: strip the prefix and compile the remainder
        raw = pattern[3:]
        try:
            compiled = remod.compile(raw)
        except remod.error as e:
            raise error.ParseError(_('invalid regular expression: %s') % e)
        return 're', raw, compiled.search
    if pattern.startswith('literal:'):
        pattern = pattern[8:]
    # no (or unrecognized) prefix: plain string equality
    return 'literal', pattern, pattern.__eq__
1845
1836
def shortuser(user):
    """Return a short representation of a user name or email address."""
    # drop any '@domain' part first
    user = user.partition('@')[0]
    # keep only what follows an opening angle bracket, if present
    lt = user.find('<')
    if lt >= 0:
        user = user[lt + 1:]
    # truncate at the first space, then at the first dot
    user = user.partition(' ')[0]
    return user.partition('.')[0]
1861
1852
def emailuser(user):
    """Return the user portion of an email address."""
    # strip the domain, then any leading 'Display Name <' part
    user = user.partition('@')[0]
    bracket = user.find('<')
    if bracket >= 0:
        user = user[bracket + 1:]
    return user
1871
1862
def email(author):
    '''get email of author.'''
    # take everything between '<' and '>'; with no '<', start is index 0,
    # and with no '>', run to the end of the string
    start = author.find('<') + 1
    end = author.find('>')
    if end == -1:
        return author[start:]
    return author[start:end]
1878
1869
def ellipsis(text, maxlength=400):
    """Trim string to at most maxlength (default: 400) columns in display."""
    # delegates to encoding.trim, which works in display columns rather
    # than bytes/characters; '...' is appended when truncation occurs
    return encoding.trim(text, maxlength, ellipsis='...')
1882
1873
def unitcountfn(*unittable):
    '''return a function that renders a readable count of some quantity'''

    # unittable holds (multiplier, divisor, format) triples ordered from the
    # largest unit to the smallest; the first triple whose threshold
    # (multiplier * divisor) the value reaches wins.
    def render(value):
        for threshold_mult, unit, fmt in unittable:
            if value >= unit * threshold_mult:
                return fmt % (value / float(unit))
        # below every threshold: fall back to the finest-grained format
        return unittable[-1][2] % value

    return render
1893
1884
# bytecount renders a byte quantity with a binary unit (GB/MB/KB/bytes).
# The (multiplier, divisor, format) triples are ordered so the chosen
# format shows roughly three significant digits (e.g. '%.2f MB' is only
# used below 10 MB, '%.0f MB' from 100 MB up).
bytecount = unitcountfn(
    (100, 1 << 30, _('%.0f GB')),
    (10, 1 << 30, _('%.1f GB')),
    (1, 1 << 30, _('%.2f GB')),
    (100, 1 << 20, _('%.0f MB')),
    (10, 1 << 20, _('%.1f MB')),
    (1, 1 << 20, _('%.2f MB')),
    (100, 1 << 10, _('%.0f KB')),
    (10, 1 << 10, _('%.1f KB')),
    (1, 1 << 10, _('%.2f KB')),
    (1, 1, _('%.0f bytes')),
    )
1906
1897
def uirepr(s):
    # Avoid double backslash in Windows path repr()
    text = repr(s)
    # repr() escapes each backslash, which makes Windows paths unreadable
    # in user-facing output; collapse them back to single separators
    return text.replace('\\\\', '\\')
1910
1901
# delay import of textwrap
def MBTextWrapper(**kwargs):
    class tw(textwrap.TextWrapper):
        """
        Extend TextWrapper for width-awareness.

        Neither number of 'bytes' in any encoding nor 'characters' is
        appropriate to calculate terminal columns for specified string.

        Original TextWrapper implementation uses built-in 'len()' directly,
        so overriding is needed to use width information of each characters.

        In addition, characters classified into 'ambiguous' width are
        treated as wide in East Asian area, but as narrow in other.

        This requires use decision to determine width of such characters.
        """
        def _cutdown(self, ucstr, space_left):
            # split ucstr at the longest prefix whose display width fits
            # within space_left; returns (head, remainder)
            l = 0
            colwidth = encoding.ucolwidth
            for i in xrange(len(ucstr)):
                l += colwidth(ucstr[i])
                if space_left < l:
                    return (ucstr[:i], ucstr[i:])
            return ucstr, ''

        # overriding of base class
        def _handle_long_word(self, reversed_chunks, cur_line, cur_len, width):
            space_left = max(width - cur_len, 1)

            if self.break_long_words:
                cut, res = self._cutdown(reversed_chunks[-1], space_left)
                cur_line.append(cut)
                reversed_chunks[-1] = res
            elif not cur_line:
                cur_line.append(reversed_chunks.pop())

        # this overriding code is imported from TextWrapper of Python 2.6
        # to calculate columns of string by 'encoding.ucolwidth()'
        def _wrap_chunks(self, chunks):
            colwidth = encoding.ucolwidth

            lines = []
            if self.width <= 0:
                raise ValueError("invalid width %r (must be > 0)" % self.width)

            # Arrange in reverse order so items can be efficiently popped
            # from a stack of chucks.
            chunks.reverse()

            while chunks:

                # Start the list of chunks that will make up the current line.
                # cur_len is just the length of all the chunks in cur_line.
                cur_line = []
                cur_len = 0

                # Figure out which static string will prefix this line.
                if lines:
                    indent = self.subsequent_indent
                else:
                    indent = self.initial_indent

                # Maximum width for this line.
                width = self.width - len(indent)

                # First chunk on line is whitespace -- drop it, unless this
                # is the very beginning of the text (i.e. no lines started yet).
                if self.drop_whitespace and chunks[-1].strip() == '' and lines:
                    del chunks[-1]

                while chunks:
                    l = colwidth(chunks[-1])

                    # Can at least squeeze this chunk onto the current line.
                    if cur_len + l <= width:
                        cur_line.append(chunks.pop())
                        cur_len += l

                    # Nope, this line is full.
                    else:
                        break

                # The current line is full, and the next chunk is too big to
                # fit on *any* line (not just this one).
                if chunks and colwidth(chunks[-1]) > width:
                    self._handle_long_word(chunks, cur_line, cur_len, width)

                # If the last chunk on this line is all whitespace, drop it.
                if (self.drop_whitespace and
                    cur_line and cur_line[-1].strip() == ''):
                    del cur_line[-1]

                # Convert current line back to a string and store it in list
                # of all lines (return value).
                if cur_line:
                    lines.append(indent + ''.join(cur_line))

            return lines

    # replace this factory with the class itself so the class body (and the
    # textwrap machinery it pulls in) is only built on first use
    global MBTextWrapper
    MBTextWrapper = tw
    return tw(**kwargs)
2014
2005
def wrap(line, width, initindent='', hangindent=''):
    # Wrap `line` to `width` display columns; initindent prefixes the first
    # output line, hangindent all subsequent ones.  Wrapping is done in
    # unicode space via MBTextWrapper so column counts are width-aware,
    # then the result is re-encoded to the local encoding.
    maxindent = max(len(hangindent), len(initindent))
    if width <= maxindent:
        # adjust for weird terminal size
        width = max(78, maxindent + 1)
    line = line.decode(encoding.encoding, encoding.encodingmode)
    initindent = initindent.decode(encoding.encoding, encoding.encodingmode)
    hangindent = hangindent.decode(encoding.encoding, encoding.encodingmode)
    wrapper = MBTextWrapper(width=width,
                            initial_indent=initindent,
                            subsequent_indent=hangindent)
    return wrapper.fill(line).encode(encoding.encoding)
2027
2018
def iterlines(iterator):
    """Yield the individual lines of every chunk produced by *iterator*."""
    for blob in iterator:
        # splitlines() drops the terminators, so callers get bare lines
        pieces = blob.splitlines()
        for piece in pieces:
            yield piece
2032
2023
def expandpath(path):
    """Expand environment variables and '~' constructs in path."""
    # expandvars runs first so that a variable expanding to '~/...'
    # still has its user directory resolved by expanduser
    return os.path.expanduser(os.path.expandvars(path))
2035
2026
def hgcmd():
    """Return the command used to execute current hg

    This is different from hgexecutable() because on Windows we want
    to avoid things opening new shell windows like batch files, so we
    get either the python call or current executable.
    """
    if mainfrozen():
        if getattr(sys, 'frozen', None) == 'macosx_app':
            # Env variable set by py2app
            return [os.environ['EXECUTABLEPATH']]
        else:
            # other frozen builds: re-invoke the bundled executable itself
            return [sys.executable]
    return gethgcmd()
2050
2041
def rundetached(args, condfn):
    """Execute the argument list in a detached process.

    condfn is a callable which is called repeatedly and should return
    True once the child process is known to have started successfully.
    At this point, the child process PID is returned. If the child
    process fails to start or finishes before condfn() evaluates to
    True, return -1.
    """
    # Windows case is easier because the child process is either
    # successfully starting and validating the condition or exiting
    # on failure. We just poll on its PID. On Unix, if the child
    # process fails to start, it will be left in a zombie state until
    # the parent wait on it, which we cannot do since we expect a long
    # running process on success. Instead we listen for SIGCHLD telling
    # us our child process terminated.
    terminated = set()
    def handler(signum, frame):
        # reap the child and remember its (pid, status) pair
        terminated.add(os.wait())
    prevhandler = None
    SIGCHLD = getattr(signal, 'SIGCHLD', None)
    if SIGCHLD is not None:
        prevhandler = signal.signal(SIGCHLD, handler)
    try:
        pid = spawndetached(args)
        while not condfn():
            # re-test condfn() after an apparent death to close the race
            # between the SIGCHLD handler firing and the liveness check
            if ((pid in terminated or not testpid(pid))
                and not condfn()):
                return -1
            time.sleep(0.1)
        return pid
    finally:
        # prevhandler is only ever set when SIGCHLD exists, so using
        # signal.SIGCHLD directly here cannot raise AttributeError
        if prevhandler is not None:
            signal.signal(signal.SIGCHLD, prevhandler)
2085
2076
def interpolate(prefix, mapping, s, fn=None, escape_prefix=False):
    """Return the result of interpolating items in the mapping into string s.

    prefix is a single character string, or a two character string with
    a backslash as the first character if the prefix needs to be escaped in
    a regular expression.

    fn is an optional function that will be applied to the replacement text
    just before replacement.

    escape_prefix is an optional flag that allows using doubled prefix for
    its escaping.
    """
    if not fn:
        fn = lambda value: value
    alternatives = '|'.join(mapping.keys())
    if escape_prefix:
        # accept the prefix itself as a key so a doubled prefix collapses
        # to a single literal one; note this mutates the caller's mapping
        alternatives += '|' + prefix
        prefix_char = prefix[1:] if len(prefix) > 1 else prefix
        mapping[prefix_char] = prefix_char
    matcher = remod.compile(r'%s(%s)' % (prefix, alternatives))
    # group()[1:] strips the single matched prefix character off the key
    return matcher.sub(lambda m: fn(mapping[m.group()[1:]]), s)
2110
2101
def getport(port):
    """Return the port for a given network service.

    If port is an integer, it's returned as is. If it's a string, it's
    looked up using socket.getservbyname(). If there's no matching
    service, error.Abort is raised.
    """
    try:
        return int(port)
    except ValueError:
        # not a plain number: fall back to a service-name lookup
        try:
            return socket.getservbyname(port)
        except socket.error:
            raise Abort(_("no port number associated with service '%s'")
                        % port)
2127
2118
# canonical spellings of boolean configuration values; parsebool() looks
# strings up here after lowercasing them
_booleans = {'1': True, 'yes': True, 'true': True, 'on': True, 'always': True,
             '0': False, 'no': False, 'false': False, 'off': False,
             'never': False}
2131
2122
def parsebool(s):
    """Parse s into a boolean.

    If s is not a valid boolean, returns None.
    """
    # case-insensitive lookup; dict.get already defaults to None
    return _booleans.get(s.lower())
2138
2129
_hexdig = '0123456789ABCDEFabcdef'
# precomputed map of every two-digit hex escape ('00'..'ff', in any case
# mix) to its character, so _urlunquote avoids per-item int() conversion
_hextochr = dict((a + b, chr(int(a + b, 16)))
                 for a in _hexdig for b in _hexdig)
2142
2133
def _urlunquote(s):
    """Decode HTTP/HTML % encoding.

    >>> _urlunquote('abc%20def')
    'abc def'
    """
    pieces = s.split('%')
    # fastpath: no '%' at all means nothing to decode
    if len(pieces) == 1:
        return s
    out = pieces[0]
    for piece in pieces[1:]:
        try:
            out += _hextochr[piece[:2]] + piece[2:]
        except KeyError:
            # not a valid two-digit hex escape: keep the '%' literally
            out += '%' + piece
        except UnicodeDecodeError:
            # presumably a str/unicode concatenation failure (py2);
            # decode the escape by hand instead
            out += unichr(int(piece[:2], 16)) + piece[2:]
    return out
2162
2153
class url(object):
    r"""Reliable URL parser.

    This parses URLs and provides attributes for the following
    components:

    <scheme>://<user>:<passwd>@<host>:<port>/<path>?<query>#<fragment>

    Missing components are set to None. The only exception is
    fragment, which is set to '' if present but empty.

    If parsefragment is False, fragment is included in query. If
    parsequery is False, query is included in path. If both are
    False, both fragment and query are included in path.

    See http://www.ietf.org/rfc/rfc2396.txt for more information.

    Note that for backward compatibility reasons, bundle URLs do not
    take host names. That means 'bundle://../' has a path of '../'.

    Examples:

    >>> url('http://www.ietf.org/rfc/rfc2396.txt')
    <url scheme: 'http', host: 'www.ietf.org', path: 'rfc/rfc2396.txt'>
    >>> url('ssh://[::1]:2200//home/joe/repo')
    <url scheme: 'ssh', host: '[::1]', port: '2200', path: '/home/joe/repo'>
    >>> url('file:///home/joe/repo')
    <url scheme: 'file', path: '/home/joe/repo'>
    >>> url('file:///c:/temp/foo/')
    <url scheme: 'file', path: 'c:/temp/foo/'>
    >>> url('bundle:foo')
    <url scheme: 'bundle', path: 'foo'>
    >>> url('bundle://../foo')
    <url scheme: 'bundle', path: '../foo'>
    >>> url(r'c:\foo\bar')
    <url path: 'c:\\foo\\bar'>
    >>> url(r'\\blah\blah\blah')
    <url path: '\\\\blah\\blah\\blah'>
    >>> url(r'\\blah\blah\blah#baz')
    <url path: '\\\\blah\\blah\\blah', fragment: 'baz'>
    >>> url(r'file:///C:\users\me')
    <url scheme: 'file', path: 'C:\\users\\me'>

    Authentication credentials:

    >>> url('ssh://joe:xyz@x/repo')
    <url scheme: 'ssh', user: 'joe', passwd: 'xyz', host: 'x', path: 'repo'>
    >>> url('ssh://joe@x/repo')
    <url scheme: 'ssh', user: 'joe', host: 'x', path: 'repo'>

    Query strings and fragments:

    >>> url('http://host/a?b#c')
    <url scheme: 'http', host: 'host', path: 'a', query: 'b', fragment: 'c'>
    >>> url('http://host/a?b#c', parsequery=False, parsefragment=False)
    <url scheme: 'http', host: 'host', path: 'a?b#c'>
    """

    # extra characters left unescaped when quoting user/passwd in __str__
    _safechars = "!~*'()+"
    # extra characters left unescaped when quoting path/fragment in __str__
    _safepchars = "/!~*'()+:\\"
    _matchscheme = remod.compile(r'^[a-zA-Z0-9+.\-]+:').match

    def __init__(self, path, parsequery=True, parsefragment=True):
        # We slowly chomp away at path until we have only the path left
        self.scheme = self.user = self.passwd = self.host = None
        self.port = self.path = self.query = self.fragment = None
        self._localpath = True
        self._hostport = ''
        self._origpath = path

        # strip the fragment first, if requested
        if parsefragment and '#' in path:
            path, self.fragment = path.split('#', 1)
            if not path:
                path = None

        # special case for Windows drive letters and UNC paths
        if hasdriveletter(path) or path.startswith(r'\\'):
            self.path = path
            return

        # For compatibility reasons, we can't handle bundle paths as
        # normal URLS
        if path.startswith('bundle:'):
            self.scheme = 'bundle'
            path = path[7:]
            if path.startswith('//'):
                path = path[2:]
            self.path = path
            return

        # a leading '<scheme>:' with a non-empty scheme makes this non-local
        if self._matchscheme(path):
            parts = path.split(':', 1)
            if parts[0]:
                self.scheme, path = parts
                self._localpath = False

        if not path:
            path = None
            if self._localpath:
                self.path = ''
                return
        else:
            if self._localpath:
                self.path = path
                return

            if parsequery and '?' in path:
                path, self.query = path.split('?', 1)
                if not path:
                    path = None
                if not self.query:
                    self.query = None

            # // is required to specify a host/authority
            if path and path.startswith('//'):
                parts = path[2:].split('/', 1)
                if len(parts) > 1:
                    self.host, path = parts
                else:
                    self.host = parts[0]
                    path = None
                if not self.host:
                    self.host = None
                    # path of file:///d is /d
                    # path of file:///d:/ is d:/, not /d:/
                    if path and not hasdriveletter(path):
                        path = '/' + path

            # split credentials off the authority part
            if self.host and '@' in self.host:
                self.user, self.host = self.host.rsplit('@', 1)
                if ':' in self.user:
                    self.user, self.passwd = self.user.split(':', 1)
                if not self.host:
                    self.host = None

            # Don't split on colons in IPv6 addresses without ports
            if (self.host and ':' in self.host and
                not (self.host.startswith('[') and self.host.endswith(']'))):
                self._hostport = self.host
                self.host, self.port = self.host.rsplit(':', 1)
                if not self.host:
                    self.host = None

            if (self.host and self.scheme == 'file' and
                self.host not in ('localhost', '127.0.0.1', '[::1]')):
                raise Abort(_('file:// URLs can only refer to localhost'))

        self.path = path

        # leave the query string escaped
        for a in ('user', 'passwd', 'host', 'port',
                  'path', 'fragment'):
            v = getattr(self, a)
            if v is not None:
                setattr(self, a, _urlunquote(v))

    def __repr__(self):
        # show only the components that are actually set
        attrs = []
        for a in ('scheme', 'user', 'passwd', 'host', 'port', 'path',
                  'query', 'fragment'):
            v = getattr(self, a)
            if v is not None:
                attrs.append('%s: %r' % (a, v))
        return '<url %s>' % ', '.join(attrs)

    def __str__(self):
        r"""Join the URL's components back into a URL string.

        Examples:

        >>> str(url('http://user:pw@host:80/c:/bob?fo:oo#ba:ar'))
        'http://user:pw@host:80/c:/bob?fo:oo#ba:ar'
        >>> str(url('http://user:pw@host:80/?foo=bar&baz=42'))
        'http://user:pw@host:80/?foo=bar&baz=42'
        >>> str(url('http://user:pw@host:80/?foo=bar%3dbaz'))
        'http://user:pw@host:80/?foo=bar%3dbaz'
        >>> str(url('ssh://user:pw@[::1]:2200//home/joe#'))
        'ssh://user:pw@[::1]:2200//home/joe#'
        >>> str(url('http://localhost:80//'))
        'http://localhost:80//'
        >>> str(url('http://localhost:80/'))
        'http://localhost:80/'
        >>> str(url('http://localhost:80'))
        'http://localhost:80/'
        >>> str(url('bundle:foo'))
        'bundle:foo'
        >>> str(url('bundle://../foo'))
        'bundle:../foo'
        >>> str(url('path'))
        'path'
        >>> str(url('file:///tmp/foo/bar'))
        'file:///tmp/foo/bar'
        >>> str(url('file:///c:/tmp/foo/bar'))
        'file:///c:/tmp/foo/bar'
        >>> print url(r'bundle:foo\bar')
        bundle:foo\bar
        >>> print url(r'file:///D:\data\hg')
        file:///D:\data\hg
        """
        if self._localpath:
            s = self.path
            if self.scheme == 'bundle':
                s = 'bundle:' + s
            if self.fragment:
                s += '#' + self.fragment
            return s

        s = self.scheme + ':'
        if self.user or self.passwd or self.host:
            s += '//'
        elif self.scheme and (not self.path or self.path.startswith('/')
                              or hasdriveletter(self.path)):
            s += '//'
            if hasdriveletter(self.path):
                s += '/'
        if self.user:
            s += urllib.quote(self.user, safe=self._safechars)
        if self.passwd:
            s += ':' + urllib.quote(self.passwd, safe=self._safechars)
        if self.user or self.passwd:
            s += '@'
        if self.host:
            # bracketed IPv6 literals are emitted verbatim, unquoted
            if not (self.host.startswith('[') and self.host.endswith(']')):
                s += urllib.quote(self.host)
            else:
                s += self.host
        if self.port:
            s += ':' + urllib.quote(self.port)
        if self.host:
            s += '/'
        if self.path:
            # TODO: similar to the query string, we should not unescape the
            # path when we store it, the path might contain '%2f' = '/',
            # which we should *not* escape.
            s += urllib.quote(self.path, safe=self._safepchars)
        if self.query:
            # we store the query in escaped form.
            s += '?' + self.query
        if self.fragment is not None:
            s += '#' + urllib.quote(self.fragment, safe=self._safepchars)
        return s

    def authinfo(self):
        '''Return (url_without_credentials, authinfo) where authinfo is
        None when no user is set, else a tuple in the shape expected by
        urllib2 password managers (see comment below).'''
        user, passwd = self.user, self.passwd
        try:
            # render the URL without embedded credentials
            self.user, self.passwd = None, None
            s = str(self)
        finally:
            self.user, self.passwd = user, passwd
        if not self.user:
            return (s, None)
        # authinfo[1] is passed to urllib2 password manager, and its
        # URIs must not contain credentials. The host is passed in the
        # URIs list because Python < 2.4.3 uses only that to search for
        # a password.
        return (s, (None, (s, self.host),
                    self.user, self.passwd or ''))

    def isabs(self):
        '''Report whether this URL cannot be joined onto a base path:
        remote scheme, Windows drive letter, UNC path, or rooted path.'''
        if self.scheme and self.scheme != 'file':
            return True # remote URL
        if hasdriveletter(self.path):
            return True # absolute for our purposes - can't be joined()
        if self.path.startswith(r'\\'):
            return True # Windows UNC path
        if self.path.startswith('/'):
            return True # POSIX-style
        return False

    def localpath(self):
        '''Return the local filesystem path for file:// and bundle: URLs,
        or the original unparsed string for everything else.'''
        if self.scheme == 'file' or self.scheme == 'bundle':
            path = self.path or '/'
            # For Windows, we need to promote hosts containing drive
            # letters to paths with drive letters.
            if hasdriveletter(self._hostport):
                path = self._hostport + '/' + self.path
            elif (self.host is not None and self.path
                  and not hasdriveletter(path)):
                path = '/' + path
            return path
        return self._origpath

    def islocal(self):
        '''whether localpath will return something that posixfile can open'''
        return (not self.scheme or self.scheme == 'file'
                or self.scheme == 'bundle')
2449
2440
def hasscheme(path):
    '''True if path parses as a URL with an explicit scheme prefix.'''
    parsed = url(path)
    return bool(parsed.scheme)
2452
2443
def hasdriveletter(path):
    '''Report whether path begins with a Windows drive letter ("c:...").

    Falsy inputs ('' or None) are returned unchanged, so the result is
    only guaranteed truthy/falsy, not strictly boolean.
    '''
    if not path:
        return path
    return path[1:2] == ':' and path[0:1].isalpha()
2455
2446
def urllocalpath(path):
    '''Return the local filesystem path for the url string ``path``,
    treating '?' and '#' as ordinary path characters.'''
    parsed = url(path, parsequery=False, parsefragment=False)
    return parsed.localpath()
2458
2449
def hidepassword(u):
    '''hide user credential in a url string

    Any password component is replaced by '***' before re-serializing.'''
    parsed = url(u)
    if parsed.passwd:
        parsed.passwd = '***'
    return str(parsed)
2465
2456
def removeauth(u):
    '''remove all authentication information from a url string'''
    parsed = url(u)
    parsed.user = parsed.passwd = None
    return str(parsed)
2471
2462
def isatty(fp):
    '''Return fp.isatty(), treating objects without an isatty method as
    non-ttys instead of raising.'''
    try:
        result = fp.isatty()
    except AttributeError:
        return False
    return result
2477
2468
# Human-readable formatter for durations (in seconds), built with
# unitcountfn (defined earlier in this module). Entries step down from
# whole seconds through ms, us and ns; each triple is presumably
# (threshold multiplier, unit divisor, format string) -- see unitcountfn.
timecount = unitcountfn(
    (1, 1e3, _('%.0f s')),
    (100, 1, _('%.1f s')),
    (10, 1, _('%.2f s')),
    (1, 1, _('%.3f s')),
    (100, 0.001, _('%.1f ms')),
    (10, 0.001, _('%.2f ms')),
    (1, 0.001, _('%.3f ms')),
    (100, 0.000001, _('%.1f us')),
    (10, 0.000001, _('%.2f us')),
    (1, 0.000001, _('%.3f us')),
    (100, 0.000000001, _('%.1f ns')),
    (10, 0.000000001, _('%.2f ns')),
    (1, 0.000000001, _('%.3f ns')),
    )
2493
2484
# current indentation (in spaces) for nested @timed reports; a one-element
# list so the wrappers can mutate it in place
_timenesting = [0]
2495
2486
def timed(func):
    '''Report the execution time of a function call to stderr.

    During development, use as a decorator when you need to measure
    the cost of a function, e.g. as follows:

    @util.timed
    def foo(a, b, c):
        pass
    '''

    def wrapper(*args, **kwargs):
        indent = 2
        begin = time.time()
        # indent nested @timed reports two spaces per level
        _timenesting[0] += indent
        try:
            return func(*args, **kwargs)
        finally:
            duration = time.time() - begin
            _timenesting[0] -= indent
            prefix = ' ' * _timenesting[0]
            sys.stderr.write('%s%s: %s\n'
                             % (prefix, func.__name__, timecount(duration)))
    return wrapper
2520
2511
# size suffixes understood by sizetoint with their byte multipliers; the
# two-letter forms must precede the bare 'b' so '2kb' is not read as '2k'+'b'
_sizeunits = (('m', 2**20), ('k', 2**10), ('g', 2**30),
              ('kb', 2**10), ('mb', 2**20), ('gb', 2**30), ('b', 1))

def sizetoint(s):
    '''Convert a space specifier to a byte count.

    >>> sizetoint('30')
    30
    >>> sizetoint('2.2kb')
    2252
    >>> sizetoint('6M')
    6291456
    '''
    spec = s.strip().lower()
    try:
        for suffix, multiplier in _sizeunits:
            if spec.endswith(suffix):
                return int(float(spec[:-len(suffix)]) * multiplier)
        return int(spec)
    except ValueError:
        raise error.ParseError(_("couldn't parse size: %s") % s)
2542
2533
class hooks(object):
    '''A collection of hook functions that can be used to extend a
    function's behavior. Hooks are called in lexicographic order,
    based on the names of their sources.'''

    def __init__(self):
        # (source, hook) pairs; left unsorted until invocation
        self._hooks = []

    def add(self, source, hook):
        self._hooks.append((source, hook))

    def __call__(self, *args):
        # sort by source name so the call order is deterministic
        self._hooks.sort(key=lambda pair: pair[0])
        return [hook(*args) for _source, hook in self._hooks]
2560
2551
def debugstacktrace(msg='stacktrace', skip=0, f=sys.stderr, otherf=sys.stdout):
    '''Writes a message to f (stderr) with a nicely formatted stacktrace.
    Skips the 'skip' last entries. By default it will flush stdout first.
    It can be used everywhere and do intentionally not require an ui object.
    Not be used in production code but very convenient while developing.
    '''
    if otherf:
        # flush the other stream first so the two outputs interleave sanely
        otherf.flush()
    f.write('%s at:\n' % msg)
    # drop this function's own frame plus 'skip' caller frames
    frames = traceback.extract_stack()[:-skip - 1]
    entries = [('%s:%s' % (filename, lineno), funcname)
               for filename, lineno, funcname, _line in frames]
    if entries:
        # pad locations so the 'in <func>' column lines up
        width = max(len(location) for location, _func in entries)
        for location, funcname in entries:
            f.write(' %-*s in %s\n' % (width, location, funcname))
    f.flush()
2577
2568
class dirs(object):
    '''a multiset of directory names from a dirstate or manifest'''

    def __init__(self, map, skip=None):
        # reference counts: directory name -> count (see addpath)
        self._dirs = {}
        add = self.addpath  # hoist the bound method out of the loop
        if safehasattr(map, 'iteritems') and skip is not None:
            # dirstate-style mapping: ignore entries whose state is 'skip'
            for fname, entry in map.iteritems():
                if entry[0] != skip:
                    add(fname)
        else:
            for fname in map:
                add(fname)

    def addpath(self, path):
        '''Walk path's ancestors from deepest to shallowest: unknown ones
        are created with count 1; at the first one already present the
        count is bumped and the walk stops (its own ancestors are
        already accounted for).'''
        counts = self._dirs
        for base in finddirs(path):
            if base in counts:
                counts[base] += 1
                return
            counts[base] = 1

    def delpath(self, path):
        '''Reverse of addpath: entries at count 1 are deleted; at the
        first ancestor with a larger count it is decremented and the
        walk stops.'''
        counts = self._dirs
        for base in finddirs(path):
            if counts[base] > 1:
                counts[base] -= 1
                return
            del counts[base]

    def __iter__(self):
        return self._dirs.iterkeys()

    def __contains__(self, d):
        return d in self._dirs
2613
2604
# prefer the C implementation from the parsers module when it is available;
# it shadows the pure-Python dirs class defined above
if safehasattr(parsers, 'dirs'):
    dirs = parsers.dirs
2616
2607
def finddirs(path):
    '''Yield each '/'-separated ancestor directory of path, from the
    deepest outward (e.g. 'a/b/c' -> 'a/b', 'a').'''
    cut = path.rfind('/')
    while cut != -1:
        yield path[:cut]
        cut = path.rfind('/', 0, cut)
2622
2613
2623 # compression utility
2614 # compression utility
2624
2615
class nocompress(object):
    '''Pass-through object exposing the compress/flush interface of the
    real compressors while leaving the data untouched.'''
    def compress(self, x):
        return x
    def flush(self):
        # nothing is ever buffered, so there is nothing left to emit
        return ""
2630
2621
# compression engines keyed by type identifier; values are zero-argument
# factories producing objects with compress()/flush(). None and 'UN' both
# mean "no compression".
compressors = {
    None: nocompress,
    # lambda to prevent early import
    'BZ': lambda: bz2.BZ2Compressor(),
    'GZ': lambda: zlib.compressobj(),
    }
# also support the old form by courtesies
compressors['UN'] = compressors[None]
2639
2630
def _makedecompressor(decompcls):
    '''Return a function that wraps a compressed file handle in a
    chunkbuffer of decompressed data, using a fresh decompcls instance
    per stream.'''
    def _stream(fh):
        # the decompressor is created lazily, on first iteration
        engine = decompcls()
        for chunk in filechunkiter(fh):
            yield engine.decompress(chunk)
    def _wrap(fh):
        return chunkbuffer(_stream(fh))
    return _wrap
2648
2639
class ctxmanager(object):
    '''A context manager for use in 'with' blocks to allow multiple
    contexts to be entered at once. This is both safer and more
    flexible than contextlib.nested.

    Once Mercurial supports Python 2.7+, this will become mostly
    unnecessary.
    '''

    def __init__(self, *args):
        '''Accepts a list of no-argument functions that return context
        managers. These will be invoked at __call__ time.'''
        self._pending = args
        self._atexit = []

    def __enter__(self):
        return self

    def __call__(self):
        '''Create and enter context managers in the order in which they were
        passed to the constructor.'''
        values = []
        for func in self._pending:
            obj = func()
            values.append(obj.__enter__())
            self._atexit.append(obj.__exit__)
        del self._pending
        return values

    def atexit(self, func, *args, **kwargs):
        '''Add a function to call when this context manager exits. The
        ordering of multiple atexit calls is unspecified, save that
        they will happen before any __exit__ functions.'''
        def wrapper(exc_type, exc_val, exc_tb):
            func(*args, **kwargs)
        self._atexit.append(wrapper)
        return func

    def __exit__(self, exc_type, exc_val, exc_tb):
        '''Context managers are exited in the reverse order from which
        they were created.'''
        received = exc_type is not None
        suppressed = False
        pending = None
        self._atexit.reverse()
        for exitfunc in self._atexit:
            try:
                if exitfunc(exc_type, exc_val, exc_tb):
                    # this handler swallowed the active exception
                    suppressed = True
                    exc_type = None
                    exc_val = None
                    exc_tb = None
            except BaseException:
                # remember the most recent failure; it is re-raised once all
                # remaining handlers have run. (A redundant, immediately
                # overwritten 'pending = sys.exc_info()' call was removed.)
                exc_type, exc_val, exc_tb = pending = sys.exc_info()
        del self._atexit
        if pending:
            raise exc_val
        return received and suppressed
2708
2699
2709 def _bz2():
2700 def _bz2():
2710 d = bz2.BZ2Decompressor()
2701 d = bz2.BZ2Decompressor()
2711 # Bzip2 stream start with BZ, but we stripped it.
2702 # Bzip2 stream start with BZ, but we stripped it.
2712 # we put it back for good measure.
2703 # we put it back for good measure.
2713 d.decompress('BZ')
2704 d.decompress('BZ')
2714 return d
2705 return d
2715
2706
# decompression engines keyed by type identifier; each value maps a
# compressed file handle to readable decompressed data (see
# _makedecompressor). '_truncatedBZ' handles bzip2 streams whose leading
# 'BZ' magic was already consumed. None and 'UN' both mean "no compression".
decompressors = {None: lambda fh: fh,
                 '_truncatedBZ': _makedecompressor(_bz2),
                 'BZ': _makedecompressor(lambda: bz2.BZ2Decompressor()),
                 'GZ': _makedecompressor(lambda: zlib.decompressobj()),
                 }
# also support the old form by courtesies
decompressors['UN'] = decompressors[None]
2723
2714
# convenient shortcut for debugstacktrace, mainly for interactive debugging
dst = debugstacktrace
General Comments 0
You need to be logged in to leave comments. Login now