##// END OF EJS Templates
util: remove compressorobj API from compression engines...
Gregory Szorc -
r30359:673f0fdc default
parent child Browse files
Show More
@@ -1,3112 +1,3088
1 # util.py - Mercurial utility functions and platform specific implementations
1 # util.py - Mercurial utility functions and platform specific implementations
2 #
2 #
3 # Copyright 2005 K. Thananchayan <thananck@yahoo.com>
3 # Copyright 2005 K. Thananchayan <thananck@yahoo.com>
4 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
5 # Copyright 2006 Vadim Gelfer <vadim.gelfer@gmail.com>
5 # Copyright 2006 Vadim Gelfer <vadim.gelfer@gmail.com>
6 #
6 #
7 # This software may be used and distributed according to the terms of the
7 # This software may be used and distributed according to the terms of the
8 # GNU General Public License version 2 or any later version.
8 # GNU General Public License version 2 or any later version.
9
9
10 """Mercurial utility functions and platform specific implementations.
10 """Mercurial utility functions and platform specific implementations.
11
11
12 This contains helper routines that are independent of the SCM core and
12 This contains helper routines that are independent of the SCM core and
13 hide platform-specific details from the core.
13 hide platform-specific details from the core.
14 """
14 """
15
15
16 from __future__ import absolute_import
16 from __future__ import absolute_import
17
17
18 import bz2
18 import bz2
19 import calendar
19 import calendar
20 import collections
20 import collections
21 import datetime
21 import datetime
22 import errno
22 import errno
23 import gc
23 import gc
24 import hashlib
24 import hashlib
25 import imp
25 import imp
26 import os
26 import os
27 import re as remod
27 import re as remod
28 import shutil
28 import shutil
29 import signal
29 import signal
30 import socket
30 import socket
31 import string
31 import string
32 import subprocess
32 import subprocess
33 import sys
33 import sys
34 import tempfile
34 import tempfile
35 import textwrap
35 import textwrap
36 import time
36 import time
37 import traceback
37 import traceback
38 import zlib
38 import zlib
39
39
40 from . import (
40 from . import (
41 encoding,
41 encoding,
42 error,
42 error,
43 i18n,
43 i18n,
44 osutil,
44 osutil,
45 parsers,
45 parsers,
46 pycompat,
46 pycompat,
47 )
47 )
48
48
# Re-export Python 2/3 compatibility aliases from pycompat into this
# module's namespace so callers can use e.g. util.httplib regardless of
# the running Python version.
for attr in (
    'empty',
    'httplib',
    'httpserver',
    'pickle',
    'queue',
    'urlerr',
    'urlparse',
    # we do import urlreq, but we do it outside the loop
    #'urlreq',
    'stringio',
    'socketserver',
    'xmlrpclib',
):
    # sysstr converts to the native str type expected by getattr/globals
    a = pycompat.sysstr(attr)
    globals()[a] = getattr(pycompat, a)

# This line is to make pyflakes happy:
urlreq = pycompat.urlreq
68
68
# Pick the platform-specific implementation module once, then alias its
# entry points at module level so the rest of the codebase can call e.g.
# util.rename without caring about the operating system.
if os.name == 'nt':
    from . import windows as platform
else:
    from . import posix as platform

_ = i18n._

# Platform-specific function aliases (concrete implementations live in
# the windows/posix modules selected above).
bindunixsocket = platform.bindunixsocket
cachestat = platform.cachestat
checkexec = platform.checkexec
checklink = platform.checklink
copymode = platform.copymode
executablepath = platform.executablepath
expandglobs = platform.expandglobs
explainexit = platform.explainexit
findexe = platform.findexe
gethgcmd = platform.gethgcmd
getuser = platform.getuser
getpid = os.getpid
groupmembers = platform.groupmembers
groupname = platform.groupname
hidewindow = platform.hidewindow
isexec = platform.isexec
isowner = platform.isowner
localpath = platform.localpath
lookupreg = platform.lookupreg
makedir = platform.makedir
nlinks = platform.nlinks
normpath = platform.normpath
normcase = platform.normcase
normcasespec = platform.normcasespec
normcasefallback = platform.normcasefallback
openhardlinks = platform.openhardlinks
oslink = platform.oslink
parsepatchoutput = platform.parsepatchoutput
pconvert = platform.pconvert
poll = platform.poll
popen = platform.popen
posixfile = platform.posixfile
quotecommand = platform.quotecommand
readpipe = platform.readpipe
rename = platform.rename
removedirs = platform.removedirs
samedevice = platform.samedevice
samefile = platform.samefile
samestat = platform.samestat
setbinary = platform.setbinary
setflags = platform.setflags
setsignalhandler = platform.setsignalhandler
shellquote = platform.shellquote
spawndetached = platform.spawndetached
split = platform.split
# prefer the C-accelerated osutil implementation when available
statfiles = getattr(osutil, 'statfiles', platform.statfiles)
statisexec = platform.statisexec
statislink = platform.statislink
testpid = platform.testpid
umask = platform.umask
unlink = platform.unlink
unlinkpath = platform.unlinkpath
username = platform.username

# Python compatibility

# sentinel used to distinguish "no value" from a legitimate None
_notset = object()

# disable Python's problematic floating point timestamps (issue4836)
# (Python hypocritically says you shouldn't change this behavior in
# libraries, and sure enough Mercurial is not a library.)
os.stat_float_times(False)
139
139
def safehasattr(thing, attr):
    """Return True if `thing` has attribute `attr`.

    Unlike a plain getattr-with-default chained inline, the lookup result is
    bound to a local first; the sentinel `_notset` distinguishes a missing
    attribute from an attribute whose value is None.
    """
    found = getattr(thing, attr, _notset)
    return found is not _notset
142
142
# Supported digest algorithms: name -> hashlib constructor.
DIGESTS = {
    'md5': hashlib.md5,
    'sha1': hashlib.sha1,
    'sha512': hashlib.sha512,
}
# List of digest types from strongest to weakest
DIGESTS_BY_STRENGTH = ['sha512', 'sha1', 'md5']

# Sanity check: every entry of the strength ranking must be a supported
# digest. NOTE(review): the loop variable `k` stays bound at module level
# after this loop finishes.
for k in DIGESTS_BY_STRENGTH:
    assert k in DIGESTS
153
153
class digester(object):
    """helper to compute digests.

    This helper can be used to compute one or more digests given their name.

    >>> d = digester(['md5', 'sha1'])
    >>> d.update('foo')
    >>> [k for k in sorted(d)]
    ['md5', 'sha1']
    >>> d['md5']
    'acbd18db4cc2f85cedef654fccc4a4d8'
    >>> d['sha1']
    '0beec7b5ea3f0fdbc95d0dd47f3c5bc275da8a33'
    >>> digester.preferred(['md5', 'sha1'])
    'sha1'
    """

    def __init__(self, digests, s=''):
        # digests: iterable of names from DIGESTS; s: optional initial data
        # fed to all hashes immediately. Raises Abort on an unknown name.
        self._hashes = {}
        for k in digests:
            if k not in DIGESTS:
                raise Abort(_('unknown digest type: %s') % k)
            self._hashes[k] = DIGESTS[k]()
        if s:
            self.update(s)

    def update(self, data):
        # Feed `data` to every underlying hash object.
        for h in self._hashes.values():
            h.update(data)

    def __getitem__(self, key):
        if key not in DIGESTS:
            # BUGFIX: the message was previously formatted with `k`, a
            # leaked module-level loop variable, instead of the requested
            # `key` — producing a wrong (or undefined) name in the error.
            raise Abort(_('unknown digest type: %s') % key)
        return self._hashes[key].hexdigest()

    def __iter__(self):
        return iter(self._hashes)

    @staticmethod
    def preferred(supported):
        """returns the strongest digest type in both supported and DIGESTS."""

        for k in DIGESTS_BY_STRENGTH:
            if k in supported:
                return k
        return None
200
200
class digestchecker(object):
    """file handle wrapper that additionally checks content against a given
    size and digests.

    d = digestchecker(fh, size, {'md5': '...'})

    When multiple digests are given, all of them are validated.
    """

    def __init__(self, fh, size, digests):
        # fh: wrapped file-like object; size: expected total byte count;
        # digests: mapping of digest name -> expected hex value.
        self._fh = fh
        self._size = size
        self._got = 0  # number of bytes read so far
        self._digests = dict(digests)
        self._digester = digester(self._digests.keys())

    def read(self, length=-1):
        # Read from the wrapped handle, updating the running digests and
        # the byte counter as a side effect.
        content = self._fh.read(length)
        self._digester.update(content)
        self._got += len(content)
        return content

    def validate(self):
        # Raise Abort if the observed size or any digest differs from what
        # was declared at construction time.
        if self._size != self._got:
            raise Abort(_('size mismatch: expected %d, got %d') %
                        (self._size, self._got))
        for k, v in self._digests.items():
            if v != self._digester[k]:
                # i18n: first parameter is a digest name
                raise Abort(_('%s mismatch: expected %s, got %s') %
                            (k, v, self._digester[k]))
232
232
# Provide a `buffer` callable everywhere: keep the builtin where it exists
# (Python 2); otherwise fall back to slicing on py2 or a zero-copy
# memoryview slice on py3.
try:
    buffer = buffer
except NameError:
    if not pycompat.ispy3:
        def buffer(sliceable, offset=0):
            return sliceable[offset:]
    else:
        def buffer(sliceable, offset=0):
            return memoryview(sliceable)[offset:]

# close_fds is only passed to subprocess on POSIX (see popen2/popen4)
closefds = os.name == 'posix'

# chunk size used by bufferedinputpipe._fillbuffer when reading
_chunksize = 4096
246
246
class bufferedinputpipe(object):
    """a manually buffered input pipe

    Python will not let us use buffered IO and lazy reading with 'polling' at
    the same time. We cannot probe the buffer state and select will not detect
    that data are ready to read if they are already buffered.

    This class let us work around that by implementing its own buffering
    (allowing efficient readline) while offering a way to know if the buffer is
    empty from the output (allowing collaboration of the buffer with polling).

    This class lives in the 'util' module because it makes use of the 'os'
    module from the python stdlib.
    """

    def __init__(self, input):
        # input: raw pipe/file object; data is pulled via os.read on its
        # file descriptor, bypassing Python-level buffering.
        self._input = input
        self._buffer = []   # buffered chunks, oldest first
        self._eof = False   # set once os.read returns no data
        self._lenbuf = 0    # total number of buffered bytes

    @property
    def hasbuffer(self):
        """True is any data is currently buffered

        This will be used externally a pre-step for polling IO. If there is
        already data then no polling should be set in place."""
        return bool(self._buffer)

    @property
    def closed(self):
        return self._input.closed

    def fileno(self):
        return self._input.fileno()

    def close(self):
        return self._input.close()

    def read(self, size):
        # Fill the buffer until `size` bytes are available or EOF, then
        # hand out at most `size` bytes.
        while (not self._eof) and (self._lenbuf < size):
            self._fillbuffer()
        return self._frombuffer(size)

    def readline(self, *args, **kwargs):
        if 1 < len(self._buffer):
            # this should not happen because both read and readline end with a
            # _frombuffer call that collapse it.
            self._buffer = [''.join(self._buffer)]
            self._lenbuf = len(self._buffer[0])
        # lfi: index of the first newline within the newest chunk (-1: none)
        lfi = -1
        if self._buffer:
            lfi = self._buffer[-1].find('\n')
        while (not self._eof) and lfi < 0:
            self._fillbuffer()
            if self._buffer:
                lfi = self._buffer[-1].find('\n')
        size = lfi + 1
        if lfi < 0: # end of file
            size = self._lenbuf
        elif 1 < len(self._buffer):
            # we need to take previous chunks into account
            size += self._lenbuf - len(self._buffer[-1])
        return self._frombuffer(size)

    def _frombuffer(self, size):
        """return at most 'size' data from the buffer

        The data are removed from the buffer."""
        if size == 0 or not self._buffer:
            return ''
        buf = self._buffer[0]
        if 1 < len(self._buffer):
            # collapse all chunks so slicing below sees contiguous data
            buf = ''.join(self._buffer)

        data = buf[:size]
        buf = buf[len(data):]
        if buf:
            self._buffer = [buf]
            self._lenbuf = len(buf)
        else:
            self._buffer = []
            self._lenbuf = 0
        return data

    def _fillbuffer(self):
        """read data to the buffer"""
        data = os.read(self._input.fileno(), _chunksize)
        if not data:
            self._eof = True
        else:
            self._lenbuf += len(data)
            self._buffer.append(data)
340
340
def popen2(cmd, env=None, newlines=False):
    """Run ``cmd`` through the shell; return its (stdin, stdout) pipes."""
    # Setting bufsize to -1 lets the system decide the buffer size.
    # The default for bufsize is 0, meaning unbuffered. This leads to
    # poor performance on Mac OS X: http://bugs.python.org/issue4194
    p = subprocess.Popen(cmd, shell=True, bufsize=-1,
                         close_fds=closefds,
                         stdin=subprocess.PIPE, stdout=subprocess.PIPE,
                         universal_newlines=newlines,
                         env=env)
    return p.stdin, p.stdout
351
351
def popen3(cmd, env=None, newlines=False):
    """Run ``cmd`` like popen4 but return only (stdin, stdout, stderr)."""
    pipes = popen4(cmd, env, newlines)
    # drop the trailing Popen object from popen4's 4-tuple
    return pipes[0], pipes[1], pipes[2]
355
355
def popen4(cmd, env=None, newlines=False, bufsize=-1):
    """Run ``cmd`` through the shell; return (stdin, stdout, stderr, proc).

    The last element is the subprocess.Popen object so callers can wait on
    the process or inspect its return code.
    """
    p = subprocess.Popen(cmd, shell=True, bufsize=bufsize,
                         close_fds=closefds,
                         stdin=subprocess.PIPE, stdout=subprocess.PIPE,
                         stderr=subprocess.PIPE,
                         universal_newlines=newlines,
                         env=env)
    return p.stdin, p.stdout, p.stderr, p
364
364
def version():
    """Return version information if available."""
    try:
        from . import __version__
    except ImportError:
        # no generated __version__ module (e.g. running from a source
        # checkout that was never built)
        return 'unknown'
    return __version__.version
372
372
def versiontuple(v=None, n=4):
    """Parses a Mercurial version string into an N-tuple.

    The version string to be parsed is specified with the ``v`` argument.
    If it isn't defined, the current Mercurial version string will be parsed.

    ``n`` can be 2, 3, or 4. Here is how some version strings map to
    returned values:

    >>> v = '3.6.1+190-df9b73d2d444'
    >>> versiontuple(v, 2)
    (3, 6)
    >>> versiontuple(v, 3)
    (3, 6, 1)
    >>> versiontuple(v, 4)
    (3, 6, 1, '190-df9b73d2d444')

    >>> versiontuple('3.6.1+190-df9b73d2d444+20151118')
    (3, 6, 1, '190-df9b73d2d444+20151118')

    >>> v = '3.6'
    >>> versiontuple(v, 2)
    (3, 6)
    >>> versiontuple(v, 3)
    (3, 6, None)
    >>> versiontuple(v, 4)
    (3, 6, None, None)

    >>> v = '3.9-rc'
    >>> versiontuple(v, 2)
    (3, 9)
    >>> versiontuple(v, 3)
    (3, 9, None)
    >>> versiontuple(v, 4)
    (3, 9, None, 'rc')

    >>> v = '3.9-rc+2-02a8fea4289b'
    >>> versiontuple(v, 2)
    (3, 9)
    >>> versiontuple(v, 3)
    (3, 9, None)
    >>> versiontuple(v, 4)
    (3, 9, None, 'rc+2-02a8fea4289b')
    """
    if not v:
        v = version()
    # FIX: use a raw string for the pattern; '[\+-]' contains the invalid
    # escape sequence '\+' which is deprecated (and later a syntax error)
    # on modern Python 3. Split once on the first '+' or '-'.
    parts = remod.split(r'[\+-]', v, 1)
    if len(parts) == 1:
        vparts, extra = parts[0], None
    else:
        vparts, extra = parts

    vints = []
    for i in vparts.split('.'):
        try:
            vints.append(int(i))
        except ValueError:
            # stop at the first non-numeric component
            break
    # (3, 6) -> (3, 6, None)
    while len(vints) < 3:
        vints.append(None)

    if n == 2:
        return (vints[0], vints[1])
    if n == 3:
        return (vints[0], vints[1], vints[2])
    if n == 4:
        return (vints[0], vints[1], vints[2], extra)
441
441
# used by parsedate
defaultdateformats = (
    '%Y-%m-%dT%H:%M:%S', # the 'real' ISO8601
    '%Y-%m-%dT%H:%M',    # without seconds
    '%Y-%m-%dT%H%M%S',   # another awful but legal variant without :
    '%Y-%m-%dT%H%M',     # without seconds
    '%Y-%m-%d %H:%M:%S', # our common legal variant
    '%Y-%m-%d %H:%M',    # without seconds
    '%Y-%m-%d %H%M%S',   # without :
    '%Y-%m-%d %H%M',     # without seconds
    '%Y-%m-%d %I:%M:%S%p',
    '%Y-%m-%d %H:%M',
    '%Y-%m-%d %I:%M%p',
    '%Y-%m-%d',
    '%m-%d',
    '%m/%d',
    '%m/%d/%y',
    '%m/%d/%Y',
    '%a %b %d %H:%M:%S %Y',
    '%a %b %d %I:%M:%S%p %Y',
    '%a, %d %b %Y %H:%M:%S', # GNU coreutils "/bin/date --rfc-2822"
    '%b %d %H:%M:%S %Y',
    '%b %d %I:%M:%S%p %Y',
    '%b %d %H:%M:%S',
    '%b %d %I:%M:%S%p',
    '%b %d %H:%M',
    '%b %d %I:%M%p',
    '%b %d %Y',
    '%b %d',
    '%H:%M:%S',
    '%I:%M:%S%p',
    '%H:%M',
    '%I:%M%p',
)

# defaultdateformats plus coarser, year/month-only forms
# (presumably too ambiguous for everyday parsing — verify against callers)
extendeddateformats = defaultdateformats + (
    "%Y",
    "%Y-%m",
    "%b",
    "%b %Y",
)
483
483
def cachefunc(func):
    '''cache the result of function calls'''
    # XXX doesn't handle keywords args
    argcount = func.__code__.co_argcount
    if argcount == 0:
        # zero-argument functions: memoize the single result in a one-slot
        # list (computed lazily on first call)
        slot = []
        def f():
            if not slot:
                slot.append(func())
            return slot[0]
        return f
    memo = {}
    if argcount == 1:
        # we gain a small amount of time because
        # we don't need to pack/unpack the list
        def f(arg):
            if arg not in memo:
                memo[arg] = func(arg)
            return memo[arg]
    else:
        def f(*args):
            if args not in memo:
                memo[args] = func(*args)
            return memo[args]

    return f
509
509
class sortdict(dict):
    '''a simple sorted dictionary

    Keeps entries in insertion order: iteration, keys() and items() reflect
    the order in which keys were (last) set; re-setting an existing key
    moves it to the end.
    '''
    def __init__(self, data=None):
        self._list = []  # keys in insertion order
        if data:
            self.update(data)
    def copy(self):
        return sortdict(self)
    def __setitem__(self, key, val):
        if key in self:
            # re-insertion moves the key to the end of the order
            self._list.remove(key)
        self._list.append(key)
        dict.__setitem__(self, key, val)
    def __iter__(self):
        return self._list.__iter__()
    def update(self, src):
        if isinstance(src, dict):
            src = src.iteritems()
        for k, v in src:
            self[k] = v
    def clear(self):
        dict.clear(self)
        self._list = []
    def items(self):
        return [(k, self[k]) for k in self._list]
    def __delitem__(self, key):
        dict.__delitem__(self, key)
        self._list.remove(key)
    def pop(self, key, *args, **kwargs):
        # BUGFIX: the result of dict.pop was previously discarded, so
        # sortdict.pop always returned None in violation of the dict
        # contract. Return the popped value (or the supplied default).
        value = dict.pop(self, key, *args, **kwargs)
        try:
            self._list.remove(key)
        except ValueError:
            # key was absent (a default was returned above)
            pass
        return value
    def keys(self):
        return self._list
    def iterkeys(self):
        return self._list.__iter__()
    def iteritems(self):
        for k in self._list:
            yield k, self[k]
    def insert(self, index, key, val):
        # place `key` at an explicit position in the ordering
        self._list.insert(index, key)
        dict.__setitem__(self, key, val)
    def __repr__(self):
        if not self:
            return '%s()' % self.__class__.__name__
        return '%s(%r)' % (self.__class__.__name__, self.items())
558
558
class _lrucachenode(object):
    """A node in a doubly linked list.

    Holds a reference to nodes on either side as well as a key-value
    pair for the dictionary entry.
    """
    # __slots__ keeps per-node memory low; caches may hold many nodes
    __slots__ = (u'next', u'prev', u'key', u'value')

    def __init__(self):
        self.next = None
        self.prev = None

        # `_notset` marks a node that holds no cache entry yet
        self.key = _notset
        self.value = None

    def markempty(self):
        """Mark the node as emptied."""
        self.key = _notset
577
577
class lrucachedict(object):
    """Dict that caches most recent accesses and sets.

    The dict consists of an actual backing dict - indexed by original
    key - and a doubly linked circular list defining the order of entries in
    the cache.

    The head node is the newest entry in the cache. If the cache is full,
    we recycle head.prev and make it the new head. Cache accesses result in
    the node being moved to before the existing head and being marked as the
    new head node.
    """
    def __init__(self, max):
        # Backing store: key -> _lrucachenode.
        self._cache = {}

        # The circular list starts as a single empty node linked to itself
        # and grows lazily (via _addcapacity) up to ``max`` nodes.
        self._head = head = _lrucachenode()
        head.prev = head
        head.next = head
        self._size = 1
        self._capacity = max

    def __len__(self):
        return len(self._cache)

    def __contains__(self, k):
        return k in self._cache

    def __iter__(self):
        # We don't have to iterate in cache order, but why not.
        n = self._head
        for i in range(len(self._cache)):
            yield n.key
            n = n.next

    def __getitem__(self, k):
        # A lookup counts as a use: promote the node to newest.
        node = self._cache[k]
        self._movetohead(node)
        return node.value

    def __setitem__(self, k, v):
        node = self._cache.get(k)
        # Replace existing value and mark as newest.
        if node is not None:
            node.value = v
            self._movetohead(node)
            return

        if self._size < self._capacity:
            node = self._addcapacity()
        else:
            # Grab the last/oldest item.
            node = self._head.prev

            # At capacity. Kill the old entry.
            if node.key is not _notset:
                del self._cache[node.key]

        node.key = k
        node.value = v
        self._cache[k] = node
        # And mark it as newest entry. No need to adjust order since it
        # is already self._head.prev.
        self._head = node

    def __delitem__(self, k):
        node = self._cache.pop(k)
        node.markempty()

        # Temporarily mark as newest item before re-adjusting head to make
        # this node the oldest item.
        self._movetohead(node)
        self._head = node.next

    # Additional dict methods.

    def get(self, k, default=None):
        """Like __getitem__ but return ``default`` on a miss.

        Note: unlike __getitem__, this does not update recency (no
        _movetohead call).
        """
        try:
            return self._cache[k].value
        except KeyError:
            return default

    def clear(self):
        """Drop all entries; the allocated node list is kept for reuse."""
        n = self._head
        while n.key is not _notset:
            n.markempty()
            n = n.next

        self._cache.clear()

    def copy(self):
        """Return a new lrucachedict with the same contents and ordering."""
        result = lrucachedict(self._capacity)
        n = self._head.prev
        # Iterate in oldest-to-newest order, so the copy has the right ordering
        for i in range(len(self._cache)):
            result[n.key] = n.value
            n = n.prev
        return result

    def _movetohead(self, node):
        """Mark a node as the newest, making it the new head.

        When a node is accessed, it becomes the freshest entry in the LRU
        list, which is denoted by self._head.

        Visually, let's make ``N`` the new head node (* denotes head):

            previous/oldest <-> head <-> next/next newest

            ----<->--- A* ---<->-----
            |                       |
            E <-> D <-> N <-> C <-> B

        To:

            ----<->--- N* ---<->-----
            |                       |
            E <-> D <-> C <-> B <-> A

        This requires the following moves:

           C.next = D  (node.prev.next = node.next)
           D.prev = C  (node.next.prev = node.prev)
           E.next = N  (head.prev.next = node)
           N.prev = E  (node.prev = head.prev)
           N.next = A  (node.next = head)
           A.prev = N  (head.prev = node)
        """
        head = self._head
        # C.next = D
        node.prev.next = node.next
        # D.prev = C
        node.next.prev = node.prev
        # N.prev = E
        node.prev = head.prev
        # N.next = A
        # It is tempting to do just "head" here, however if node is
        # adjacent to head, this will do bad things.
        node.next = head.prev.next
        # E.next = N
        node.next.prev = node
        # A.prev = N
        node.prev.next = node

        self._head = node

    def _addcapacity(self):
        """Add a node to the circular linked list.

        The new node is inserted before the head node.
        """
        head = self._head
        node = _lrucachenode()
        head.prev.next = node
        node.prev = head.prev
        node.next = head
        head.prev = node
        self._size += 1
        return node
736
736
def lrucachefunc(func):
    '''cache most recent results of function calls'''
    results = {}
    usage = collections.deque()

    # Single-argument callables are keyed on the bare argument; everything
    # else is keyed on the full positional-argument tuple.
    if func.__code__.co_argcount == 1:
        def cached(arg):
            if arg in results:
                usage.remove(arg)
            else:
                # Evict the least recently used key once more than 20
                # entries are held.
                if len(results) > 20:
                    del results[usage.popleft()]
                results[arg] = func(arg)
            usage.append(arg)
            return results[arg]
    else:
        def cached(*args):
            if args in results:
                usage.remove(args)
            else:
                if len(results) > 20:
                    del results[usage.popleft()]
                results[args] = func(*args)
            usage.append(args)
            return results[args]

    return cached
763
763
class propertycache(object):
    """Descriptor computing an attribute's value once, then caching it.

    The computed result is stored in the instance ``__dict__`` under the
    decorated function's name, so later reads bypass this descriptor.
    """
    def __init__(self, func):
        self.func = func
        self.name = func.__name__

    def __get__(self, obj, type=None):
        value = self.func(obj)
        self.cachevalue(obj, value)
        return value

    def cachevalue(self, obj, value):
        """Store ``value`` on ``obj``; subclasses may override this hook."""
        # __dict__ assignment required to bypass __setattr__ (eg: repoview)
        obj.__dict__[self.name] = value
776
776
def pipefilter(s, cmd):
    '''filter string S through command CMD, returning its output'''
    proc = subprocess.Popen(cmd, shell=True, close_fds=closefds,
                            stdin=subprocess.PIPE, stdout=subprocess.PIPE)
    # Feed s on stdin and collect stdout; stderr is left attached to ours.
    stdout, _stderr = proc.communicate(s)
    return stdout
783
783
def tempfilter(s, cmd):
    '''filter string S through a pair of temporary files with CMD.
    CMD is used as a template to create the real command to be run,
    with the strings INFILE and OUTFILE replaced by the real names of
    the temporary files generated.'''
    inname, outname = None, None
    try:
        infd, inname = tempfile.mkstemp(prefix='hg-filter-in-')
        fp = os.fdopen(infd, 'wb')
        try:
            fp.write(s)
        finally:
            # Close the input file even if write() raises, so the
            # descriptor is not leaked before the cleanup below runs.
            fp.close()
        outfd, outname = tempfile.mkstemp(prefix='hg-filter-out-')
        os.close(outfd)
        cmd = cmd.replace('INFILE', inname)
        cmd = cmd.replace('OUTFILE', outname)
        code = os.system(cmd)
        if sys.platform == 'OpenVMS' and code & 1:
            # On OpenVMS an odd exit status signals success.
            code = 0
        if code:
            raise Abort(_("command '%s' failed: %s") %
                        (cmd, explainexit(code)))
        return readfile(outname)
    finally:
        # Best-effort removal of both temporary files.
        try:
            if inname:
                os.unlink(inname)
        except OSError:
            pass
        try:
            if outname:
                os.unlink(outname)
        except OSError:
            pass
817
817
# Map of command prefix -> filter implementation, consulted by filter()
# below; commands with neither prefix default to pipefilter.
filtertable = {
    'tempfile:': tempfilter,
    'pipe:': pipefilter,
    }
822
822
def filter(s, cmd):
    "filter a string through a command that transforms its input to its output"
    # Dispatch on a registered prefix; strip the prefix (and any leading
    # whitespace) from the command before running it.
    for prefix, runner in filtertable.iteritems():
        if cmd.startswith(prefix):
            return runner(s, cmd[len(prefix):].lstrip())
    return pipefilter(s, cmd)
829
829
def binary(s):
    """return true if a string is binary data"""
    # Heuristic: any embedded NUL byte marks the data as binary; empty or
    # None input is never binary.
    if not s:
        return False
    return '\0' in s
833
833
def increasingchunks(source, min=1024, max=65536):
    '''return no less than min bytes per chunk while data remains,
    doubling min after each chunk until it reaches max'''
    def floorlog2(x):
        # Position of the highest set bit; defined as 0 when x == 0.
        if not x:
            return 0
        n = 0
        while x:
            x >>= 1
            n += 1
        return n - 1

    pending = []
    size = 0
    for piece in source:
        pending.append(piece)
        size += len(piece)
        if size >= min:
            if min < max:
                # Grow the threshold: at least double it, and jump straight
                # past the size of the chunk about to be emitted, capping
                # at max.
                min = min << 1
                nmin = 1 << floorlog2(size)
                if nmin > min:
                    min = nmin
                if min > max:
                    min = max
            yield ''.join(pending)
            size = 0
            pending = []
    if pending:
        # Flush the final, possibly undersized, chunk.
        yield ''.join(pending)
864
864
# Re-export error.Abort under its historical name; callers throughout the
# codebase raise util.Abort.
Abort = error.Abort
866
866
def always(fn):
    """Matcher predicate that accepts every input."""
    return True
869
869
def never(fn):
    """Matcher predicate that rejects every input."""
    return False
872
872
def nogc(func):
    """disable garbage collector

    Python's garbage collector triggers a GC each time a certain number of
    container objects (the number being defined by gc.get_threshold()) are
    allocated even when marked not to be tracked by the collector. Tracking has
    no effect on when GCs are triggered, only on what objects the GC looks
    into. As a workaround, disable GC while building complex (huge)
    containers.

    This garbage collector issue have been fixed in 2.7.
    """
    # On 2.7 and later the workaround is unnecessary: hand back the
    # function untouched.
    if sys.version_info >= (2, 7):
        return func

    def wrapper(*args, **kwargs):
        wasenabled = gc.isenabled()
        gc.disable()
        try:
            return func(*args, **kwargs)
        finally:
            # Only re-enable collection if the caller had it on.
            if wasenabled:
                gc.enable()
    return wrapper
896
896
def pathto(root, n1, n2):
    '''return the relative path from one place to another.
    root should use os.sep to separate directories
    n1 should use os.sep to separate directories
    n2 should use "/" to separate directories
    returns an os.sep-separated path.

    If n1 is a relative path, it's assumed it's
    relative to root.
    n2 should always be relative to root.
    '''
    if not n1:
        return localpath(n2)
    if os.path.isabs(n1):
        if os.path.splitdrive(root)[0] != os.path.splitdrive(n1)[0]:
            # Different drives (Windows): no relative path exists, so fall
            # back to an absolute path for n2.
            return os.path.join(root, localpath(n2))
        n2 = '/'.join((pconvert(root), n2))
    a, b = splitpath(n1), n2.split('/')
    a.reverse()
    b.reverse()
    # Strip the common leading components of the two paths.
    while a and b and a[-1] == b[-1]:
        a.pop()
        b.pop()
    b.reverse()
    # Climb out of what remains of n1, then descend into n2.
    return os.sep.join((['..'] * len(a)) + b) or '.'
922
922
def mainfrozen():
    """return True if we are a frozen executable.

    The code supports py2exe (most common, Windows only) and tools/freeze
    (portable, not much used).
    """
    if safehasattr(sys, "frozen"):     # new py2exe
        return True
    if safehasattr(sys, "importers"):  # old py2exe
        return True
    return imp.is_frozen(u"__main__")  # tools/freeze
932
932
# the location of data files matching the source code
if mainfrozen() and getattr(sys, 'frozen', None) != 'macosx_app':
    # executable version (py2exe) doesn't support __file__
    datapath = os.path.dirname(sys.executable)
else:
    datapath = os.path.dirname(__file__)

if not isinstance(datapath, bytes):
    # Mercurial handles paths as bytes internally; encode when running on
    # a Python where __file__/sys.executable are unicode (Python 3).
    datapath = pycompat.fsencode(datapath)

i18n.setdatapath(datapath)
944
944
# Cached path of the 'hg' executable; computed lazily by hgexecutable().
_hgexecutable = None
946
946
def hgexecutable():
    """return location of the 'hg' executable.

    Defaults to $HG or 'hg' in the search path.
    """
    if _hgexecutable is None:
        hg = os.environ.get('HG')
        mainmod = sys.modules['__main__']
        if hg:
            # $HG takes precedence over any detection below.
            _sethgexecutable(hg)
        elif mainfrozen():
            if getattr(sys, 'frozen', None) == 'macosx_app':
                # Env variable set by py2app
                _sethgexecutable(os.environ['EXECUTABLEPATH'])
            else:
                _sethgexecutable(sys.executable)
        elif os.path.basename(getattr(mainmod, '__file__', '')) == 'hg':
            # Invoked directly via the 'hg' script.
            _sethgexecutable(mainmod.__file__)
        else:
            exe = findexe('hg') or os.path.basename(sys.argv[0])
            _sethgexecutable(exe)
    return _hgexecutable
969
969
def _sethgexecutable(path):
    """Record *path* as the cached location of the 'hg' executable."""
    global _hgexecutable
    _hgexecutable = path
974
974
def _isstdout(f):
    # Objects lacking fileno() (e.g. StringIO) can never be the real
    # stdout; the and-expression then yields the falsy getfd itself.
    getfd = getattr(f, 'fileno', None)
    return getfd and getfd() == sys.__stdout__.fileno()
978
978
def system(cmd, environ=None, cwd=None, onerr=None, errprefix=None, out=None):
    '''enhanced shell command execution.
    run with environment maybe modified, maybe in different dir.

    if command fails and onerr is None, return status, else raise onerr
    object as exception.

    if out is specified, it is assumed to be a file-like object that has a
    write() method. stdout and stderr will be redirected to out.'''
    if environ is None:
        environ = {}
    try:
        # Flush our buffered output so it appears before the child's.
        sys.stdout.flush()
    except Exception:
        pass
    def py2shell(val):
        'convert python object into string that is useful to shell'
        if val is None or val is False:
            return '0'
        if val is True:
            return '1'
        return str(val)
    origcmd = cmd
    cmd = quotecommand(cmd)
    if sys.platform == 'plan9' and (sys.version_info[0] == 2
                                    and sys.version_info[1] < 7):
        # subprocess kludge to work around issues in half-baked Python
        # ports, notably bichued/python:
        if not cwd is None:
            os.chdir(cwd)
        rc = os.system(cmd)
    else:
        env = dict(os.environ)
        env.update((k, py2shell(v)) for k, v in environ.iteritems())
        # Expose the running hg binary to the child through $HG.
        env['HG'] = hgexecutable()
        if out is None or _isstdout(out):
            rc = subprocess.call(cmd, shell=True, close_fds=closefds,
                                 env=env, cwd=cwd)
        else:
            # Redirect both stdout and stderr of the child into ``out``.
            proc = subprocess.Popen(cmd, shell=True, close_fds=closefds,
                                    env=env, cwd=cwd, stdout=subprocess.PIPE,
                                    stderr=subprocess.STDOUT)
            for line in iter(proc.stdout.readline, ''):
                out.write(line)
            proc.wait()
            rc = proc.returncode
    if sys.platform == 'OpenVMS' and rc & 1:
        # On OpenVMS an odd exit status signals success.
        rc = 0
    if rc and onerr:
        errmsg = '%s %s' % (os.path.basename(origcmd.split(None, 1)[0]),
                            explainexit(rc)[0])
        if errprefix:
            errmsg = '%s: %s' % (errprefix, errmsg)
        raise onerr(errmsg)
    return rc
1034
1034
def checksignature(func):
    '''wrap a function with code to check for calling errors'''
    def check(*args, **kwargs):
        try:
            return func(*args, **kwargs)
        except TypeError:
            frames = traceback.extract_tb(sys.exc_info()[2])
            # A one-frame traceback means the call itself blew up (bad
            # arguments), not something inside func's body.
            if len(frames) == 1:
                raise error.SignatureError
            raise

    return check
1046
1046
def copyfile(src, dest, hardlink=False, copystat=False, checkambig=False):
    '''copy a file, preserving mode and optionally other stat info like
    atime/mtime

    checkambig argument is used with filestat, and is useful only if
    destination file is guarded by any lock (e.g. repo.lock or
    repo.wlock).

    copystat and checkambig should be exclusive.
    '''
    assert not (copystat and checkambig)
    oldstat = None
    if os.path.lexists(dest):
        if checkambig:
            oldstat = checkambig and filestat(dest)
        unlink(dest)
    # hardlinks are problematic on CIFS, quietly ignore this flag
    # until we find a way to work around it cleanly (issue4546)
    if False and hardlink:
        try:
            oslink(src, dest)
            return
        except (IOError, OSError):
            pass # fall back to normal copy
    if os.path.islink(src):
        os.symlink(os.readlink(src), dest)
        # copytime is ignored for symlinks, but in general copytime isn't needed
        # for them anyway
    else:
        try:
            shutil.copyfile(src, dest)
            if copystat:
                # copystat also copies mode
                shutil.copystat(src, dest)
            else:
                shutil.copymode(src, dest)
            if oldstat and oldstat.stat:
                newstat = filestat(dest)
                if newstat.isambig(oldstat):
                    # stat of copied file is ambiguous to original one;
                    # advance mtime by one second to disambiguate.
                    advanced = (oldstat.stat.st_mtime + 1) & 0x7fffffff
                    os.utime(dest, (advanced, advanced))
        except shutil.Error as inst:
            raise Abort(str(inst))
1091
1091
def copyfiles(src, dst, hardlink=None, progress=lambda t, pos: None):
    """Copy a directory tree using hardlinks if possible."""
    num = 0

    if hardlink is None:
        # Hardlinking only works within a single filesystem; probe whether
        # src and the destination directory share a device.
        hardlink = (os.stat(src).st_dev ==
                    os.stat(os.path.dirname(dst)).st_dev)
    if hardlink:
        topic = _('linking')
    else:
        topic = _('copying')

    if os.path.isdir(src):
        os.mkdir(dst)
        for name, kind in osutil.listdir(src):
            srcname = os.path.join(src, name)
            dstname = os.path.join(dst, name)
            def nprog(t, pos):
                # Offset child progress by the files already handled here.
                if pos is not None:
                    return progress(t, pos + num)
            hardlink, n = copyfiles(srcname, dstname, hardlink, progress=nprog)
            num += n
    else:
        if hardlink:
            try:
                oslink(src, dst)
            except (IOError, OSError):
                # Linking failed; fall back to copying and stop trying to
                # link for the rest of this tree.
                hardlink = False
                shutil.copy(src, dst)
        else:
            shutil.copy(src, dst)
        num += 1
        progress(topic, num)
    progress(topic, None)

    return hardlink, num
1128
1128
# Names and characters that are invalid in filenames on Windows; consulted
# by checkwinfilename() below.
_winreservednames = '''con prn aux nul
    com1 com2 com3 com4 com5 com6 com7 com8 com9
    lpt1 lpt2 lpt3 lpt4 lpt5 lpt6 lpt7 lpt8 lpt9'''.split()
_winreservedchars = ':*?"<>|'
def checkwinfilename(path):
    r'''Check that the base-relative path is a valid filename on Windows.
    Returns None if the path is ok, or a UI string describing the problem.

    >>> checkwinfilename("just/a/normal/path")
    >>> checkwinfilename("foo/bar/con.xml")
    "filename contains 'con', which is reserved on Windows"
    >>> checkwinfilename("foo/con.xml/bar")
    "filename contains 'con', which is reserved on Windows"
    >>> checkwinfilename("foo/bar/xml.con")
    >>> checkwinfilename("foo/bar/AUX/bla.txt")
    "filename contains 'AUX', which is reserved on Windows"
    >>> checkwinfilename("foo/bar/bla:.txt")
    "filename contains ':', which is reserved on Windows"
    >>> checkwinfilename("foo/bar/b\07la.txt")
    "filename contains '\\x07', which is invalid on Windows"
    >>> checkwinfilename("foo/bar/bla ")
    "filename ends with ' ', which is not allowed on Windows"
    >>> checkwinfilename("../bar")
    >>> checkwinfilename("foo\\")
    "filename ends with '\\', which is invalid on Windows"
    >>> checkwinfilename("foo\\/bar")
    "directory name ends with '\\', which is invalid on Windows"
    '''
    # A trailing backslash is rejected outright, whether it ends the whole
    # path or just one directory component.
    if path.endswith('\\'):
        return _("filename ends with '\\', which is invalid on Windows")
    if '\\/' in path:
        return _("directory name ends with '\\', which is invalid on Windows")
    # Validate each path component with either separator style.
    for component in path.replace('\\', '/').split('/'):
        if not component:
            continue
        for char in component:
            if char in _winreservedchars:
                return _("filename contains '%s', which is reserved "
                         "on Windows") % char
            # control characters (0x00-0x1f) are invalid in filenames
            if ord(char) <= 31:
                return _("filename contains %r, which is invalid "
                         "on Windows") % char
        # device names are reserved regardless of any extension (con.xml)
        stem = component.split('.')[0]
        if stem and stem.lower() in _winreservednames:
            return _("filename contains '%s', which is reserved "
                     "on Windows") % stem
        # Windows strips trailing dots and spaces; '.' and '..' stay legal
        last = component[-1]
        if last in '. ' and component not in ('.', '..'):
            return _("filename ends with '%s', which is not allowed "
                     "on Windows") % last
1179
1179
# Pick the filename validator for the current OS: on Windows it is exactly
# the rule set encoded in checkwinfilename above; elsewhere the platform
# module supplies its own implementation.
if os.name == 'nt':
    checkosfilename = checkwinfilename
else:
    checkosfilename = platform.checkosfilename
1184
1184
def makelock(info, pathname):
    """Create a lock at pathname whose payload is info.

    Preferred form is a symlink pointing at info (atomic and readable
    without opening the file); when symlinks are unavailable, fall back
    to exclusively creating a regular file containing info.
    """
    try:
        return os.symlink(info, pathname)
    except OSError as err:
        # an already-held lock is fatal; other symlink failures fall
        # through to the plain-file scheme below
        if err.errno == errno.EEXIST:
            raise
    except AttributeError: # no symlink in os
        pass

    fd = os.open(pathname, os.O_CREAT | os.O_WRONLY | os.O_EXCL)
    os.write(fd, info)
    os.close(fd)
1197
1197
def readlock(pathname):
    """Read back the payload stored by makelock.

    Reads the symlink target when the lock is a symlink; otherwise
    reads the lock file's contents.
    """
    try:
        return os.readlink(pathname)
    except OSError as err:
        # EINVAL: not a symlink; ENOSYS: symlinks unsupported here
        if err.errno not in (errno.EINVAL, errno.ENOSYS):
            raise
    except AttributeError: # no symlink in os
        pass
    fp = posixfile(pathname)
    try:
        return fp.read()
    finally:
        fp.close()
1210
1210
def fstat(fp):
    '''stat file object that may not have fileno method.'''
    # file-like objects without a real descriptor fall back to a
    # path-based stat of fp.name
    try:
        fileno = fp.fileno
    except AttributeError:
        return os.stat(fp.name)
    return os.fstat(fileno())
1217
1217
1218 # File system features
1218 # File system features
1219
1219
def fscasesensitive(path):
    """
    Return true if the given path is on a case-sensitive filesystem

    Requires a path (like /foo/.hg) ending with a foldable final
    directory component.
    """
    st = os.lstat(path)
    dirname, basename = os.path.split(path)
    # flip the case of the final component; try upper first, then lower
    folded = basename.upper()
    if folded == basename:
        folded = basename.lower()
    if folded == basename:
        # name has no case to flip; no evidence against sensitivity
        return True
    try:
        st2 = os.lstat(os.path.join(dirname, folded))
    except OSError:
        # case-flipped name does not resolve: case-sensitive
        return True
    # same inode under both spellings means case-insensitive
    return st2 != st
1242
1242
# Optional re2 regex engine (linear-time matching).  _re2 is tri-state:
# None means "re2 importable but not yet validated"; _re._checkre2() later
# settles it to True/False after a functional probe (see issue3964).
try:
    import re2
    _re2 = None
except ImportError:
    _re2 = False
1248
1248
class _re(object):
    # Facade over the stdlib 're' module (bound as remod) that silently
    # uses the re2 engine when it is available and the pattern/flags are
    # compatible with it.
    def _checkre2(self):
        # Settle the module-level _re2 flag with a functional probe.
        global _re2
        try:
            # check if match works, see issue3964
            _re2 = bool(re2.match(r'\[([^\[]+)\]', '[ui]'))
        except ImportError:
            # NOTE(review): catching ImportError here presumably covers a
            # demand-loaded re2 module failing at first use — confirm
            _re2 = False

    def compile(self, pat, flags=0):
        '''Compile a regular expression, using re2 if possible

        For best performance, use only re2-compatible regexp features. The
        only flags from the re module that are re2-compatible are
        IGNORECASE and MULTILINE.'''
        if _re2 is None:
            self._checkre2()
        if _re2 and (flags & ~(remod.IGNORECASE | remod.MULTILINE)) == 0:
            # re2 takes flags inline in the pattern, not as an argument
            if flags & remod.IGNORECASE:
                pat = '(?i)' + pat
            if flags & remod.MULTILINE:
                pat = '(?m)' + pat
            try:
                return re2.compile(pat)
            except re2.error:
                # pattern uses a feature re2 lacks; fall back to stdlib re
                pass
        return remod.compile(pat, flags)

    @propertycache
    def escape(self):
        '''Return the version of escape corresponding to self.compile.

        This is imperfect because whether re2 or re is used for a particular
        function depends on the flags, etc, but it's the best we can do.
        '''
        global _re2
        if _re2 is None:
            self._checkre2()
        if _re2:
            return re2.escape
        else:
            return remod.escape

# module-level singleton; callers use util.re.compile / util.re.escape
re = _re()
1293
1293
# cache of directory path -> {normcased name: on-disk name} listings
_fspathcache = {}
def fspath(name, root):
    '''Get name in the case stored in the filesystem

    The name should be relative to root, and be normcase-ed for efficiency.

    Note that this function is unnecessary, and should not be
    called, for case-sensitive filesystems (simply because it's expensive).

    The root should be normcase-ed, too.
    '''
    def _makefspathcacheentry(dir):
        # map normcased entry name -> actual on-disk spelling
        return dict((normcase(n), n) for n in os.listdir(dir))

    seps = os.sep
    if os.altsep:
        seps = seps + os.altsep
    # Protect backslashes. This gets silly very quickly.
    # NOTE(review): str.replace returns a new string and the result is
    # discarded here, so this line is a no-op; the raw backslash still
    # works inside the character class below — confirm before "fixing".
    seps.replace('\\','\\\\')
    # alternate between runs of non-separator and separator characters
    pattern = remod.compile(r'([^%s]+)|([%s]+)' % (seps, seps))
    dir = os.path.normpath(root)
    result = []
    for part, sep in pattern.findall(name):
        if sep:
            # pass separators through untouched
            result.append(sep)
            continue

        if dir not in _fspathcache:
            _fspathcache[dir] = _makefspathcacheentry(dir)
        contents = _fspathcache[dir]

        found = contents.get(part)
        if not found:
            # retry "once per directory" per "dirstate.walk" which
            # may take place for each patches of "hg qpush", for example
            _fspathcache[dir] = contents = _makefspathcacheentry(dir)
            found = contents.get(part)

        # unknown components are emitted as given
        result.append(found or part)
        dir = os.path.join(dir, part)

    return ''.join(result)
1336
1336
def checknlink(testfile):
    '''check whether hardlink count reporting works properly'''

    # testfile may be open, so we need a separate file for checking to
    # work around issue2543 (or testfile may get lost on Samba shares)
    f1 = testfile + ".hgtmp1"
    if os.path.lexists(f1):
        return False
    try:
        posixfile(f1, 'w').close()
    except IOError:
        # could not even create the probe file: report "no hardlinks"
        # (best effort; also clean up a partially-created file)
        try:
            os.unlink(f1)
        except OSError:
            pass
        return False

    f2 = testfile + ".hgtmp2"
    fd = None
    try:
        oslink(f1, f2)
        # nlinks() may behave differently for files on Windows shares if
        # the file is open.
        fd = posixfile(f2)
        return nlinks(f2) > 1
    except OSError:
        # hardlink creation or stat failed: counting does not work
        return False
    finally:
        # always close the probe handle first, then remove both temp files
        if fd is not None:
            fd.close()
        for f in (f1, f2):
            try:
                os.unlink(f)
            except OSError:
                pass
1372
1372
def endswithsep(path):
    '''Check path ends with os.sep or os.altsep.'''
    if path.endswith(os.sep):
        return True
    # preserve the original truthy/falsy result shape: this is None on
    # platforms without an altsep, a bool otherwise
    return os.altsep and path.endswith(os.altsep)
1376
1376
def splitpath(path):
    '''Split path by os.sep.

    This deliberately ignores os.altsep: it is nothing more than a
    spelled-out "xxx.split(os.sep)".  Run os.path.normpath() on the
    argument first if normalization is needed.'''
    return path.split(os.sep)
1384
1384
def gui():
    '''Are we running in a GUI?'''
    if sys.platform != 'darwin':
        # elsewhere: Windows always has a GUI; X11 needs $DISPLAY
        return os.name == "nt" or os.environ.get("DISPLAY")
    if 'SSH_CONNECTION' in os.environ:
        # handle SSH access to a box where the user is logged in
        return False
    if getattr(osutil, 'isgui', None):
        # check if a CoreGraphics session is available
        return osutil.isgui()
    # pure build; use a safe default
    return True
1399
1399
def mktempcopy(name, emptyok=False, createmode=None):
    """Create a temporary file with the same contents from name

    The permission bits are copied from the original file.

    If the temporary file is going to be truncated immediately, you
    can use emptyok=True as an optimization.

    Returns the name of the temporary file.
    """
    # create the temp file in the same directory as name so a later
    # rename over name stays on one filesystem
    d, fn = os.path.split(name)
    fd, temp = tempfile.mkstemp(prefix='.%s-' % fn, dir=d)
    os.close(fd)
    # Temporary files are created with mode 0600, which is usually not
    # what we want. If the original file already exists, just copy
    # its mode. Otherwise, manually obey umask.
    copymode(name, temp, createmode)
    if emptyok:
        return temp
    try:
        try:
            ifp = posixfile(name, "rb")
        except IOError as inst:
            if inst.errno == errno.ENOENT:
                # nothing to copy over; caller gets the empty temp file
                return temp
            # make the error message name the file we failed to open
            if not getattr(inst, 'filename', None):
                inst.filename = name
            raise
        ofp = posixfile(temp, "wb")
        for chunk in filechunkiter(ifp):
            ofp.write(chunk)
        ifp.close()
        ofp.close()
    except: # re-raises
        # best-effort removal of the half-written temp file
        try: os.unlink(temp)
        except OSError: pass
        raise
    return temp
1438
1438
class filestat(object):
    """help to exactly detect change of a file

    'stat' attribute is result of 'os.stat()' if specified 'path'
    exists. Otherwise, it is None. This can avoid preparative
    'exists()' examination on client side of this class.
    """
    def __init__(self, path):
        try:
            self.stat = os.stat(path)
        except OSError as exc:
            if exc.errno != errno.ENOENT:
                raise
            # missing file: record None instead of raising
            self.stat = None

    __hash__ = object.__hash__

    def __eq__(self, old):
        # When timestamp ambiguity has been avoided (see isambig),
        # matching size + ctime + mtime pins down "unchanged" exactly,
        # independent of platform.  Either side having stat == None
        # trips the AttributeError and compares unequal.
        try:
            if self.stat.st_size != old.stat.st_size:
                return False
            if self.stat.st_ctime != old.stat.st_ctime:
                return False
            return self.stat.st_mtime == old.stat.st_mtime
        except AttributeError:
            return False

    def isambig(self, old):
        """Examine whether new (= self) stat is ambiguous against old one

        Two stats taken in the same ctime second cannot be told apart by
        timestamp alone: a file changed twice within one second keeps
        equal ctime (and possibly mtime).  Treat every equal-ctime pair
        as ambiguous — mtime is not consulted, because an mtime that was
        manually advanced to dodge an earlier ambiguity can collide with
        a naturally advanced one.  Callers bump mtime by one second when
        this returns True, guaranteeing S[n-1].mtime != S[n].mtime even
        for size-preserving edits.
        """
        try:
            return self.stat.st_ctime == old.stat.st_ctime
        except AttributeError:
            return False

    def __ne__(self, other):
        return not self.__eq__(other)
1504
1504
class atomictempfile(object):
    '''writable file object that atomically updates a file

    All writes will go to a temporary copy of the original file. Call
    close() when you are done writing, and atomictempfile will rename
    the temporary copy to the original name, making the changes
    visible. If the object is destroyed without being closed, all your
    writes are discarded.

    checkambig argument of constructor is used with filestat, and is
    useful only if target file is guarded by any lock (e.g. repo.lock
    or repo.wlock).
    '''
    def __init__(self, name, mode='w+b', createmode=None, checkambig=False):
        self.__name = name # permanent name
        # writes go to a same-directory temp copy until close()
        self._tempname = mktempcopy(name, emptyok=('w' in mode),
                                    createmode=createmode)
        self._fp = posixfile(self._tempname, mode)
        self._checkambig = checkambig

        # delegated methods
        self.read = self._fp.read
        self.write = self._fp.write
        self.seek = self._fp.seek
        self.tell = self._fp.tell
        self.fileno = self._fp.fileno

    def close(self):
        # Flush the temp copy over the permanent name.  With checkambig,
        # compare the target's stat before and after the rename and nudge
        # mtime forward when the change would be undetectable (see
        # filestat.isambig).
        if not self._fp.closed:
            self._fp.close()
            filename = localpath(self.__name)
            # oldstat is False when checkambig is off (skips the check)
            oldstat = self._checkambig and filestat(filename)
            if oldstat and oldstat.stat:
                rename(self._tempname, filename)
                newstat = filestat(filename)
                if newstat.isambig(oldstat):
                    # stat of changed file is ambiguous to original one
                    advanced = (oldstat.stat.st_mtime + 1) & 0x7fffffff
                    os.utime(filename, (advanced, advanced))
            else:
                rename(self._tempname, filename)

    def discard(self):
        # Abandon all writes: delete the temp copy, leave the target alone.
        if not self._fp.closed:
            try:
                os.unlink(self._tempname)
            except OSError:
                pass
            self._fp.close()

    def __del__(self):
        if safehasattr(self, '_fp'): # constructor actually did something
            self.discard()

    def __enter__(self):
        return self

    def __exit__(self, exctype, excvalue, traceback):
        # commit on clean exit, roll back if the with-block raised
        if exctype is not None:
            self.discard()
        else:
            self.close()
1567
1567
def makedirs(name, mode=None, notindexed=False):
    """recursive directory creation with parent mode inheritance

    Newly created directories are marked as "not to be indexed by
    the content indexing service", if ``notindexed`` is specified
    for "write" mode access.
    """
    try:
        makedir(name, notindexed)
    except OSError as exc:
        if exc.errno == errno.EEXIST:
            return
        if exc.errno != errno.ENOENT or not name:
            raise
        parent = os.path.dirname(os.path.abspath(name))
        if parent == name:
            # reached the filesystem root without success
            raise
        # create missing ancestors first, then retry this directory
        makedirs(parent, mode, notindexed)
        try:
            makedir(name, notindexed)
        except OSError as exc:
            # Catch EEXIST to handle races
            if exc.errno == errno.EEXIST:
                return
            raise
    # only freshly created directories get an explicit mode
    if mode is not None:
        os.chmod(name, mode)
1595
1595
def readfile(path):
    """Return the entire contents of the file at path, as bytes."""
    fp = open(path, 'rb')
    try:
        return fp.read()
    finally:
        fp.close()
1599
1599
def writefile(path, text):
    """Replace the contents of the file at path with text (bytes)."""
    fp = open(path, 'wb')
    try:
        fp.write(text)
    finally:
        fp.close()
1603
1603
def appendfile(path, text):
    """Append text (bytes) to the file at path, creating it if needed."""
    fp = open(path, 'ab')
    try:
        fp.write(text)
    finally:
        fp.close()
1607
1607
class chunkbuffer(object):
    """Allow arbitrary sized chunks of data to be efficiently read from an
    iterator over chunks of arbitrary size."""

    def __init__(self, in_iter):
        """in_iter is the iterator that's iterating over the input chunks.
        targetsize is how big a buffer to try to maintain."""
        def splitbig(chunks):
            # break chunks larger than 1MB into 256KB pieces so a single
            # huge chunk never has to be buffered whole
            for chunk in chunks:
                if len(chunk) > 2**20:
                    pos = 0
                    while pos < len(chunk):
                        end = pos + 2 ** 18
                        yield chunk[pos:end]
                        pos = end
                else:
                    yield chunk
        self.iter = splitbig(in_iter)
        self._queue = collections.deque()
        # number of bytes of _queue[0] already handed out by read()
        self._chunkoffset = 0

    def read(self, l=None):
        """Read L bytes of data from the iterator of chunks of data.
        Returns less than L bytes if the iterator runs dry.

        If size parameter is omitted, read everything"""
        if l is None:
            # NOTE(review): this joins only the remaining iterator; data
            # already buffered in self._queue by a previous sized read is
            # not included — confirm callers never mix the two modes
            return ''.join(self.iter)

        left = l
        buf = []
        queue = self._queue
        while left > 0:
            # refill the queue
            if not queue:
                target = 2**18
                for chunk in self.iter:
                    queue.append(chunk)
                    target -= len(chunk)
                    if target <= 0:
                        break
                if not queue:
                    break

            # The easy way to do this would be to queue.popleft(), modify the
            # chunk (if necessary), then queue.appendleft(). However, for cases
            # where we read partial chunk content, this incurs 2 dequeue
            # mutations and creates a new str for the remaining chunk in the
            # queue. Our code below avoids this overhead.

            chunk = queue[0]
            chunkl = len(chunk)
            offset = self._chunkoffset

            # Use full chunk.
            if offset == 0 and left >= chunkl:
                left -= chunkl
                queue.popleft()
                buf.append(chunk)
                # self._chunkoffset remains at 0.
                continue

            chunkremaining = chunkl - offset

            # Use all of unconsumed part of chunk.
            if left >= chunkremaining:
                left -= chunkremaining
                queue.popleft()
                # offset == 0 is enabled by block above, so this won't merely
                # copy via ``chunk[0:]``.
                buf.append(chunk[offset:])
                self._chunkoffset = 0

            # Partial chunk needed.
            else:
                buf.append(chunk[offset:offset + left])
                self._chunkoffset += left
                # left goes negative here, terminating the loop
                left -= chunkremaining

        return ''.join(buf)
1688
1688
def filechunkiter(f, size=131072, limit=None):
    """Create a generator that produces the data in the file size
    (default 131072) bytes at a time, up to optional limit (default is
    to read all data). Chunks may be less than size bytes if the
    chunk is the last chunk in the file, or the file is a socket or
    some other type of file that sometimes reads less data than is
    requested."""
    assert size >= 0
    assert limit is None or limit >= 0
    while True:
        # never read past the remaining limit, if one was given
        nbytes = size if limit is None else min(limit, size)
        # nbytes == 0 short-circuits: don't issue a zero-byte read
        chunk = nbytes and f.read(nbytes)
        if not chunk:
            break
        if limit:
            limit -= len(chunk)
        yield chunk
1709
1709
def makedate(timestamp=None):
    '''Return a unix timestamp (or the current time) as a (unixtime,
    offset) tuple based off the local timezone.'''
    if timestamp is None:
        timestamp = time.time()
    if timestamp < 0:
        hint = _("check your clock")
        raise Abort(_("negative timestamp: %d") % timestamp, hint=hint)
    # The local offset is the difference between the naive UTC and
    # naive local renderings of the same instant.
    utcwall = datetime.datetime.utcfromtimestamp(timestamp)
    localwall = datetime.datetime.fromtimestamp(timestamp)
    delta = utcwall - localwall
    tz = delta.days * 86400 + delta.seconds
    return timestamp, tz
1722
1722
def datestr(date=None, format='%a %b %d %H:%M:%S %Y %1%2'):
    """represent a (unixtime, offset) tuple as a localized time.
    unixtime is seconds since the epoch, and offset is the time zone's
    number of seconds away from UTC.

    >>> datestr((0, 0))
    'Thu Jan 01 00:00:00 1970 +0000'
    >>> datestr((42, 0))
    'Thu Jan 01 00:00:42 1970 +0000'
    >>> datestr((-42, 0))
    'Wed Dec 31 23:59:18 1969 +0000'
    >>> datestr((0x7fffffff, 0))
    'Tue Jan 19 03:14:07 2038 +0000'
    >>> datestr((-0x80000000, 0))
    'Fri Dec 13 20:45:52 1901 +0000'
    """
    t, tz = date or makedate()
    if "%1" in format or "%2" in format or "%z" in format:
        # expand %z/%1/%2 into a "+HHMM"-style rendering of the offset
        sign = (tz > 0) and "-" or "+"
        minutes = abs(tz) // 60
        q, r = divmod(minutes, 60)
        format = format.replace("%z", "%1%2")
        format = format.replace("%1", "%c%02d" % (sign, q))
        format = format.replace("%2", "%02d" % r)
    # clamp to the signed 32-bit range the rendering below supports
    d = min(max(t - tz, -0x80000000), 0x7fffffff)
    # Never use time.gmtime() and datetime.datetime.fromtimestamp()
    # because they use the gmtime() system call which is buggy on Windows
    # for negative values.
    t = datetime.datetime(1970, 1, 1) + datetime.timedelta(seconds=d)
    return t.strftime(format)
1758
1758
def shortdate(date=None):
    """turn (timestamp, tzoff) tuple into iso 8631 date."""
    # delegate to datestr with a fixed ISO-style format
    return datestr(date, format='%Y-%m-%d')
1762
1762
def parsetimezone(s):
    """find a trailing timezone, if any, in string, and return a
    (offset, remainder) pair"""

    # named UTC aliases
    if s.endswith("GMT") or s.endswith("UTC"):
        return 0, s[:-3].rstrip()

    # Unix-style timezones [+-]hhmm
    if len(s) >= 5 and s[-5] in "+-" and s[-4:].isdigit():
        sign = 1 if s[-5] == "+" else -1
        hours = int(s[-4:-2])
        minutes = int(s[-2:])
        return -sign * (hours * 60 + minutes) * 60, s[:-5].rstrip()

    # ISO8601 trailing Z
    if s.endswith("Z") and s[-2:-1].isdigit():
        return 0, s[:-1]

    # ISO8601-style [+-]hh:mm
    if (len(s) >= 6 and s[-6] in "+-" and s[-3] == ":" and
        s[-5:-3].isdigit() and s[-2:].isdigit()):
        sign = 1 if s[-6] == "+" else -1
        hours = int(s[-5:-3])
        minutes = int(s[-2:])
        return -sign * (hours * 60 + minutes) * 60, s[:-6]

    # no recognizable timezone suffix
    return None, s
1790
1790
def strdate(string, format, defaults=None):
    """parse a localized time string and return a (unixtime, offset) tuple.
    if the string cannot be parsed, ValueError is raised.

    defaults maps date-part specifier groups (e.g. "S", "mb") to a
    (biased, now) pair of fallback strings, used to fill in any part
    the format does not mention.
    """
    # Fix: the default used to be a mutable (and wrongly-typed) ``[]``;
    # defaults is indexed with string keys below, so a dict is intended.
    if defaults is None:
        defaults = {}
    # NOTE: unixtime = localunixtime + offset
    offset, date = parsetimezone(string)

    # add missing elements from defaults
    usenow = False # default to using biased defaults
    for part in ("S", "M", "HI", "d", "mb", "yY"): # decreasing specificity
        found = [True for p in part if ("%" + p) in format]
        if not found:
            date += "@" + defaults[part][usenow]
            format += "@%" + part[0]
        else:
            # We've found a specific time element, less specific time
            # elements are relative to today
            usenow = True

    timetuple = time.strptime(date, format)
    localunixtime = int(calendar.timegm(timetuple))
    if offset is None:
        # no explicit zone given: derive the offset from the local timezone
        unixtime = int(time.mktime(timetuple))
        offset = unixtime - localunixtime
    else:
        unixtime = localunixtime + offset
    return unixtime, offset
1818
1818
def parsedate(date, formats=None, bias=None):
    """parse a localized date/time and return a (unixtime, offset) tuple.

    The date may be a "unixtime offset" string or in one of the specified
    formats. If the date already is a (unixtime, offset) tuple, it is returned.

    >>> parsedate(' today ') == parsedate(\
                                  datetime.date.today().strftime('%b %d'))
    True
    >>> parsedate( 'yesterday ') == parsedate((datetime.date.today() -\
                                               datetime.timedelta(days=1)\
                                              ).strftime('%b %d'))
    True
    >>> now, tz = makedate()
    >>> strnow, strtz = parsedate('now')
    >>> (strnow - now) < 1
    True
    >>> tz == strtz
    True
    """
    if bias is None:
        bias = {}
    if not date:
        return 0, 0
    if isinstance(date, tuple) and len(date) == 2:
        # already parsed
        return date
    if not formats:
        formats = defaultdateformats
    date = date.strip()

    # symbolic dates
    if date == 'now' or date == _('now'):
        return makedate()
    if date == 'today' or date == _('today'):
        date = datetime.date.today().strftime('%b %d')
    elif date == 'yesterday' or date == _('yesterday'):
        date = (datetime.date.today() -
                datetime.timedelta(days=1)).strftime('%b %d')

    try:
        # fast path: a raw "unixtime offset" pair
        when, offset = map(int, date.split(' '))
    except ValueError:
        # build per-part default values, biased toward the requested
        # rounding direction, then try each known format in turn
        now = makedate()
        defaults = {}
        for part in ("d", "mb", "yY", "HI", "M", "S"):
            # this piece is for rounding the specific end of unknowns
            b = bias.get(part)
            if b is None:
                if part[0] in "HMS":
                    b = "00"
                else:
                    b = "0"

            # this piece is for matching the generic end to today's date
            n = datestr(now, "%" + part[0])

            defaults[part] = (b, n)

        for format in formats:
            try:
                when, offset = strdate(date, format, defaults)
            except (ValueError, OverflowError):
                pass
            else:
                break
        else:
            raise Abort(_('invalid date: %r') % date)
    # validate explicit (probably user-specified) date and
    # time zone offset. values must fit in signed 32 bits for
    # current 32-bit linux runtimes. timezones go from UTC-12
    # to UTC+14
    if when < -0x80000000 or when > 0x7fffffff:
        raise Abort(_('date exceeds 32 bits: %d') % when)
    if offset < -50400 or offset > 43200:
        raise Abort(_('impossible time zone offset: %d') % offset)
    return when, offset
1895
1895
def matchdate(date):
    """Return a function that matches a given date match specifier

    Formats include:

    '{date}' match a given date to the accuracy provided

    '<{date}' on or before a given date

    '>{date}' on or after a given date

    >>> p1 = parsedate("10:29:59")
    >>> p2 = parsedate("10:30:00")
    >>> p3 = parsedate("10:30:59")
    >>> p4 = parsedate("10:31:00")
    >>> p5 = parsedate("Sep 15 10:30:00 1999")
    >>> f = matchdate("10:30")
    >>> f(p1[0])
    False
    >>> f(p2[0])
    True
    >>> f(p3[0])
    True
    >>> f(p4[0])
    False
    >>> f(p5[0])
    False
    """

    def lower(date):
        # round unspecified parts down to the earliest matching moment
        d = {'mb': "1", 'd': "1"}
        return parsedate(date, extendeddateformats, d)[0]

    def upper(date):
        # round unspecified parts up; probe month lengths longest-first
        d = {'mb': "12", 'HI': "23", 'M': "59", 'S': "59"}
        for days in ("31", "30", "29"):
            try:
                d["d"] = days
                return parsedate(date, extendeddateformats, d)[0]
            except Abort:
                pass
        d["d"] = "28"
        return parsedate(date, extendeddateformats, d)[0]

    date = date.strip()

    if not date:
        raise Abort(_("dates cannot consist entirely of whitespace"))
    elif date[0] == "<":
        if not date[1:]:
            raise Abort(_("invalid day spec, use '<DATE'"))
        when = upper(date[1:])
        return lambda x: x <= when
    elif date[0] == ">":
        if not date[1:]:
            raise Abort(_("invalid day spec, use '>DATE'"))
        when = lower(date[1:])
        return lambda x: x >= when
    elif date[0] == "-":
        # "-N": everything within the last N days
        try:
            days = int(date[1:])
        except ValueError:
            raise Abort(_("invalid day spec: %s") % date[1:])
        if days < 0:
            raise Abort(_("%s must be nonnegative (see 'hg help dates')")
                        % date[1:])
        when = makedate()[0] - days * 3600 * 24
        return lambda x: x >= when
    elif " to " in date:
        # inclusive range between two specs
        a, b = date.split(" to ")
        start, stop = lower(a), upper(b)
        return lambda x: x >= start and x <= stop
    else:
        # a bare date matches the full span it describes
        start, stop = lower(date), upper(date)
        return lambda x: x >= start and x <= stop
1971
1971
def stringmatcher(pattern):
    """
    accepts a string, possibly starting with 're:' or 'literal:' prefix.
    returns the matcher name, pattern, and matcher function.
    missing or unknown prefixes are treated as literal matches.

    helper for tests:
    >>> def test(pattern, *tests):
    ...     kind, pattern, matcher = stringmatcher(pattern)
    ...     return (kind, pattern, [bool(matcher(t)) for t in tests])

    exact matching (no prefix):
    >>> test('abcdefg', 'abc', 'def', 'abcdefg')
    ('literal', 'abcdefg', [False, False, True])

    regex matching ('re:' prefix)
    >>> test('re:a.+b', 'nomatch', 'fooadef', 'fooadefbar')
    ('re', 'a.+b', [False, False, True])

    force exact matches ('literal:' prefix)
    >>> test('literal:re:foobar', 'foobar', 're:foobar')
    ('literal', 're:foobar', [False, True])

    unknown prefixes are ignored and treated as literals
    >>> test('foo:bar', 'foo', 'bar', 'foo:bar')
    ('literal', 'foo:bar', [False, False, True])
    """
    if pattern.startswith('re:'):
        pattern = pattern[3:]
        try:
            regex = remod.compile(pattern)
        except remod.error as e:
            raise error.ParseError(_('invalid regular expression: %s')
                                   % e)
        return 're', pattern, regex.search
    elif pattern.startswith('literal:'):
        pattern = pattern[8:]
    # no (or unknown) prefix: exact string equality
    return 'literal', pattern, pattern.__eq__
2010
2010
def shortuser(user):
    """Return a short representation of a user name or email address."""
    user = user.split('@', 1)[0]     # strip the mail domain
    user = user.split('<', 1)[-1]    # strip a leading 'Real Name <'
    user = user.split(' ', 1)[0]     # keep the first word only
    user = user.split('.', 1)[0]     # strip anything past the first dot
    return user
2026
2026
def emailuser(user):
    """Return the user portion of an email address."""
    user = user.split('@', 1)[0]     # strip the mail domain
    user = user.split('<', 1)[-1]    # strip a leading 'Real Name <'
    return user
2036
2036
def email(author):
    '''get email of author.'''
    # take the text between '<' and '>', or the whole string when the
    # brackets are absent
    end = author.find('>')
    if end < 0:
        end = None
    start = author.find('<') + 1
    return author[start:end]
2043
2043
def ellipsis(text, maxlength=400):
    """Trim string to at most maxlength (default: 400) columns in display."""
    # width-aware truncation lives in encoding.trim
    return encoding.trim(text, maxlength, ellipsis='...')
2047
2047
def unitcountfn(*unittable):
    '''return a function that renders a readable count of some quantity'''

    def render(count):
        # use the first unit whose threshold the count reaches;
        # the last table entry is the catch-all fallback
        for multiplier, divisor, fmt in unittable:
            if count >= divisor * multiplier:
                return fmt % (count / float(divisor))
        return unittable[-1][2] % count

    return render
2058
2058
# bytecount(n) renders a byte quantity with the widest fitting unit,
# keeping three significant digits (100/10/1 thresholds map to
# 0/1/2 decimal places).
bytecount = unitcountfn(
    (100, 1 << 30, _('%.0f GB')),
    (10, 1 << 30, _('%.1f GB')),
    (1, 1 << 30, _('%.2f GB')),
    (100, 1 << 20, _('%.0f MB')),
    (10, 1 << 20, _('%.1f MB')),
    (1, 1 << 20, _('%.2f MB')),
    (100, 1 << 10, _('%.0f KB')),
    (10, 1 << 10, _('%.1f KB')),
    (1, 1 << 10, _('%.2f KB')),
    (1, 1, _('%.0f bytes')),
    )
2071
2071
def uirepr(s):
    """repr() for user display: collapse the doubled backslashes that
    repr() produces, which otherwise make Windows paths unreadable."""
    return repr(s).replace('\\\\', '\\')
2075
2075
# delay import of textwrap
def MBTextWrapper(**kwargs):
    class tw(textwrap.TextWrapper):
        """
        Extend TextWrapper for width-awareness.

        Neither number of 'bytes' in any encoding nor 'characters' is
        appropriate to calculate terminal columns for specified string.

        Original TextWrapper implementation uses built-in 'len()' directly,
        so overriding is needed to use width information of each characters.

        In addition, characters classified into 'ambiguous' width are
        treated as wide in East Asian area, but as narrow in other.

        This requires use decision to determine width of such characters.
        """
        def _cutdown(self, ucstr, space_left):
            # split ucstr after the longest prefix that fits within
            # space_left display columns
            used = 0
            colwidth = encoding.ucolwidth
            for i in xrange(len(ucstr)):
                used += colwidth(ucstr[i])
                if space_left < used:
                    return (ucstr[:i], ucstr[i:])
            return ucstr, ''

        # overriding of base class
        def _handle_long_word(self, reversed_chunks, cur_line, cur_len, width):
            space_left = max(width - cur_len, 1)

            if self.break_long_words:
                cut, res = self._cutdown(reversed_chunks[-1], space_left)
                cur_line.append(cut)
                reversed_chunks[-1] = res
            elif not cur_line:
                cur_line.append(reversed_chunks.pop())

        # this overriding code is imported from TextWrapper of Python 2.6
        # to calculate columns of string by 'encoding.ucolwidth()'
        def _wrap_chunks(self, chunks):
            colwidth = encoding.ucolwidth

            lines = []
            if self.width <= 0:
                raise ValueError("invalid width %r (must be > 0)" % self.width)

            # Arrange in reverse order so items can be efficiently popped
            # from a stack of chucks.
            chunks.reverse()

            while chunks:

                # Start the list of chunks that will make up the current line.
                # cur_len is just the length of all the chunks in cur_line.
                cur_line = []
                cur_len = 0

                # Figure out which static string will prefix this line.
                if lines:
                    indent = self.subsequent_indent
                else:
                    indent = self.initial_indent

                # Maximum width for this line.
                width = self.width - len(indent)

                # First chunk on line is whitespace -- drop it, unless this
                # is the very beginning of the text (i.e. no lines started yet).
                if self.drop_whitespace and chunks[-1].strip() == '' and lines:
                    del chunks[-1]

                while chunks:
                    l = colwidth(chunks[-1])

                    # Can at least squeeze this chunk onto the current line.
                    if cur_len + l <= width:
                        cur_line.append(chunks.pop())
                        cur_len += l

                    # Nope, this line is full.
                    else:
                        break

                # The current line is full, and the next chunk is too big to
                # fit on *any* line (not just this one).
                if chunks and colwidth(chunks[-1]) > width:
                    self._handle_long_word(chunks, cur_line, cur_len, width)

                # If the last chunk on this line is all whitespace, drop it.
                if (self.drop_whitespace and
                    cur_line and cur_line[-1].strip() == ''):
                    del cur_line[-1]

                # Convert current line back to a string and store it in list
                # of all lines (return value).
                if cur_line:
                    lines.append(indent + ''.join(cur_line))

            return lines

    # cache the class object so later calls skip re-creating it
    global MBTextWrapper
    MBTextWrapper = tw
    return tw(**kwargs)
2179
2179
def wrap(line, width, initindent='', hangindent=''):
    """Wrap the byte string *line* to *width* display columns.

    initindent prefixes the first output line, hangindent every
    subsequent one.  Returns the wrapped text re-encoded in the local
    encoding.
    """
    widest = max(len(hangindent), len(initindent))
    if width <= widest:
        # adjust for weird terminal size: never wrap narrower than the
        # indentation itself
        width = max(78, widest + 1)
    # decode so the column-aware wrapper sees characters, not bytes
    uline = line.decode(encoding.encoding, encoding.encodingmode)
    uinit = initindent.decode(encoding.encoding, encoding.encodingmode)
    uhang = hangindent.decode(encoding.encoding, encoding.encodingmode)
    textwrapper = MBTextWrapper(width=width,
                                initial_indent=uinit,
                                subsequent_indent=uhang)
    return textwrapper.fill(uline).encode(encoding.encoding)
2192
2192
def iterlines(iterator):
    """Yield every individual line of every chunk in *iterator*."""
    for block in iterator:
        # splitlines() copes with any line-ending convention
        for ln in block.splitlines():
            yield ln
2197
2197
def expandpath(path):
    """Expand environment variables, then '~' constructs, in *path*."""
    withvars = os.path.expandvars(path)
    return os.path.expanduser(withvars)
2200
2200
def hgcmd():
    """Return the command used to execute current hg

    This is different from hgexecutable() because on Windows we want
    to avoid things opening new shell windows like batch files, so we
    get either the python call or current executable.
    """
    if not mainfrozen():
        return gethgcmd()
    if getattr(sys, 'frozen', None) == 'macosx_app':
        # Env variable set by py2app
        return [os.environ['EXECUTABLEPATH']]
    return [sys.executable]
2215
2215
def rundetached(args, condfn):
    """Execute the argument list in a detached process.

    condfn is a callable which is called repeatedly and should return
    True once the child process is known to have started successfully.
    At this point, the child process PID is returned. If the child
    process fails to start or finishes before condfn() evaluates to
    True, return -1.
    """
    # Windows case is easier because the child process is either
    # successfully starting and validating the condition or exiting
    # on failure. We just poll on its PID. On Unix, if the child
    # process fails to start, it will be left in a zombie state until
    # the parent wait on it, which we cannot do since we expect a long
    # running process on success. Instead we listen for SIGCHLD telling
    # us our child process terminated.
    terminated = set()
    def handler(signum, frame):
        # NOTE(review): os.wait() returns a (pid, status) tuple, so the
        # 'pid in terminated' membership test below never matches a bare
        # integer pid; child death appears to be detected by testpid()
        # instead -- confirm before relying on 'terminated'.
        terminated.add(os.wait())
    prevhandler = None
    # SIGCHLD only exists on Unix; on Windows we fall back to polling.
    SIGCHLD = getattr(signal, 'SIGCHLD', None)
    if SIGCHLD is not None:
        prevhandler = signal.signal(SIGCHLD, handler)
    try:
        pid = spawndetached(args)
        # Poll until the child reports readiness via condfn(), or dies.
        while not condfn():
            if ((pid in terminated or not testpid(pid))
                and not condfn()):
                return -1
            time.sleep(0.1)
        return pid
    finally:
        # Always restore the previous SIGCHLD disposition.
        if prevhandler is not None:
            signal.signal(signal.SIGCHLD, prevhandler)
2250
2250
2251 def interpolate(prefix, mapping, s, fn=None, escape_prefix=False):
2251 def interpolate(prefix, mapping, s, fn=None, escape_prefix=False):
2252 """Return the result of interpolating items in the mapping into string s.
2252 """Return the result of interpolating items in the mapping into string s.
2253
2253
2254 prefix is a single character string, or a two character string with
2254 prefix is a single character string, or a two character string with
2255 a backslash as the first character if the prefix needs to be escaped in
2255 a backslash as the first character if the prefix needs to be escaped in
2256 a regular expression.
2256 a regular expression.
2257
2257
2258 fn is an optional function that will be applied to the replacement text
2258 fn is an optional function that will be applied to the replacement text
2259 just before replacement.
2259 just before replacement.
2260
2260
2261 escape_prefix is an optional flag that allows using doubled prefix for
2261 escape_prefix is an optional flag that allows using doubled prefix for
2262 its escaping.
2262 its escaping.
2263 """
2263 """
2264 fn = fn or (lambda s: s)
2264 fn = fn or (lambda s: s)
2265 patterns = '|'.join(mapping.keys())
2265 patterns = '|'.join(mapping.keys())
2266 if escape_prefix:
2266 if escape_prefix:
2267 patterns += '|' + prefix
2267 patterns += '|' + prefix
2268 if len(prefix) > 1:
2268 if len(prefix) > 1:
2269 prefix_char = prefix[1:]
2269 prefix_char = prefix[1:]
2270 else:
2270 else:
2271 prefix_char = prefix
2271 prefix_char = prefix
2272 mapping[prefix_char] = prefix_char
2272 mapping[prefix_char] = prefix_char
2273 r = remod.compile(r'%s(%s)' % (prefix, patterns))
2273 r = remod.compile(r'%s(%s)' % (prefix, patterns))
2274 return r.sub(lambda x: fn(mapping[x.group()[1:]]), s)
2274 return r.sub(lambda x: fn(mapping[x.group()[1:]]), s)
2275
2275
def getport(port):
    """Return the port for a given network service.

    Integers (and numeric strings) are returned as ints; any other
    string is resolved with socket.getservbyname().  An unknown service
    name raises error.Abort.
    """
    try:
        return int(port)
    except ValueError:
        # not numeric: treat it as a service name
        try:
            return socket.getservbyname(port)
        except socket.error:
            raise Abort(
                _("no port number associated with service '%s'") % port)
2292
2292
# Recognized textual spellings of boolean configuration values.
_booleans = dict(
    [(word, True) for word in ('1', 'yes', 'true', 'on', 'always')] +
    [(word, False) for word in ('0', 'no', 'false', 'off', 'never')])

def parsebool(s):
    """Map a boolean-ish string to True/False.

    Unrecognized values yield None.
    """
    return _booleans.get(s.lower())
2303
2303
2304 _hextochr = dict((a + b, chr(int(a + b, 16)))
2304 _hextochr = dict((a + b, chr(int(a + b, 16)))
2305 for a in string.hexdigits for b in string.hexdigits)
2305 for a in string.hexdigits for b in string.hexdigits)
2306
2306
class url(object):
    r"""Reliable URL parser.

    This parses URLs and provides attributes for the following
    components:

    <scheme>://<user>:<passwd>@<host>:<port>/<path>?<query>#<fragment>

    Missing components are set to None. The only exception is
    fragment, which is set to '' if present but empty.

    If parsefragment is False, fragment is included in query. If
    parsequery is False, query is included in path. If both are
    False, both fragment and query are included in path.

    See http://www.ietf.org/rfc/rfc2396.txt for more information.

    Note that for backward compatibility reasons, bundle URLs do not
    take host names. That means 'bundle://../' has a path of '../'.

    Examples:

    >>> url('http://www.ietf.org/rfc/rfc2396.txt')
    <url scheme: 'http', host: 'www.ietf.org', path: 'rfc/rfc2396.txt'>
    >>> url('ssh://[::1]:2200//home/joe/repo')
    <url scheme: 'ssh', host: '[::1]', port: '2200', path: '/home/joe/repo'>
    >>> url('file:///home/joe/repo')
    <url scheme: 'file', path: '/home/joe/repo'>
    >>> url('file:///c:/temp/foo/')
    <url scheme: 'file', path: 'c:/temp/foo/'>
    >>> url('bundle:foo')
    <url scheme: 'bundle', path: 'foo'>
    >>> url('bundle://../foo')
    <url scheme: 'bundle', path: '../foo'>
    >>> url(r'c:\foo\bar')
    <url path: 'c:\\foo\\bar'>
    >>> url(r'\\blah\blah\blah')
    <url path: '\\\\blah\\blah\\blah'>
    >>> url(r'\\blah\blah\blah#baz')
    <url path: '\\\\blah\\blah\\blah', fragment: 'baz'>
    >>> url(r'file:///C:\users\me')
    <url scheme: 'file', path: 'C:\\users\\me'>

    Authentication credentials:

    >>> url('ssh://joe:xyz@x/repo')
    <url scheme: 'ssh', user: 'joe', passwd: 'xyz', host: 'x', path: 'repo'>
    >>> url('ssh://joe@x/repo')
    <url scheme: 'ssh', user: 'joe', host: 'x', path: 'repo'>

    Query strings and fragments:

    >>> url('http://host/a?b#c')
    <url scheme: 'http', host: 'host', path: 'a', query: 'b', fragment: 'c'>
    >>> url('http://host/a?b#c', parsequery=False, parsefragment=False)
    <url scheme: 'http', host: 'host', path: 'a?b#c'>

    Empty path:

    >>> url('')
    <url path: ''>
    >>> url('#a')
    <url path: '', fragment: 'a'>
    >>> url('http://host/')
    <url scheme: 'http', host: 'host', path: ''>
    >>> url('http://host/#a')
    <url scheme: 'http', host: 'host', path: '', fragment: 'a'>

    Only scheme:

    >>> url('http:')
    <url scheme: 'http'>
    """

    # characters left unquoted by __str__ in the user/passwd components
    _safechars = "!~*'()+"
    # characters left unquoted by __str__ in the path/fragment components
    _safepchars = "/!~*'()+:\\"
    # matches a leading URL scheme, e.g. 'http:' or 'svn+ssh:'
    _matchscheme = remod.compile('^[a-zA-Z0-9+.\\-]+:').match

    def __init__(self, path, parsequery=True, parsefragment=True):
        # We slowly chomp away at path until we have only the path left
        self.scheme = self.user = self.passwd = self.host = None
        self.port = self.path = self.query = self.fragment = None
        self._localpath = True
        self._hostport = ''
        self._origpath = path

        if parsefragment and '#' in path:
            path, self.fragment = path.split('#', 1)

        # special case for Windows drive letters and UNC paths
        if hasdriveletter(path) or path.startswith('\\\\'):
            self.path = path
            return

        # For compatibility reasons, we can't handle bundle paths as
        # normal URLS
        if path.startswith('bundle:'):
            self.scheme = 'bundle'
            path = path[7:]
            if path.startswith('//'):
                path = path[2:]
            self.path = path
            return

        if self._matchscheme(path):
            parts = path.split(':', 1)
            if parts[0]:
                self.scheme, path = parts
                self._localpath = False

        if not path:
            path = None
            if self._localpath:
                self.path = ''
                return
        else:
            if self._localpath:
                self.path = path
                return

        if parsequery and '?' in path:
            path, self.query = path.split('?', 1)
            if not path:
                path = None
            if not self.query:
                self.query = None

        # // is required to specify a host/authority
        if path and path.startswith('//'):
            parts = path[2:].split('/', 1)
            if len(parts) > 1:
                self.host, path = parts
            else:
                self.host = parts[0]
                path = None
            if not self.host:
                self.host = None
                # path of file:///d is /d
                # path of file:///d:/ is d:/, not /d:/
                if path and not hasdriveletter(path):
                    path = '/' + path

        if self.host and '@' in self.host:
            self.user, self.host = self.host.rsplit('@', 1)
            if ':' in self.user:
                self.user, self.passwd = self.user.split(':', 1)
            if not self.host:
                self.host = None

        # Don't split on colons in IPv6 addresses without ports
        if (self.host and ':' in self.host and
            not (self.host.startswith('[') and self.host.endswith(']'))):
            self._hostport = self.host
            self.host, self.port = self.host.rsplit(':', 1)
            if not self.host:
                self.host = None

        if (self.host and self.scheme == 'file' and
            self.host not in ('localhost', '127.0.0.1', '[::1]')):
            raise Abort(_('file:// URLs can only refer to localhost'))

        self.path = path

        # leave the query string escaped
        for a in ('user', 'passwd', 'host', 'port',
                  'path', 'fragment'):
            v = getattr(self, a)
            if v is not None:
                setattr(self, a, pycompat.urlunquote(v))

    def __repr__(self):
        # show only the components that were actually parsed
        attrs = []
        for a in ('scheme', 'user', 'passwd', 'host', 'port', 'path',
                  'query', 'fragment'):
            v = getattr(self, a)
            if v is not None:
                attrs.append('%s: %r' % (a, v))
        return '<url %s>' % ', '.join(attrs)

    def __str__(self):
        r"""Join the URL's components back into a URL string.

        Examples:

        >>> str(url('http://user:pw@host:80/c:/bob?fo:oo#ba:ar'))
        'http://user:pw@host:80/c:/bob?fo:oo#ba:ar'
        >>> str(url('http://user:pw@host:80/?foo=bar&baz=42'))
        'http://user:pw@host:80/?foo=bar&baz=42'
        >>> str(url('http://user:pw@host:80/?foo=bar%3dbaz'))
        'http://user:pw@host:80/?foo=bar%3dbaz'
        >>> str(url('ssh://user:pw@[::1]:2200//home/joe#'))
        'ssh://user:pw@[::1]:2200//home/joe#'
        >>> str(url('http://localhost:80//'))
        'http://localhost:80//'
        >>> str(url('http://localhost:80/'))
        'http://localhost:80/'
        >>> str(url('http://localhost:80'))
        'http://localhost:80/'
        >>> str(url('bundle:foo'))
        'bundle:foo'
        >>> str(url('bundle://../foo'))
        'bundle:../foo'
        >>> str(url('path'))
        'path'
        >>> str(url('file:///tmp/foo/bar'))
        'file:///tmp/foo/bar'
        >>> str(url('file:///c:/tmp/foo/bar'))
        'file:///c:/tmp/foo/bar'
        >>> print url(r'bundle:foo\bar')
        bundle:foo\bar
        >>> print url(r'file:///D:\data\hg')
        file:///D:\data\hg
        """
        if self._localpath:
            s = self.path
            if self.scheme == 'bundle':
                s = 'bundle:' + s
            if self.fragment:
                s += '#' + self.fragment
            return s

        s = self.scheme + ':'
        if self.user or self.passwd or self.host:
            s += '//'
        elif self.scheme and (not self.path or self.path.startswith('/')
                              or hasdriveletter(self.path)):
            s += '//'
            if hasdriveletter(self.path):
                s += '/'
        if self.user:
            s += urlreq.quote(self.user, safe=self._safechars)
        if self.passwd:
            s += ':' + urlreq.quote(self.passwd, safe=self._safechars)
        if self.user or self.passwd:
            s += '@'
        if self.host:
            if not (self.host.startswith('[') and self.host.endswith(']')):
                s += urlreq.quote(self.host)
            else:
                # IPv6 literal: the brackets and colons must survive as-is
                s += self.host
        if self.port:
            s += ':' + urlreq.quote(self.port)
        if self.host:
            s += '/'
        if self.path:
            # TODO: similar to the query string, we should not unescape the
            # path when we store it, the path might contain '%2f' = '/',
            # which we should *not* escape.
            s += urlreq.quote(self.path, safe=self._safepchars)
        if self.query:
            # we store the query in escaped form.
            s += '?' + self.query
        if self.fragment is not None:
            s += '#' + urlreq.quote(self.fragment, safe=self._safepchars)
        return s

    def authinfo(self):
        """Return (url-without-credentials, urllib2-style auth tuple).

        The second element is None when the URL carries no user name.
        """
        user, passwd = self.user, self.passwd
        try:
            # temporarily strip credentials so str(self) omits them
            self.user, self.passwd = None, None
            s = str(self)
        finally:
            self.user, self.passwd = user, passwd
        if not self.user:
            return (s, None)
        # authinfo[1] is passed to urllib2 password manager, and its
        # URIs must not contain credentials. The host is passed in the
        # URIs list because Python < 2.4.3 uses only that to search for
        # a password.
        return (s, (None, (s, self.host),
                    self.user, self.passwd or ''))

    def isabs(self):
        # True for any URL that cannot meaningfully be joined onto a base
        if self.scheme and self.scheme != 'file':
            return True # remote URL
        if hasdriveletter(self.path):
            return True # absolute for our purposes - can't be joined()
        if self.path.startswith(r'\\'):
            return True # Windows UNC path
        if self.path.startswith('/'):
            return True # POSIX-style
        return False

    def localpath(self):
        # For file: and bundle: URLs, recover a filesystem path; any
        # other scheme falls back to the string originally parsed.
        if self.scheme == 'file' or self.scheme == 'bundle':
            path = self.path or '/'
            # For Windows, we need to promote hosts containing drive
            # letters to paths with drive letters.
            if hasdriveletter(self._hostport):
                path = self._hostport + '/' + self.path
            elif (self.host is not None and self.path
                  and not hasdriveletter(path)):
                path = '/' + path
            return path
        return self._origpath

    def islocal(self):
        '''whether localpath will return something that posixfile can open'''
        return (not self.scheme or self.scheme == 'file'
                or self.scheme == 'bundle')
2607
2607
def hasscheme(path):
    """Report whether *path* parses with a URL scheme (e.g. 'http:...')."""
    parsed = url(path)
    return bool(parsed.scheme)
2610
2610
def hasdriveletter(path):
    """Report whether *path* begins with a Windows drive letter ('c:').

    Falsy inputs are returned unchanged (and are therefore falsy).
    """
    if not path:
        return path
    return path[0:1].isalpha() and path[1:2] == ':'
2613
2613
def urllocalpath(path):
    """Return the local filesystem path for the URL-ish string *path*."""
    parsed = url(path, parsequery=False, parsefragment=False)
    return parsed.localpath()
2616
2616
def hidepassword(u):
    """Return the url string *u* with any embedded password masked."""
    parsed = url(u)
    if parsed.passwd:
        parsed.passwd = '***'
    return str(parsed)
2623
2623
def removeauth(u):
    """Return the url string *u* with user name and password stripped."""
    parsed = url(u)
    parsed.user = None
    parsed.passwd = None
    return str(parsed)
2629
2629
def isatty(fp):
    """Best-effort check whether *fp* refers to an interactive terminal.

    Objects without an isatty() method are reported as non-terminals.
    """
    try:
        result = fp.isatty()
    except AttributeError:
        return False
    return result
2635
2635
# Duration formatter built from unitcountfn (defined earlier in this
# file); each entry is (threshold, divisor, format) covering seconds
# down to nanoseconds.  Presumably the first matching entry is used --
# confirm against unitcountfn's definition.
timecount = unitcountfn(
    (1, 1e3, _('%.0f s')),
    (100, 1, _('%.1f s')),
    (10, 1, _('%.2f s')),
    (1, 1, _('%.3f s')),
    (100, 0.001, _('%.1f ms')),
    (10, 0.001, _('%.2f ms')),
    (1, 0.001, _('%.3f ms')),
    (100, 0.000001, _('%.1f us')),
    (10, 0.000001, _('%.2f us')),
    (1, 0.000001, _('%.3f us')),
    (100, 0.000000001, _('%.1f ns')),
    (10, 0.000000001, _('%.2f ns')),
    (1, 0.000000001, _('%.3f ns')),
    )
2651
2651
2652 _timenesting = [0]
2652 _timenesting = [0]
2653
2653
2654 def timed(func):
2654 def timed(func):
2655 '''Report the execution time of a function call to stderr.
2655 '''Report the execution time of a function call to stderr.
2656
2656
2657 During development, use as a decorator when you need to measure
2657 During development, use as a decorator when you need to measure
2658 the cost of a function, e.g. as follows:
2658 the cost of a function, e.g. as follows:
2659
2659
2660 @util.timed
2660 @util.timed
2661 def foo(a, b, c):
2661 def foo(a, b, c):
2662 pass
2662 pass
2663 '''
2663 '''
2664
2664
2665 def wrapper(*args, **kwargs):
2665 def wrapper(*args, **kwargs):
2666 start = time.time()
2666 start = time.time()
2667 indent = 2
2667 indent = 2
2668 _timenesting[0] += indent
2668 _timenesting[0] += indent
2669 try:
2669 try:
2670 return func(*args, **kwargs)
2670 return func(*args, **kwargs)
2671 finally:
2671 finally:
2672 elapsed = time.time() - start
2672 elapsed = time.time() - start
2673 _timenesting[0] -= indent
2673 _timenesting[0] -= indent
2674 sys.stderr.write('%s%s: %s\n' %
2674 sys.stderr.write('%s%s: %s\n' %
2675 (' ' * _timenesting[0], func.__name__,
2675 (' ' * _timenesting[0], func.__name__,
2676 timecount(elapsed)))
2676 timecount(elapsed)))
2677 return wrapper
2677 return wrapper
2678
2678
2679 _sizeunits = (('m', 2**20), ('k', 2**10), ('g', 2**30),
2679 _sizeunits = (('m', 2**20), ('k', 2**10), ('g', 2**30),
2680 ('kb', 2**10), ('mb', 2**20), ('gb', 2**30), ('b', 1))
2680 ('kb', 2**10), ('mb', 2**20), ('gb', 2**30), ('b', 1))
2681
2681
2682 def sizetoint(s):
2682 def sizetoint(s):
2683 '''Convert a space specifier to a byte count.
2683 '''Convert a space specifier to a byte count.
2684
2684
2685 >>> sizetoint('30')
2685 >>> sizetoint('30')
2686 30
2686 30
2687 >>> sizetoint('2.2kb')
2687 >>> sizetoint('2.2kb')
2688 2252
2688 2252
2689 >>> sizetoint('6M')
2689 >>> sizetoint('6M')
2690 6291456
2690 6291456
2691 '''
2691 '''
2692 t = s.strip().lower()
2692 t = s.strip().lower()
2693 try:
2693 try:
2694 for k, u in _sizeunits:
2694 for k, u in _sizeunits:
2695 if t.endswith(k):
2695 if t.endswith(k):
2696 return int(float(t[:-len(k)]) * u)
2696 return int(float(t[:-len(k)]) * u)
2697 return int(t)
2697 return int(t)
2698 except ValueError:
2698 except ValueError:
2699 raise error.ParseError(_("couldn't parse size: %s") % s)
2699 raise error.ParseError(_("couldn't parse size: %s") % s)
2700
2700
2701 class hooks(object):
2701 class hooks(object):
2702 '''A collection of hook functions that can be used to extend a
2702 '''A collection of hook functions that can be used to extend a
2703 function's behavior. Hooks are called in lexicographic order,
2703 function's behavior. Hooks are called in lexicographic order,
2704 based on the names of their sources.'''
2704 based on the names of their sources.'''
2705
2705
2706 def __init__(self):
2706 def __init__(self):
2707 self._hooks = []
2707 self._hooks = []
2708
2708
2709 def add(self, source, hook):
2709 def add(self, source, hook):
2710 self._hooks.append((source, hook))
2710 self._hooks.append((source, hook))
2711
2711
2712 def __call__(self, *args):
2712 def __call__(self, *args):
2713 self._hooks.sort(key=lambda x: x[0])
2713 self._hooks.sort(key=lambda x: x[0])
2714 results = []
2714 results = []
2715 for source, hook in self._hooks:
2715 for source, hook in self._hooks:
2716 results.append(hook(*args))
2716 results.append(hook(*args))
2717 return results
2717 return results
2718
2718
2719 def getstackframes(skip=0, line=' %-*s in %s\n', fileline='%s:%s'):
2719 def getstackframes(skip=0, line=' %-*s in %s\n', fileline='%s:%s'):
2720 '''Yields lines for a nicely formatted stacktrace.
2720 '''Yields lines for a nicely formatted stacktrace.
2721 Skips the 'skip' last entries.
2721 Skips the 'skip' last entries.
2722 Each file+linenumber is formatted according to fileline.
2722 Each file+linenumber is formatted according to fileline.
2723 Each line is formatted according to line.
2723 Each line is formatted according to line.
2724 If line is None, it yields:
2724 If line is None, it yields:
2725 length of longest filepath+line number,
2725 length of longest filepath+line number,
2726 filepath+linenumber,
2726 filepath+linenumber,
2727 function
2727 function
2728
2728
2729 Not be used in production code but very convenient while developing.
2729 Not be used in production code but very convenient while developing.
2730 '''
2730 '''
2731 entries = [(fileline % (fn, ln), func)
2731 entries = [(fileline % (fn, ln), func)
2732 for fn, ln, func, _text in traceback.extract_stack()[:-skip - 1]]
2732 for fn, ln, func, _text in traceback.extract_stack()[:-skip - 1]]
2733 if entries:
2733 if entries:
2734 fnmax = max(len(entry[0]) for entry in entries)
2734 fnmax = max(len(entry[0]) for entry in entries)
2735 for fnln, func in entries:
2735 for fnln, func in entries:
2736 if line is None:
2736 if line is None:
2737 yield (fnmax, fnln, func)
2737 yield (fnmax, fnln, func)
2738 else:
2738 else:
2739 yield line % (fnmax, fnln, func)
2739 yield line % (fnmax, fnln, func)
2740
2740
2741 def debugstacktrace(msg='stacktrace', skip=0, f=sys.stderr, otherf=sys.stdout):
2741 def debugstacktrace(msg='stacktrace', skip=0, f=sys.stderr, otherf=sys.stdout):
2742 '''Writes a message to f (stderr) with a nicely formatted stacktrace.
2742 '''Writes a message to f (stderr) with a nicely formatted stacktrace.
2743 Skips the 'skip' last entries. By default it will flush stdout first.
2743 Skips the 'skip' last entries. By default it will flush stdout first.
2744 It can be used everywhere and intentionally does not require an ui object.
2744 It can be used everywhere and intentionally does not require an ui object.
2745 Not be used in production code but very convenient while developing.
2745 Not be used in production code but very convenient while developing.
2746 '''
2746 '''
2747 if otherf:
2747 if otherf:
2748 otherf.flush()
2748 otherf.flush()
2749 f.write('%s at:\n' % msg)
2749 f.write('%s at:\n' % msg)
2750 for line in getstackframes(skip + 1):
2750 for line in getstackframes(skip + 1):
2751 f.write(line)
2751 f.write(line)
2752 f.flush()
2752 f.flush()
2753
2753
2754 class dirs(object):
2754 class dirs(object):
2755 '''a multiset of directory names from a dirstate or manifest'''
2755 '''a multiset of directory names from a dirstate or manifest'''
2756
2756
2757 def __init__(self, map, skip=None):
2757 def __init__(self, map, skip=None):
2758 self._dirs = {}
2758 self._dirs = {}
2759 addpath = self.addpath
2759 addpath = self.addpath
2760 if safehasattr(map, 'iteritems') and skip is not None:
2760 if safehasattr(map, 'iteritems') and skip is not None:
2761 for f, s in map.iteritems():
2761 for f, s in map.iteritems():
2762 if s[0] != skip:
2762 if s[0] != skip:
2763 addpath(f)
2763 addpath(f)
2764 else:
2764 else:
2765 for f in map:
2765 for f in map:
2766 addpath(f)
2766 addpath(f)
2767
2767
2768 def addpath(self, path):
2768 def addpath(self, path):
2769 dirs = self._dirs
2769 dirs = self._dirs
2770 for base in finddirs(path):
2770 for base in finddirs(path):
2771 if base in dirs:
2771 if base in dirs:
2772 dirs[base] += 1
2772 dirs[base] += 1
2773 return
2773 return
2774 dirs[base] = 1
2774 dirs[base] = 1
2775
2775
2776 def delpath(self, path):
2776 def delpath(self, path):
2777 dirs = self._dirs
2777 dirs = self._dirs
2778 for base in finddirs(path):
2778 for base in finddirs(path):
2779 if dirs[base] > 1:
2779 if dirs[base] > 1:
2780 dirs[base] -= 1
2780 dirs[base] -= 1
2781 return
2781 return
2782 del dirs[base]
2782 del dirs[base]
2783
2783
2784 def __iter__(self):
2784 def __iter__(self):
2785 return self._dirs.iterkeys()
2785 return self._dirs.iterkeys()
2786
2786
2787 def __contains__(self, d):
2787 def __contains__(self, d):
2788 return d in self._dirs
2788 return d in self._dirs
2789
2789
2790 if safehasattr(parsers, 'dirs'):
2790 if safehasattr(parsers, 'dirs'):
2791 dirs = parsers.dirs
2791 dirs = parsers.dirs
2792
2792
2793 def finddirs(path):
2793 def finddirs(path):
2794 pos = path.rfind('/')
2794 pos = path.rfind('/')
2795 while pos != -1:
2795 while pos != -1:
2796 yield path[:pos]
2796 yield path[:pos]
2797 pos = path.rfind('/', 0, pos)
2797 pos = path.rfind('/', 0, pos)
2798
2798
2799 class ctxmanager(object):
2799 class ctxmanager(object):
2800 '''A context manager for use in 'with' blocks to allow multiple
2800 '''A context manager for use in 'with' blocks to allow multiple
2801 contexts to be entered at once. This is both safer and more
2801 contexts to be entered at once. This is both safer and more
2802 flexible than contextlib.nested.
2802 flexible than contextlib.nested.
2803
2803
2804 Once Mercurial supports Python 2.7+, this will become mostly
2804 Once Mercurial supports Python 2.7+, this will become mostly
2805 unnecessary.
2805 unnecessary.
2806 '''
2806 '''
2807
2807
2808 def __init__(self, *args):
2808 def __init__(self, *args):
2809 '''Accepts a list of no-argument functions that return context
2809 '''Accepts a list of no-argument functions that return context
2810 managers. These will be invoked at __call__ time.'''
2810 managers. These will be invoked at __call__ time.'''
2811 self._pending = args
2811 self._pending = args
2812 self._atexit = []
2812 self._atexit = []
2813
2813
2814 def __enter__(self):
2814 def __enter__(self):
2815 return self
2815 return self
2816
2816
2817 def enter(self):
2817 def enter(self):
2818 '''Create and enter context managers in the order in which they were
2818 '''Create and enter context managers in the order in which they were
2819 passed to the constructor.'''
2819 passed to the constructor.'''
2820 values = []
2820 values = []
2821 for func in self._pending:
2821 for func in self._pending:
2822 obj = func()
2822 obj = func()
2823 values.append(obj.__enter__())
2823 values.append(obj.__enter__())
2824 self._atexit.append(obj.__exit__)
2824 self._atexit.append(obj.__exit__)
2825 del self._pending
2825 del self._pending
2826 return values
2826 return values
2827
2827
2828 def atexit(self, func, *args, **kwargs):
2828 def atexit(self, func, *args, **kwargs):
2829 '''Add a function to call when this context manager exits. The
2829 '''Add a function to call when this context manager exits. The
2830 ordering of multiple atexit calls is unspecified, save that
2830 ordering of multiple atexit calls is unspecified, save that
2831 they will happen before any __exit__ functions.'''
2831 they will happen before any __exit__ functions.'''
2832 def wrapper(exc_type, exc_val, exc_tb):
2832 def wrapper(exc_type, exc_val, exc_tb):
2833 func(*args, **kwargs)
2833 func(*args, **kwargs)
2834 self._atexit.append(wrapper)
2834 self._atexit.append(wrapper)
2835 return func
2835 return func
2836
2836
2837 def __exit__(self, exc_type, exc_val, exc_tb):
2837 def __exit__(self, exc_type, exc_val, exc_tb):
2838 '''Context managers are exited in the reverse order from which
2838 '''Context managers are exited in the reverse order from which
2839 they were created.'''
2839 they were created.'''
2840 received = exc_type is not None
2840 received = exc_type is not None
2841 suppressed = False
2841 suppressed = False
2842 pending = None
2842 pending = None
2843 self._atexit.reverse()
2843 self._atexit.reverse()
2844 for exitfunc in self._atexit:
2844 for exitfunc in self._atexit:
2845 try:
2845 try:
2846 if exitfunc(exc_type, exc_val, exc_tb):
2846 if exitfunc(exc_type, exc_val, exc_tb):
2847 suppressed = True
2847 suppressed = True
2848 exc_type = None
2848 exc_type = None
2849 exc_val = None
2849 exc_val = None
2850 exc_tb = None
2850 exc_tb = None
2851 except BaseException:
2851 except BaseException:
2852 pending = sys.exc_info()
2852 pending = sys.exc_info()
2853 exc_type, exc_val, exc_tb = pending = sys.exc_info()
2853 exc_type, exc_val, exc_tb = pending = sys.exc_info()
2854 del self._atexit
2854 del self._atexit
2855 if pending:
2855 if pending:
2856 raise exc_val
2856 raise exc_val
2857 return received and suppressed
2857 return received and suppressed
2858
2858
2859 # compression code
2859 # compression code
2860
2860
2861 class compressormanager(object):
2861 class compressormanager(object):
2862 """Holds registrations of various compression engines.
2862 """Holds registrations of various compression engines.
2863
2863
2864 This class essentially abstracts the differences between compression
2864 This class essentially abstracts the differences between compression
2865 engines to allow new compression formats to be added easily, possibly from
2865 engines to allow new compression formats to be added easily, possibly from
2866 extensions.
2866 extensions.
2867
2867
2868 Compressors are registered against the global instance by calling its
2868 Compressors are registered against the global instance by calling its
2869 ``register()`` method.
2869 ``register()`` method.
2870 """
2870 """
2871 def __init__(self):
2871 def __init__(self):
2872 self._engines = {}
2872 self._engines = {}
2873 # Bundle spec human name to engine name.
2873 # Bundle spec human name to engine name.
2874 self._bundlenames = {}
2874 self._bundlenames = {}
2875 # Internal bundle identifier to engine name.
2875 # Internal bundle identifier to engine name.
2876 self._bundletypes = {}
2876 self._bundletypes = {}
2877
2877
2878 def __getitem__(self, key):
2878 def __getitem__(self, key):
2879 return self._engines[key]
2879 return self._engines[key]
2880
2880
2881 def __contains__(self, key):
2881 def __contains__(self, key):
2882 return key in self._engines
2882 return key in self._engines
2883
2883
2884 def __iter__(self):
2884 def __iter__(self):
2885 return iter(self._engines.keys())
2885 return iter(self._engines.keys())
2886
2886
2887 def register(self, engine):
2887 def register(self, engine):
2888 """Register a compression engine with the manager.
2888 """Register a compression engine with the manager.
2889
2889
2890 The argument must be a ``compressionengine`` instance.
2890 The argument must be a ``compressionengine`` instance.
2891 """
2891 """
2892 if not isinstance(engine, compressionengine):
2892 if not isinstance(engine, compressionengine):
2893 raise ValueError(_('argument must be a compressionengine'))
2893 raise ValueError(_('argument must be a compressionengine'))
2894
2894
2895 name = engine.name()
2895 name = engine.name()
2896
2896
2897 if name in self._engines:
2897 if name in self._engines:
2898 raise error.Abort(_('compression engine %s already registered') %
2898 raise error.Abort(_('compression engine %s already registered') %
2899 name)
2899 name)
2900
2900
2901 bundleinfo = engine.bundletype()
2901 bundleinfo = engine.bundletype()
2902 if bundleinfo:
2902 if bundleinfo:
2903 bundlename, bundletype = bundleinfo
2903 bundlename, bundletype = bundleinfo
2904
2904
2905 if bundlename in self._bundlenames:
2905 if bundlename in self._bundlenames:
2906 raise error.Abort(_('bundle name %s already registered') %
2906 raise error.Abort(_('bundle name %s already registered') %
2907 bundlename)
2907 bundlename)
2908 if bundletype in self._bundletypes:
2908 if bundletype in self._bundletypes:
2909 raise error.Abort(_('bundle type %s already registered by %s') %
2909 raise error.Abort(_('bundle type %s already registered by %s') %
2910 (bundletype, self._bundletypes[bundletype]))
2910 (bundletype, self._bundletypes[bundletype]))
2911
2911
2912 # No external facing name declared.
2912 # No external facing name declared.
2913 if bundlename:
2913 if bundlename:
2914 self._bundlenames[bundlename] = name
2914 self._bundlenames[bundlename] = name
2915
2915
2916 self._bundletypes[bundletype] = name
2916 self._bundletypes[bundletype] = name
2917
2917
2918 self._engines[name] = engine
2918 self._engines[name] = engine
2919
2919
2920 @property
2920 @property
2921 def supportedbundlenames(self):
2921 def supportedbundlenames(self):
2922 return set(self._bundlenames.keys())
2922 return set(self._bundlenames.keys())
2923
2923
2924 @property
2924 @property
2925 def supportedbundletypes(self):
2925 def supportedbundletypes(self):
2926 return set(self._bundletypes.keys())
2926 return set(self._bundletypes.keys())
2927
2927
2928 def forbundlename(self, bundlename):
2928 def forbundlename(self, bundlename):
2929 """Obtain a compression engine registered to a bundle name.
2929 """Obtain a compression engine registered to a bundle name.
2930
2930
2931 Will raise KeyError if the bundle type isn't registered.
2931 Will raise KeyError if the bundle type isn't registered.
2932 """
2932 """
2933 return self._engines[self._bundlenames[bundlename]]
2933 return self._engines[self._bundlenames[bundlename]]
2934
2934
2935 def forbundletype(self, bundletype):
2935 def forbundletype(self, bundletype):
2936 """Obtain a compression engine registered to a bundle type.
2936 """Obtain a compression engine registered to a bundle type.
2937
2937
2938 Will raise KeyError if the bundle type isn't registered.
2938 Will raise KeyError if the bundle type isn't registered.
2939 """
2939 """
2940 return self._engines[self._bundletypes[bundletype]]
2940 return self._engines[self._bundletypes[bundletype]]
2941
2941
2942 compengines = compressormanager()
2942 compengines = compressormanager()
2943
2943
2944 class compressionengine(object):
2944 class compressionengine(object):
2945 """Base class for compression engines.
2945 """Base class for compression engines.
2946
2946
2947 Compression engines must implement the interface defined by this class.
2947 Compression engines must implement the interface defined by this class.
2948 """
2948 """
2949 def name(self):
2949 def name(self):
2950 """Returns the name of the compression engine.
2950 """Returns the name of the compression engine.
2951
2951
2952 This is the key the engine is registered under.
2952 This is the key the engine is registered under.
2953
2953
2954 This method must be implemented.
2954 This method must be implemented.
2955 """
2955 """
2956 raise NotImplementedError()
2956 raise NotImplementedError()
2957
2957
2958 def bundletype(self):
2958 def bundletype(self):
2959 """Describes bundle identifiers for this engine.
2959 """Describes bundle identifiers for this engine.
2960
2960
2961 If this compression engine isn't supported for bundles, returns None.
2961 If this compression engine isn't supported for bundles, returns None.
2962
2962
2963 If this engine can be used for bundles, returns a 2-tuple of strings of
2963 If this engine can be used for bundles, returns a 2-tuple of strings of
2964 the user-facing "bundle spec" compression name and an internal
2964 the user-facing "bundle spec" compression name and an internal
2965 identifier used to denote the compression format within bundles. To
2965 identifier used to denote the compression format within bundles. To
2966 exclude the name from external usage, set the first element to ``None``.
2966 exclude the name from external usage, set the first element to ``None``.
2967
2967
2968 If bundle compression is supported, the class must also implement
2968 If bundle compression is supported, the class must also implement
2969 ``compressstream``, ``compressorobj`` and `decompressorreader``.
2969 ``compressstream`` and `decompressorreader``.
2970 """
2970 """
2971 return None
2971 return None
2972
2972
2973 def compressstream(self, it, opts=None):
2973 def compressstream(self, it, opts=None):
2974 """Compress an iterator of chunks.
2974 """Compress an iterator of chunks.
2975
2975
2976 The method receives an iterator (ideally a generator) of chunks of
2976 The method receives an iterator (ideally a generator) of chunks of
2977 bytes to be compressed. It returns an iterator (ideally a generator)
2977 bytes to be compressed. It returns an iterator (ideally a generator)
2978 of bytes of chunks representing the compressed output.
2978 of bytes of chunks representing the compressed output.
2979
2979
2980 Optionally accepts an argument defining how to perform compression.
2980 Optionally accepts an argument defining how to perform compression.
2981 Each engine treats this argument differently.
2981 Each engine treats this argument differently.
2982 """
2982 """
2983 raise NotImplementedError()
2983 raise NotImplementedError()
2984
2984
2985 def compressorobj(self):
2986 """(Temporary) Obtain an object used for compression.
2987
2988 The returned object has ``compress(data)`` and ``flush()`` methods.
2989 These are used to incrementally feed data chunks into a compressor.
2990 """
2991 raise NotImplementedError()
2992
2993 def decompressorreader(self, fh):
2985 def decompressorreader(self, fh):
2994 """Perform decompression on a file object.
2986 """Perform decompression on a file object.
2995
2987
2996 Argument is an object with a ``read(size)`` method that returns
2988 Argument is an object with a ``read(size)`` method that returns
2997 compressed data. Return value is an object with a ``read(size)`` that
2989 compressed data. Return value is an object with a ``read(size)`` that
2998 returns uncompressed data.
2990 returns uncompressed data.
2999 """
2991 """
3000 raise NotImplementedError()
2992 raise NotImplementedError()
3001
2993
3002 class _zlibengine(compressionengine):
2994 class _zlibengine(compressionengine):
3003 def name(self):
2995 def name(self):
3004 return 'zlib'
2996 return 'zlib'
3005
2997
3006 def bundletype(self):
2998 def bundletype(self):
3007 return 'gzip', 'GZ'
2999 return 'gzip', 'GZ'
3008
3000
3009 def compressorobj(self):
3010 return zlib.compressobj()
3011
3012 def compressstream(self, it, opts=None):
3001 def compressstream(self, it, opts=None):
3013 opts = opts or {}
3002 opts = opts or {}
3014
3003
3015 z = zlib.compressobj(opts.get('level', -1))
3004 z = zlib.compressobj(opts.get('level', -1))
3016 for chunk in it:
3005 for chunk in it:
3017 data = z.compress(chunk)
3006 data = z.compress(chunk)
3018 # Not all calls to compress emit data. It is cheaper to inspect
3007 # Not all calls to compress emit data. It is cheaper to inspect
3019 # here than to feed empty chunks through generator.
3008 # here than to feed empty chunks through generator.
3020 if data:
3009 if data:
3021 yield data
3010 yield data
3022
3011
3023 yield z.flush()
3012 yield z.flush()
3024
3013
3025 def decompressorreader(self, fh):
3014 def decompressorreader(self, fh):
3026 def gen():
3015 def gen():
3027 d = zlib.decompressobj()
3016 d = zlib.decompressobj()
3028 for chunk in filechunkiter(fh):
3017 for chunk in filechunkiter(fh):
3029 yield d.decompress(chunk)
3018 yield d.decompress(chunk)
3030
3019
3031 return chunkbuffer(gen())
3020 return chunkbuffer(gen())
3032
3021
3033 compengines.register(_zlibengine())
3022 compengines.register(_zlibengine())
3034
3023
3035 class _bz2engine(compressionengine):
3024 class _bz2engine(compressionengine):
3036 def name(self):
3025 def name(self):
3037 return 'bz2'
3026 return 'bz2'
3038
3027
3039 def bundletype(self):
3028 def bundletype(self):
3040 return 'bzip2', 'BZ'
3029 return 'bzip2', 'BZ'
3041
3030
3042 def compressorobj(self):
3043 return bz2.BZ2Compressor()
3044
3045 def compressstream(self, it, opts=None):
3031 def compressstream(self, it, opts=None):
3046 opts = opts or {}
3032 opts = opts or {}
3047 z = bz2.BZ2Compressor(opts.get('level', 9))
3033 z = bz2.BZ2Compressor(opts.get('level', 9))
3048 for chunk in it:
3034 for chunk in it:
3049 data = z.compress(chunk)
3035 data = z.compress(chunk)
3050 if data:
3036 if data:
3051 yield data
3037 yield data
3052
3038
3053 yield z.flush()
3039 yield z.flush()
3054
3040
3055 def decompressorreader(self, fh):
3041 def decompressorreader(self, fh):
3056 def gen():
3042 def gen():
3057 d = bz2.BZ2Decompressor()
3043 d = bz2.BZ2Decompressor()
3058 for chunk in filechunkiter(fh):
3044 for chunk in filechunkiter(fh):
3059 yield d.decompress(chunk)
3045 yield d.decompress(chunk)
3060
3046
3061 return chunkbuffer(gen())
3047 return chunkbuffer(gen())
3062
3048
3063 compengines.register(_bz2engine())
3049 compengines.register(_bz2engine())
3064
3050
3065 class _truncatedbz2engine(compressionengine):
3051 class _truncatedbz2engine(compressionengine):
3066 def name(self):
3052 def name(self):
3067 return 'bz2truncated'
3053 return 'bz2truncated'
3068
3054
3069 def bundletype(self):
3055 def bundletype(self):
3070 return None, '_truncatedBZ'
3056 return None, '_truncatedBZ'
3071
3057
3072 # We don't implement compressorobj because it is hackily handled elsewhere.
3058 # We don't implement compressstream because it is hackily handled elsewhere.
3073
3059
3074 def decompressorreader(self, fh):
3060 def decompressorreader(self, fh):
3075 def gen():
3061 def gen():
3076 # The input stream doesn't have the 'BZ' header. So add it back.
3062 # The input stream doesn't have the 'BZ' header. So add it back.
3077 d = bz2.BZ2Decompressor()
3063 d = bz2.BZ2Decompressor()
3078 d.decompress('BZ')
3064 d.decompress('BZ')
3079 for chunk in filechunkiter(fh):
3065 for chunk in filechunkiter(fh):
3080 yield d.decompress(chunk)
3066 yield d.decompress(chunk)
3081
3067
3082 return chunkbuffer(gen())
3068 return chunkbuffer(gen())
3083
3069
3084 compengines.register(_truncatedbz2engine())
3070 compengines.register(_truncatedbz2engine())
3085
3071
3086 class nocompress(object):
3087 def compress(self, x):
3088 return x
3089
3090 def flush(self):
3091 return ''
3092
3093 class _noopengine(compressionengine):
3072 class _noopengine(compressionengine):
3094 def name(self):
3073 def name(self):
3095 return 'none'
3074 return 'none'
3096
3075
3097 def bundletype(self):
3076 def bundletype(self):
3098 return 'none', 'UN'
3077 return 'none', 'UN'
3099
3078
3100 def compressorobj(self):
3101 return nocompress()
3102
3103 def compressstream(self, it, opts=None):
3079 def compressstream(self, it, opts=None):
3104 return it
3080 return it
3105
3081
3106 def decompressorreader(self, fh):
3082 def decompressorreader(self, fh):
3107 return fh
3083 return fh
3108
3084
3109 compengines.register(_noopengine())
3085 compengines.register(_noopengine())
3110
3086
3111 # convenient shortcut
3087 # convenient shortcut
3112 dst = debugstacktrace
3088 dst = debugstacktrace
General Comments 0
You need to be logged in to leave comments. Login now