##// END OF EJS Templates
util: check for compression engine availability before returning...
Gregory Szorc -
r30438:90933e4e default
parent child Browse files
Show More
@@ -1,3189 +1,3201 b''
1 # util.py - Mercurial utility functions and platform specific implementations
1 # util.py - Mercurial utility functions and platform specific implementations
2 #
2 #
3 # Copyright 2005 K. Thananchayan <thananck@yahoo.com>
3 # Copyright 2005 K. Thananchayan <thananck@yahoo.com>
4 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
5 # Copyright 2006 Vadim Gelfer <vadim.gelfer@gmail.com>
5 # Copyright 2006 Vadim Gelfer <vadim.gelfer@gmail.com>
6 #
6 #
7 # This software may be used and distributed according to the terms of the
7 # This software may be used and distributed according to the terms of the
8 # GNU General Public License version 2 or any later version.
8 # GNU General Public License version 2 or any later version.
9
9
10 """Mercurial utility functions and platform specific implementations.
10 """Mercurial utility functions and platform specific implementations.
11
11
12 This contains helper routines that are independent of the SCM core and
12 This contains helper routines that are independent of the SCM core and
13 hide platform-specific details from the core.
13 hide platform-specific details from the core.
14 """
14 """
15
15
16 from __future__ import absolute_import
16 from __future__ import absolute_import
17
17
18 import bz2
18 import bz2
19 import calendar
19 import calendar
20 import collections
20 import collections
21 import datetime
21 import datetime
22 import errno
22 import errno
23 import gc
23 import gc
24 import hashlib
24 import hashlib
25 import imp
25 import imp
26 import os
26 import os
27 import platform as pyplatform
27 import platform as pyplatform
28 import re as remod
28 import re as remod
29 import shutil
29 import shutil
30 import signal
30 import signal
31 import socket
31 import socket
32 import stat
32 import stat
33 import string
33 import string
34 import subprocess
34 import subprocess
35 import sys
35 import sys
36 import tempfile
36 import tempfile
37 import textwrap
37 import textwrap
38 import time
38 import time
39 import traceback
39 import traceback
40 import zlib
40 import zlib
41
41
42 from . import (
42 from . import (
43 encoding,
43 encoding,
44 error,
44 error,
45 i18n,
45 i18n,
46 osutil,
46 osutil,
47 parsers,
47 parsers,
48 pycompat,
48 pycompat,
49 )
49 )
50
50
# Re-export the Python 2/3 compatibility aliases from pycompat into this
# module's namespace so callers can use e.g. util.queue / util.stringio.
# 'urlreq' is handled separately after the loop (see comment below).
for attr in (
    'empty',
    'httplib',
    'httpserver',
    'pickle',
    'queue',
    'urlerr',
    'urlparse',
    # we do import urlreq, but we do it outside the loop
    #'urlreq',
    'stringio',
    'socketserver',
    'xmlrpclib',
):
    a = pycompat.sysstr(attr)
    globals()[a] = getattr(pycompat, a)

# This line is to make pyflakes happy:
urlreq = pycompat.urlreq
70
70
# Select the platform-specific implementation module.  Every name pulled
# off 'platform' below resolves to either windows.py or posix.py.
if os.name == 'nt':
    from . import windows as platform
else:
    from . import posix as platform

# shorthand for gettext
_ = i18n._
77
77
# Flat module-level aliases for the platform-specific implementations so
# callers write util.rename(...) etc. without caring about the host OS.
bindunixsocket = platform.bindunixsocket
cachestat = platform.cachestat
checkexec = platform.checkexec
checklink = platform.checklink
copymode = platform.copymode
executablepath = platform.executablepath
expandglobs = platform.expandglobs
explainexit = platform.explainexit
findexe = platform.findexe
gethgcmd = platform.gethgcmd
getuser = platform.getuser
getpid = os.getpid
groupmembers = platform.groupmembers
groupname = platform.groupname
hidewindow = platform.hidewindow
isexec = platform.isexec
isowner = platform.isowner
localpath = platform.localpath
lookupreg = platform.lookupreg
makedir = platform.makedir
nlinks = platform.nlinks
normpath = platform.normpath
normcase = platform.normcase
normcasespec = platform.normcasespec
normcasefallback = platform.normcasefallback
openhardlinks = platform.openhardlinks
oslink = platform.oslink
parsepatchoutput = platform.parsepatchoutput
pconvert = platform.pconvert
poll = platform.poll
popen = platform.popen
posixfile = platform.posixfile
quotecommand = platform.quotecommand
readpipe = platform.readpipe
rename = platform.rename
removedirs = platform.removedirs
samedevice = platform.samedevice
samefile = platform.samefile
samestat = platform.samestat
setbinary = platform.setbinary
setflags = platform.setflags
setsignalhandler = platform.setsignalhandler
shellquote = platform.shellquote
spawndetached = platform.spawndetached
split = platform.split
sshargs = platform.sshargs
# prefer the C implementation from osutil when it provides statfiles
statfiles = getattr(osutil, 'statfiles', platform.statfiles)
statisexec = platform.statisexec
statislink = platform.statislink
testpid = platform.testpid
umask = platform.umask
unlink = platform.unlink
unlinkpath = platform.unlinkpath
username = platform.username
132
132
# Python compatibility

# Unique sentinel used to distinguish "no value supplied" from None.
_notset = object()

# disable Python's problematic floating point timestamps (issue4836)
# (Python hypocritically says you shouldn't change this behavior in
# libraries, and sure enough Mercurial is not a library.)
os.stat_float_times(False)
141
141
def safehasattr(thing, attr):
    """Return True if `thing` has an attribute named `attr`.

    Implemented with getattr() and a private sentinel rather than
    hasattr() so an attribute whose value is None is still detected.
    """
    return getattr(thing, attr, _notset) is not _notset
144
144
# Supported digest algorithms, mapped to their hashlib constructors.
DIGESTS = {
    'md5': hashlib.md5,
    'sha1': hashlib.sha1,
    'sha512': hashlib.sha512,
}
# List of digest types from strongest to weakest
DIGESTS_BY_STRENGTH = ['sha512', 'sha1', 'md5']

# sanity check: every entry in the preference list must be a known digest
for k in DIGESTS_BY_STRENGTH:
    assert k in DIGESTS
155
155
class digester(object):
    """helper to compute digests.

    This helper can be used to compute one or more digests given their name.

    >>> d = digester(['md5', 'sha1'])
    >>> d.update('foo')
    >>> [k for k in sorted(d)]
    ['md5', 'sha1']
    >>> d['md5']
    'acbd18db4cc2f85cedef654fccc4a4d8'
    >>> d['sha1']
    '0beec7b5ea3f0fdbc95d0dd47f3c5bc275da8a33'
    >>> digester.preferred(['md5', 'sha1'])
    'sha1'
    """

    def __init__(self, digests, s=''):
        """Create hash objects for every name in `digests`; `s` is
        optional initial data to feed them."""
        self._hashes = {}
        for k in digests:
            if k not in DIGESTS:
                raise Abort(_('unknown digest type: %s') % k)
            self._hashes[k] = DIGESTS[k]()
        if s:
            self.update(s)

    def update(self, data):
        """Feed `data` into every tracked hash object."""
        for h in self._hashes.values():
            h.update(data)

    def __getitem__(self, key):
        """Return the hex digest for digest type `key`."""
        if key not in DIGESTS:
            # BUG FIX: the original interpolated the undefined loop
            # variable 'k' here, turning the intended Abort into a
            # NameError; use the actual lookup key.
            raise Abort(_('unknown digest type: %s') % key)
        return self._hashes[key].hexdigest()

    def __iter__(self):
        return iter(self._hashes)

    @staticmethod
    def preferred(supported):
        """returns the strongest digest type in both supported and DIGESTS."""

        for k in DIGESTS_BY_STRENGTH:
            if k in supported:
                return k
        return None
202
202
class digestchecker(object):
    """file handle wrapper that additionally checks content against a given
    size and digests.

    d = digestchecker(fh, size, {'md5': '...'})

    When multiple digests are given, all of them are validated.
    """

    def __init__(self, fh, size, digests):
        self._fh = fh
        self._size = size
        self._got = 0
        self._digests = dict(digests)
        self._digester = digester(self._digests.keys())

    def read(self, length=-1):
        # Feed everything read through the wrapped handle into the
        # digester and track the total byte count for validate().
        data = self._fh.read(length)
        self._digester.update(data)
        self._got += len(data)
        return data

    def validate(self):
        """Abort unless the bytes read match the expected size and every
        expected digest."""
        if self._got != self._size:
            raise Abort(_('size mismatch: expected %d, got %d') %
                        (self._size, self._got))
        for algo, expected in self._digests.items():
            actual = self._digester[algo]
            if expected != actual:
                # i18n: first parameter is a digest name
                raise Abort(_('%s mismatch: expected %s, got %s') %
                            (algo, expected, actual))
234
234
# Provide a 'buffer' callable on interpreters that lack the builtin.
# Python 3 removed it; memoryview gives the equivalent zero-copy slice.
try:
    buffer = buffer
except NameError:
    if not pycompat.ispy3:
        def buffer(sliceable, offset=0):
            return sliceable[offset:]
    else:
        def buffer(sliceable, offset=0):
            return memoryview(sliceable)[offset:]
244
244
245 closefds = os.name == 'posix'
245 closefds = os.name == 'posix'
246
246
247 _chunksize = 4096
247 _chunksize = 4096
248
248
class bufferedinputpipe(object):
    """a manually buffered input pipe

    Python will not let us use buffered IO and lazy reading with 'polling' at
    the same time. We cannot probe the buffer state and select will not detect
    that data are ready to read if they are already buffered.

    This class let us work around that by implementing its own buffering
    (allowing efficient readline) while offering a way to know if the buffer is
    empty from the output (allowing collaboration of the buffer with polling).

    This class lives in the 'util' module because it makes use of the 'os'
    module from the python stdlib.
    """

    def __init__(self, input):
        self._input = input
        self._buffer = []   # pending chunks, newest appended last
        self._eof = False   # set once os.read() returns no data
        self._lenbuf = 0    # total number of buffered bytes

    @property
    def hasbuffer(self):
        """True is any data is currently buffered

        This will be used externally a pre-step for polling IO. If there is
        already data then no polling should be set in place."""
        return bool(self._buffer)

    @property
    def closed(self):
        return self._input.closed

    def fileno(self):
        return self._input.fileno()

    def close(self):
        return self._input.close()

    def read(self, size):
        # keep pulling from the pipe until enough is buffered or EOF
        while not self._eof and self._lenbuf < size:
            self._fillbuffer()
        return self._frombuffer(size)

    def readline(self, *args, **kwargs):
        if len(self._buffer) > 1:
            # this should not happen because both read and readline end with a
            # _frombuffer call that collapse it.
            self._buffer = [''.join(self._buffer)]
            self._lenbuf = len(self._buffer[0])
        eol = -1
        if self._buffer:
            eol = self._buffer[-1].find('\n')
        while not self._eof and eol < 0:
            self._fillbuffer()
            if self._buffer:
                eol = self._buffer[-1].find('\n')
        size = eol + 1
        if eol < 0:  # end of file
            size = self._lenbuf
        elif len(self._buffer) > 1:
            # we need to take previous chunks into account
            size += self._lenbuf - len(self._buffer[-1])
        return self._frombuffer(size)

    def _frombuffer(self, size):
        """return at most 'size' data from the buffer

        The data are removed from the buffer."""
        if size == 0 or not self._buffer:
            return ''
        pending = self._buffer[0]
        if len(self._buffer) > 1:
            pending = ''.join(self._buffer)

        data = pending[:size]
        remainder = pending[len(data):]
        # collapse whatever is left back into a single-chunk buffer
        if remainder:
            self._buffer = [remainder]
            self._lenbuf = len(remainder)
        else:
            self._buffer = []
            self._lenbuf = 0
        return data

    def _fillbuffer(self):
        """read data to the buffer"""
        data = os.read(self._input.fileno(), _chunksize)
        if not data:
            self._eof = True
        else:
            self._lenbuf += len(data)
            self._buffer.append(data)
342
342
def popen2(cmd, env=None, newlines=False):
    """Run `cmd` through the shell; return its (stdin, stdout) pipes."""
    # Setting bufsize to -1 lets the system decide the buffer size.
    # The default for bufsize is 0, meaning unbuffered. This leads to
    # poor performance on Mac OS X: http://bugs.python.org/issue4194
    proc = subprocess.Popen(cmd, shell=True, bufsize=-1,
                            close_fds=closefds,
                            stdin=subprocess.PIPE, stdout=subprocess.PIPE,
                            universal_newlines=newlines,
                            env=env)
    return proc.stdin, proc.stdout
353
353
def popen3(cmd, env=None, newlines=False):
    """Run `cmd` through the shell; return its (stdin, stdout, stderr)
    pipes, discarding the process object popen4() also provides."""
    stdin, stdout, stderr, _proc = popen4(cmd, env, newlines)
    return stdin, stdout, stderr
357
357
def popen4(cmd, env=None, newlines=False, bufsize=-1):
    """Run `cmd` through the shell; return (stdin, stdout, stderr, proc).

    bufsize defaults to -1 (system-chosen buffering); see popen2 for why
    unbuffered pipes are avoided.
    """
    proc = subprocess.Popen(cmd, shell=True, bufsize=bufsize,
                            close_fds=closefds,
                            stdin=subprocess.PIPE, stdout=subprocess.PIPE,
                            stderr=subprocess.PIPE,
                            universal_newlines=newlines,
                            env=env)
    return proc.stdin, proc.stdout, proc.stderr, proc
366
366
def version():
    """Return version information if available.

    Falls back to 'unknown' when the generated __version__ module cannot
    be imported.
    """
    try:
        from . import __version__
    except ImportError:
        return 'unknown'
    return __version__.version
374
374
def versiontuple(v=None, n=4):
    """Parses a Mercurial version string into an N-tuple.

    The version string to be parsed is specified with the ``v`` argument.
    If it isn't defined, the current Mercurial version string will be parsed.

    ``n`` can be 2, 3, or 4. Here is how some version strings map to
    returned values:

    >>> v = '3.6.1+190-df9b73d2d444'
    >>> versiontuple(v, 2)
    (3, 6)
    >>> versiontuple(v, 3)
    (3, 6, 1)
    >>> versiontuple(v, 4)
    (3, 6, 1, '190-df9b73d2d444')

    >>> versiontuple('3.6.1+190-df9b73d2d444+20151118')
    (3, 6, 1, '190-df9b73d2d444+20151118')

    >>> v = '3.6'
    >>> versiontuple(v, 2)
    (3, 6)
    >>> versiontuple(v, 3)
    (3, 6, None)
    >>> versiontuple(v, 4)
    (3, 6, None, None)

    >>> v = '3.9-rc'
    >>> versiontuple(v, 2)
    (3, 9)
    >>> versiontuple(v, 3)
    (3, 9, None)
    >>> versiontuple(v, 4)
    (3, 9, None, 'rc')

    >>> v = '3.9-rc+2-02a8fea4289b'
    >>> versiontuple(v, 2)
    (3, 9)
    >>> versiontuple(v, 3)
    (3, 9, None)
    >>> versiontuple(v, 4)
    (3, 9, None, 'rc+2-02a8fea4289b')
    """
    if not v:
        v = version()
    # Split the numeric part from the '+'/'-' extra suffix.  Use a raw
    # string: the original '[\+-]' contained the invalid escape '\+',
    # which newer Pythons flag as a warning (the pattern is unchanged).
    parts = remod.split(r'[\+-]', v, 1)
    if len(parts) == 1:
        vparts, extra = parts[0], None
    else:
        vparts, extra = parts

    vints = []
    for i in vparts.split('.'):
        try:
            vints.append(int(i))
        except ValueError:
            # stop at the first non-numeric component (e.g. 'rc')
            break
    # (3, 6) -> (3, 6, None)
    while len(vints) < 3:
        vints.append(None)

    if n == 2:
        return (vints[0], vints[1])
    if n == 3:
        return (vints[0], vints[1], vints[2])
    if n == 4:
        return (vints[0], vints[1], vints[2], extra)
443
443
# used by parsedate
# Recognized date/time input formats, tried in order.  The order matters:
# more specific formats (with time components) come before date-only ones.
defaultdateformats = (
    '%Y-%m-%dT%H:%M:%S', # the 'real' ISO8601
    '%Y-%m-%dT%H:%M',    # without seconds
    '%Y-%m-%dT%H%M%S',   # another awful but legal variant without :
    '%Y-%m-%dT%H%M',     # without seconds
    '%Y-%m-%d %H:%M:%S', # our common legal variant
    '%Y-%m-%d %H:%M',    # without seconds
    '%Y-%m-%d %H%M%S',   # without :
    '%Y-%m-%d %H%M',     # without seconds
    '%Y-%m-%d %I:%M:%S%p',
    '%Y-%m-%d %H:%M',
    '%Y-%m-%d %I:%M%p',
    '%Y-%m-%d',
    '%m-%d',
    '%m/%d',
    '%m/%d/%y',
    '%m/%d/%Y',
    '%a %b %d %H:%M:%S %Y',
    '%a %b %d %I:%M:%S%p %Y',
    '%a, %d %b %Y %H:%M:%S', #  GNU coreutils "/bin/date --rfc-2822"
    '%b %d %H:%M:%S %Y',
    '%b %d %I:%M:%S%p %Y',
    '%b %d %H:%M:%S',
    '%b %d %I:%M:%S%p',
    '%b %d %H:%M',
    '%b %d %I:%M%p',
    '%b %d %Y',
    '%b %d',
    '%H:%M:%S',
    '%I:%M:%S%p',
    '%H:%M',
    '%I:%M%p',
)

# Additional, more ambiguous formats accepted where an extended date
# range is allowed (e.g. year- or month-only specifications).
extendeddateformats = defaultdateformats + (
    "%Y",
    "%Y-%m",
    "%b",
    "%b %Y",
    )
485
485
def cachefunc(func):
    '''cache the result of function calls'''
    # XXX doesn't handle keywords args
    argcount = func.__code__.co_argcount
    if argcount == 0:
        # zero-argument callable: stash the single result in a list cell
        hold = []
        def f():
            if not hold:
                hold.append(func())
            return hold[0]
        return f
    cache = {}
    if argcount == 1:
        # we gain a small amount of time because
        # we don't need to pack/unpack the list
        def f(arg):
            if arg not in cache:
                cache[arg] = func(arg)
            return cache[arg]
    else:
        def f(*args):
            if args not in cache:
                cache[args] = func(*args)
            return cache[args]

    return f
511
511
class sortdict(dict):
    '''a simple sorted dictionary

    Preserves insertion order: iteration, keys() and items() yield
    entries in the order keys were (last) inserted; re-setting an
    existing key moves it to the end.
    '''
    def __init__(self, data=None):
        self._list = []
        if data:
            self.update(data)
    def copy(self):
        return sortdict(self)
    def __setitem__(self, key, val):
        if key in self:
            # move an existing key to the end of the order
            self._list.remove(key)
        self._list.append(key)
        dict.__setitem__(self, key, val)
    def __iter__(self):
        return self._list.__iter__()
    def update(self, src):
        if isinstance(src, dict):
            # items() works on Python 2 and 3 alike; the original called
            # iteritems(), which does not exist on Python 3 dicts
            src = src.items()
        for k, v in src:
            self[k] = v
    def clear(self):
        dict.clear(self)
        self._list = []
    def items(self):
        return [(k, self[k]) for k in self._list]
    def __delitem__(self, key):
        dict.__delitem__(self, key)
        self._list.remove(key)
    def pop(self, key, *args, **kwargs):
        # return the popped value; the original discarded it, violating
        # the dict.pop() contract
        val = dict.pop(self, key, *args, **kwargs)
        try:
            self._list.remove(key)
        except ValueError:
            # key was absent (a default was returned by dict.pop)
            pass
        return val
    def keys(self):
        return self._list
    def iterkeys(self):
        return self._list.__iter__()
    def iteritems(self):
        for k in self._list:
            yield k, self[k]
    def insert(self, index, key, val):
        self._list.insert(index, key)
        dict.__setitem__(self, key, val)
    def __repr__(self):
        if not self:
            return '%s()' % self.__class__.__name__
        return '%s(%r)' % (self.__class__.__name__, self.items())
560
560
class _lrucachenode(object):
    """A node in a doubly linked list.

    Holds a reference to nodes on either side as well as a key-value
    pair for the dictionary entry.
    """
    # __slots__ keeps per-node memory small; caches may hold many nodes.
    __slots__ = (u'next', u'prev', u'key', u'value')

    def __init__(self):
        self.next = None
        self.prev = None

        # '_notset' marks a node that currently holds no cache entry
        self.key = _notset
        self.value = None

    def markempty(self):
        """Mark the node as emptied."""
        self.key = _notset
579
579
class lrucachedict(object):
    """Dict that caches most recent accesses and sets.

    The dict consists of an actual backing dict - indexed by original
    key - and a doubly linked circular list defining the order of entries in
    the cache.

    The head node is the newest entry in the cache. If the cache is full,
    we recycle head.prev and make it the new head. Cache accesses result in
    the node being moved to before the existing head and being marked as the
    new head node.
    """
    def __init__(self, max):
        self._cache = {}

        # Start with a single self-referential node; capacity is grown
        # lazily by _addcapacity() as entries are inserted.
        self._head = head = _lrucachenode()
        head.prev = head
        head.next = head
        self._size = 1
        self._capacity = max

    def __len__(self):
        return len(self._cache)

    def __contains__(self, k):
        return k in self._cache

    def __iter__(self):
        # We don't have to iterate in cache order, but why not.
        n = self._head
        for i in range(len(self._cache)):
            yield n.key
            n = n.next

    def __getitem__(self, k):
        node = self._cache[k]
        self._movetohead(node)
        return node.value

    def __setitem__(self, k, v):
        node = self._cache.get(k)
        # Replace existing value and mark as newest.
        if node is not None:
            node.value = v
            self._movetohead(node)
            return

        if self._size < self._capacity:
            node = self._addcapacity()
        else:
            # Grab the last/oldest item.
            node = self._head.prev

            # At capacity. Kill the old entry.
            if node.key is not _notset:
                del self._cache[node.key]

        node.key = k
        node.value = v
        self._cache[k] = node
        # And mark it as newest entry. No need to adjust order since it
        # is already self._head.prev.
        self._head = node

    def __delitem__(self, k):
        node = self._cache.pop(k)
        node.markempty()

        # Temporarily mark as newest item before re-adjusting head to make
        # this node the oldest item.
        self._movetohead(node)
        self._head = node.next

    # Additional dict methods.

    def get(self, k, default=None):
        try:
            return self._cache[k].value
        except KeyError:
            return default

    def clear(self):
        n = self._head
        while n.key is not _notset:
            n.markempty()
            n = n.next

        self._cache.clear()

    def copy(self):
        result = lrucachedict(self._capacity)
        n = self._head.prev
        # Iterate in oldest-to-newest order, so the copy has the right ordering
        for i in range(len(self._cache)):
            result[n.key] = n.value
            n = n.prev
        return result

    def _movetohead(self, node):
        """Mark a node as the newest, making it the new head.

        When a node is accessed, it becomes the freshest entry in the LRU
        list, which is denoted by self._head.

        Visually, let's make ``N`` the new head node (* denotes head):

            previous/oldest <-> head <-> next/next newest

            ----<->--- A* ---<->-----
            |                       |
            E <-> D <-> N <-> C <-> B

        To:

            ----<->--- N* ---<->-----
            |                       |
            E <-> D <-> C <-> B <-> A

        This requires the following moves:

           C.next = D  (node.prev.next = node.next)
           D.prev = C  (node.next.prev = node.prev)
           E.next = N  (head.prev.next = node)
           N.prev = E  (node.prev = head.prev)
           N.next = A  (node.next = head)
           A.prev = N  (head.prev = node)
        """
        head = self._head
        # C.next = D
        node.prev.next = node.next
        # D.prev = C
        node.next.prev = node.prev
        # N.prev = E
        node.prev = head.prev
        # N.next = A
        # It is tempting to do just "head" here, however if node is
        # adjacent to head, this will do bad things.
        node.next = head.prev.next
        # E.next = N
        node.next.prev = node
        # A.prev = N
        node.prev.next = node

        self._head = node

    def _addcapacity(self):
        """Add a node to the circular linked list.

        The new node is inserted before the head node.
        """
        head = self._head
        node = _lrucachenode()
        head.prev.next = node
        node.prev = head.prev
        node.next = head
        head.prev = node
        self._size += 1
        return node
738
738
def lrucachefunc(func):
    '''cache most recent results of function calls

    Keeps at most ~20 entries; the least recently used key is evicted
    when a new one would exceed that bound. Single-argument functions
    get a fast path that keys the cache on the argument itself rather
    than an args tuple.
    '''
    cache = {}
    order = collections.deque()
    if func.__code__.co_argcount == 1:
        def f(arg):
            if arg not in cache:
                if len(cache) > 20:
                    del cache[order.popleft()]
                cache[arg] = func(arg)
            else:
                # Move the key to the most-recently-used end.
                order.remove(arg)
            order.append(arg)
            return cache[arg]
    else:
        def f(*args):
            if args not in cache:
                if len(cache) > 20:
                    del cache[order.popleft()]
                cache[args] = func(*args)
            else:
                order.remove(args)
            order.append(args)
            return cache[args]

    return f
765
765
class propertycache(object):
    """Descriptor that caches the decorated method's result on the instance.

    The first attribute access computes the value and stores it in the
    instance ``__dict__`` under the function's name; because instance
    attributes shadow non-data descriptors, subsequent accesses bypass
    this descriptor entirely.
    """
    def __init__(self, func):
        self.func = func
        self.name = func.__name__
    def __get__(self, obj, type=None):
        result = self.func(obj)
        self.cachevalue(obj, result)
        return result

    def cachevalue(self, obj, value):
        # __dict__ assignment required to bypass __setattr__ (eg: repoview)
        obj.__dict__[self.name] = value
778
778
def pipefilter(s, cmd):
    '''filter string S through command CMD, returning its output

    Only stdout is returned; stderr is not captured. The shell is used
    to interpret CMD. ``closefds`` is a platform-specific module flag.
    '''
    p = subprocess.Popen(cmd, shell=True, close_fds=closefds,
                         stdin=subprocess.PIPE, stdout=subprocess.PIPE)
    pout, perr = p.communicate(s)
    return pout
785
785
def tempfilter(s, cmd):
    '''filter string S through a pair of temporary files with CMD.
    CMD is used as a template to create the real command to be run,
    with the strings INFILE and OUTFILE replaced by the real names of
    the temporary files generated.

    Raises Abort if the command exits non-zero; the temporary files are
    always removed (best effort) before returning.
    '''
    inname, outname = None, None
    try:
        infd, inname = tempfile.mkstemp(prefix='hg-filter-in-')
        fp = os.fdopen(infd, 'wb')
        fp.write(s)
        fp.close()
        outfd, outname = tempfile.mkstemp(prefix='hg-filter-out-')
        os.close(outfd)
        cmd = cmd.replace('INFILE', inname)
        cmd = cmd.replace('OUTFILE', outname)
        code = os.system(cmd)
        if sys.platform == 'OpenVMS' and code & 1:
            # OpenVMS success statuses have the low bit set.
            code = 0
        if code:
            raise Abort(_("command '%s' failed: %s") %
                        (cmd, explainexit(code)))
        return readfile(outname)
    finally:
        try:
            if inname:
                os.unlink(inname)
        except OSError:
            pass
        try:
            if outname:
                os.unlink(outname)
        except OSError:
            pass
819
819
# Dispatch table mapping a command prefix to the filter strategy used by
# filter() below; commands without a recognized prefix go through a pipe.
filtertable = {
    'tempfile:': tempfilter,
    'pipe:': pipefilter,
    }
824
824
def filter(s, cmd):
    "filter a string through a command that transforms its input to its output"
    # Prefix such as 'tempfile:' or 'pipe:' selects the strategy; the
    # prefix itself is stripped from the command before dispatch.
    for name, fn in filtertable.iteritems():
        if cmd.startswith(name):
            return fn(s, cmd[len(name):].lstrip())
    return pipefilter(s, cmd)
831
831
def binary(s):
    """return true if a string is binary data

    Uses the classic heuristic of looking for a NUL byte; empty or None
    input is considered non-binary.
    """
    return bool(s and '\0' in s)
835
835
def increasingchunks(source, min=1024, max=65536):
    '''return no less than min bytes per chunk while data remains,
    doubling min after each chunk until it reaches max'''
    def log2(x):
        # floor(log2(x)); returns 0 for x == 0.
        if not x:
            return 0
        i = 0
        while x:
            x >>= 1
            i += 1
        return i - 1

    buf = []
    blen = 0
    for chunk in source:
        buf.append(chunk)
        blen += len(chunk)
        if blen >= min:
            if min < max:
                # Double min, but jump straight to the buffered size's
                # power of two if that is already larger, capped at max.
                min = min << 1
                nmin = 1 << log2(blen)
                if nmin > min:
                    min = nmin
                if min > max:
                    min = max
            yield ''.join(buf)
            blen = 0
            buf = []
    if buf:
        # Flush whatever remains, even if smaller than min.
        yield ''.join(buf)
866
866
# Convenience alias so util users need not import the error module directly.
Abort = error.Abort
868
868
def always(fn):
    """Matcher predicate that accepts every filename."""
    return True
871
871
def never(fn):
    """Matcher predicate that rejects every filename."""
    return False
874
874
def nogc(func):
    """disable garbage collector

    Python's garbage collector triggers a GC each time a certain number of
    container objects (the number being defined by gc.get_threshold()) are
    allocated even when marked not to be tracked by the collector. Tracking has
    no effect on when GCs are triggered, only on what objects the GC looks
    into. As a workaround, disable GC while building complex (huge)
    containers.

    This garbage collector issue have been fixed in 2.7.
    """
    if sys.version_info >= (2, 7):
        # No wrapping needed on fixed interpreters; return func unchanged.
        return func
    def wrapper(*args, **kwargs):
        gcenabled = gc.isenabled()
        gc.disable()
        try:
            return func(*args, **kwargs)
        finally:
            # Only re-enable if GC was on before we disabled it.
            if gcenabled:
                gc.enable()
    return wrapper
898
898
def pathto(root, n1, n2):
    '''return the relative path from one place to another.
    root should use os.sep to separate directories
    n1 should use os.sep to separate directories
    n2 should use "/" to separate directories
    returns an os.sep-separated path.

    If n1 is a relative path, it's assumed it's
    relative to root.
    n2 should always be relative to root.
    '''
    if not n1:
        return localpath(n2)
    if os.path.isabs(n1):
        # If n1 lives on a different drive than root (Windows), a relative
        # path cannot be built; fall back to an absolute one.
        if os.path.splitdrive(root)[0] != os.path.splitdrive(n1)[0]:
            return os.path.join(root, localpath(n2))
        n2 = '/'.join((pconvert(root), n2))
    a, b = splitpath(n1), n2.split('/')
    a.reverse()
    b.reverse()
    # Strip the common prefix, then climb out of what remains of n1.
    while a and b and a[-1] == b[-1]:
        a.pop()
        b.pop()
    b.reverse()
    return os.sep.join((['..'] * len(a)) + b) or '.'
924
924
def mainfrozen():
    """return True if we are a frozen executable.

    The code supports py2exe (most common, Windows only) and tools/freeze
    (portable, not much used).
    """
    return (safehasattr(sys, "frozen") or # new py2exe
            safehasattr(sys, "importers") or # old py2exe
            imp.is_frozen(u"__main__")) # tools/freeze
934
934
# the location of data files matching the source code
if mainfrozen() and getattr(sys, 'frozen', None) != 'macosx_app':
    # executable version (py2exe) doesn't support __file__
    datapath = os.path.dirname(sys.executable)
else:
    datapath = os.path.dirname(__file__)

# __file__ may be unicode on Python 3; normalize to bytes for the rest of
# the codebase, which works in bytes.
if not isinstance(datapath, bytes):
    datapath = pycompat.fsencode(datapath)

i18n.setdatapath(datapath)
946
946
# Cached path of the 'hg' executable; computed lazily by hgexecutable().
_hgexecutable = None
948
948
def hgexecutable():
    """return location of the 'hg' executable.

    Defaults to $HG or 'hg' in the search path.

    The result is computed once and cached in the module-level
    ``_hgexecutable``.
    """
    if _hgexecutable is None:
        hg = os.environ.get('HG')
        mainmod = sys.modules['__main__']
        if hg:
            _sethgexecutable(hg)
        elif mainfrozen():
            if getattr(sys, 'frozen', None) == 'macosx_app':
                # Env variable set by py2app
                _sethgexecutable(os.environ['EXECUTABLEPATH'])
            else:
                _sethgexecutable(sys.executable)
        elif os.path.basename(getattr(mainmod, '__file__', '')) == 'hg':
            _sethgexecutable(mainmod.__file__)
        else:
            exe = findexe('hg') or os.path.basename(sys.argv[0])
            _sethgexecutable(exe)
    return _hgexecutable
971
971
def _sethgexecutable(path):
    """set location of the 'hg' executable"""
    global _hgexecutable
    _hgexecutable = path
976
976
def _isstdout(f):
    """Return True if file object ``f`` refers to the real process stdout.

    Compares file descriptors rather than object identity so wrapped or
    re-opened streams are detected too; returns falsy when ``f`` has no
    ``fileno`` method.
    """
    fileno = getattr(f, 'fileno', None)
    return fileno and fileno() == sys.__stdout__.fileno()
980
980
def system(cmd, environ=None, cwd=None, onerr=None, errprefix=None, out=None):
    '''enhanced shell command execution.
    run with environment maybe modified, maybe in different dir.

    if command fails and onerr is None, return status, else raise onerr
    object as exception.

    if out is specified, it is assumed to be a file-like object that has a
    write() method. stdout and stderr will be redirected to out.'''
    if environ is None:
        environ = {}
    try:
        # Flush our own pending output so the child's output interleaves
        # correctly; ignore failures (stdout may be closed/redirected).
        sys.stdout.flush()
    except Exception:
        pass
    def py2shell(val):
        'convert python object into string that is useful to shell'
        if val is None or val is False:
            return '0'
        if val is True:
            return '1'
        return str(val)
    origcmd = cmd
    cmd = quotecommand(cmd)
    if sys.platform == 'plan9' and (sys.version_info[0] == 2
                                    and sys.version_info[1] < 7):
        # subprocess kludge to work around issues in half-baked Python
        # ports, notably bichued/python:
        if not cwd is None:
            os.chdir(cwd)
        rc = os.system(cmd)
    else:
        env = dict(os.environ)
        env.update((k, py2shell(v)) for k, v in environ.iteritems())
        env['HG'] = hgexecutable()
        if out is None or _isstdout(out):
            # Child writes straight to our stdout; no need to proxy.
            rc = subprocess.call(cmd, shell=True, close_fds=closefds,
                                 env=env, cwd=cwd)
        else:
            proc = subprocess.Popen(cmd, shell=True, close_fds=closefds,
                                    env=env, cwd=cwd, stdout=subprocess.PIPE,
                                    stderr=subprocess.STDOUT)
            for line in iter(proc.stdout.readline, ''):
                out.write(line)
            proc.wait()
            rc = proc.returncode
    if sys.platform == 'OpenVMS' and rc & 1:
        # OpenVMS success statuses have the low bit set.
        rc = 0
    if rc and onerr:
        errmsg = '%s %s' % (os.path.basename(origcmd.split(None, 1)[0]),
                            explainexit(rc)[0])
        if errprefix:
            errmsg = '%s: %s' % (errprefix, errmsg)
        raise onerr(errmsg)
    return rc
1036
1036
def checksignature(func):
    '''wrap a function with code to check for calling errors

    A TypeError raised directly by the call site (traceback depth of 1,
    i.e. the call itself rather than code inside ``func``) is converted
    to error.SignatureError; TypeErrors from deeper inside propagate
    unchanged.
    '''
    def check(*args, **kwargs):
        try:
            return func(*args, **kwargs)
        except TypeError:
            if len(traceback.extract_tb(sys.exc_info()[2])) == 1:
                raise error.SignatureError
            raise

    return check
1048
1048
def copyfile(src, dest, hardlink=False, copystat=False, checkambig=False):
    '''copy a file, preserving mode and optionally other stat info like
    atime/mtime

    checkambig argument is used with filestat, and is useful only if
    destination file is guarded by any lock (e.g. repo.lock or
    repo.wlock).

    copystat and checkambig should be exclusive.
    '''
    assert not (copystat and checkambig)
    oldstat = None
    if os.path.lexists(dest):
        if checkambig:
            oldstat = checkambig and filestat(dest)
        unlink(dest)
    # hardlinks are problematic on CIFS, quietly ignore this flag
    # until we find a way to work around it cleanly (issue4546)
    if False and hardlink:
        try:
            oslink(src, dest)
            return
        except (IOError, OSError):
            pass # fall back to normal copy
    if os.path.islink(src):
        os.symlink(os.readlink(src), dest)
        # copytime is ignored for symlinks, but in general copytime isn't needed
        # for them anyway
    else:
        try:
            shutil.copyfile(src, dest)
            if copystat:
                # copystat also copies mode
                shutil.copystat(src, dest)
            else:
                shutil.copymode(src, dest)
                if oldstat and oldstat.stat:
                    newstat = filestat(dest)
                    if newstat.isambig(oldstat):
                        # stat of copied file is ambiguous to original one
                        advanced = (oldstat.stat.st_mtime + 1) & 0x7fffffff
                        os.utime(dest, (advanced, advanced))
        except shutil.Error as inst:
            raise Abort(str(inst))
1093
1093
def copyfiles(src, dst, hardlink=None, progress=lambda t, pos: None):
    """Copy a directory tree using hardlinks if possible.

    Returns a ``(hardlink, num)`` pair: whether hardlinking remained
    possible throughout, and how many files were processed. ``progress``
    is called with a topic and a running count (None to close the topic).
    """
    num = 0

    if hardlink is None:
        # Hardlinks only work within a single filesystem; compare the
        # device of src with that of dst's parent directory.
        hardlink = (os.stat(src).st_dev ==
                    os.stat(os.path.dirname(dst)).st_dev)
    if hardlink:
        topic = _('linking')
    else:
        topic = _('copying')

    if os.path.isdir(src):
        os.mkdir(dst)
        for name, kind in osutil.listdir(src):
            srcname = os.path.join(src, name)
            dstname = os.path.join(dst, name)
            def nprog(t, pos):
                if pos is not None:
                    return progress(t, pos + num)
            hardlink, n = copyfiles(srcname, dstname, hardlink, progress=nprog)
            num += n
    else:
        if hardlink:
            try:
                oslink(src, dst)
            except (IOError, OSError):
                # Once a hardlink fails, stop trying for the rest of the
                # tree so the destination is consistently copied.
                hardlink = False
                shutil.copy(src, dst)
        else:
            shutil.copy(src, dst)
        num += 1
        progress(topic, num)
    progress(topic, None)

    return hardlink, num
1130
1130
# Filenames and characters that Windows refuses or treats specially;
# consulted by checkwinfilename() below.
_winreservednames = '''con prn aux nul
    com1 com2 com3 com4 com5 com6 com7 com8 com9
    lpt1 lpt2 lpt3 lpt4 lpt5 lpt6 lpt7 lpt8 lpt9'''.split()
_winreservedchars = ':*?"<>|'
def checkwinfilename(path):
    r'''Check that the base-relative path is a valid filename on Windows.
    Returns None if the path is ok, or a UI string describing the problem.

    >>> checkwinfilename("just/a/normal/path")
    >>> checkwinfilename("foo/bar/con.xml")
    "filename contains 'con', which is reserved on Windows"
    >>> checkwinfilename("foo/con.xml/bar")
    "filename contains 'con', which is reserved on Windows"
    >>> checkwinfilename("foo/bar/xml.con")
    >>> checkwinfilename("foo/bar/AUX/bla.txt")
    "filename contains 'AUX', which is reserved on Windows"
    >>> checkwinfilename("foo/bar/bla:.txt")
    "filename contains ':', which is reserved on Windows"
    >>> checkwinfilename("foo/bar/b\07la.txt")
    "filename contains '\\x07', which is invalid on Windows"
    >>> checkwinfilename("foo/bar/bla ")
    "filename ends with ' ', which is not allowed on Windows"
    >>> checkwinfilename("../bar")
    >>> checkwinfilename("foo\\")
    "filename ends with '\\', which is invalid on Windows"
    >>> checkwinfilename("foo\\/bar")
    "directory name ends with '\\', which is invalid on Windows"
    '''
    if path.endswith('\\'):
        return _("filename ends with '\\', which is invalid on Windows")
    if '\\/' in path:
        return _("directory name ends with '\\', which is invalid on Windows")
    # examine each non-empty path component individually
    components = [c for c in path.replace('\\', '/').split('/') if c]
    for n in components:
        for c in n:
            if c in _winreservedchars:
                return _("filename contains '%s', which is reserved "
                         "on Windows") % c
            if ord(c) <= 31:
                return _("filename contains %r, which is invalid "
                         "on Windows") % c
        base = n.split('.')[0]
        if base and base.lower() in _winreservednames:
            return _("filename contains '%s', which is reserved "
                     "on Windows") % base
        t = n[-1]
        if t in '. ' and n not in '..':
            return _("filename ends with '%s', which is not allowed "
                     "on Windows") % t
1181
1181
# Pick the native filename validator: the Windows check on Windows,
# otherwise whatever the platform module provides.
if os.name != 'nt':
    checkosfilename = platform.checkosfilename
else:
    checkosfilename = checkwinfilename
1186
1186
def makelock(info, pathname):
    """Create a lock file at pathname whose content is info.

    Prefers a symlink (atomic and inspectable without opening the file);
    falls back to exclusive creation of a regular file on platforms where
    os has no symlink support.
    """
    try:
        return os.symlink(info, pathname)
    except OSError as why:
        # EEXIST means somebody else holds the lock; propagate that.
        if why.errno == errno.EEXIST:
            raise
    except AttributeError: # no symlink in os
        pass

    flags = os.O_CREAT | os.O_WRONLY | os.O_EXCL
    ld = os.open(pathname, flags)
    os.write(ld, info)
    os.close(ld)
1199
1199
def readlock(pathname):
    """Return the info stored in the lock file at pathname.

    Reads the symlink target when the lock is a symlink, otherwise falls
    back to reading the regular file's contents.
    """
    try:
        return os.readlink(pathname)
    except OSError as why:
        # EINVAL/ENOSYS: not a symlink (or no symlink support) -- fall back
        if why.errno not in (errno.EINVAL, errno.ENOSYS):
            raise
    except AttributeError: # no symlink in os
        pass
    fp = posixfile(pathname)
    contents = fp.read()
    fp.close()
    return contents
1212
1212
def fstat(fp):
    '''stat file object that may not have fileno method.'''
    try:
        fileno = fp.fileno
    except AttributeError:
        # no fileno(); fall back to stat'ing the file by name
        return os.stat(fp.name)
    return os.fstat(fileno())
1219
1219
1220 # File system features
1220 # File system features
1221
1221
def fscasesensitive(path):
    """
    Return true if the given path is on a case-sensitive filesystem

    Requires a path (like /foo/.hg) ending with a foldable final
    directory component.
    """
    st = os.lstat(path)
    dirname, basename = os.path.split(path)
    folded = basename.upper()
    if folded == basename:
        folded = basename.lower()
    if folded == basename:
        # name has no case-foldable characters: no evidence against
        # case sensitivity
        return True
    try:
        st2 = os.lstat(os.path.join(dirname, folded))
    except OSError:
        # the case-folded twin does not resolve: filesystem is sensitive
        return True
    # same underlying file => the filesystem folded the case for us
    return st2 != st
1244
1244
1245 try:
1245 try:
1246 import re2
1246 import re2
1247 _re2 = None
1247 _re2 = None
1248 except ImportError:
1248 except ImportError:
1249 _re2 = False
1249 _re2 = False
1250
1250
class _re(object):
    """Regex facade that transparently prefers the re2 engine.

    Falls back to the stdlib re module when re2 is unavailable or the
    pattern/flags are not re2-compatible.
    """
    def _checkre2(self):
        global _re2
        try:
            # check if match works, see issue3964
            _re2 = bool(re2.match(r'\[([^\[]+)\]', '[ui]'))
        except ImportError:
            _re2 = False

    def compile(self, pat, flags=0):
        '''Compile a regular expression, using re2 if possible

        For best performance, use only re2-compatible regexp features. The
        only flags from the re module that are re2-compatible are
        IGNORECASE and MULTILINE.'''
        if _re2 is None:
            self._checkre2()
        supported = remod.IGNORECASE | remod.MULTILINE
        if _re2 and not (flags & ~supported):
            # re2 takes flags inline; prepend them in the same order the
            # stdlib-only path would interpret them
            if flags & remod.IGNORECASE:
                pat = '(?i)' + pat
            if flags & remod.MULTILINE:
                pat = '(?m)' + pat
            try:
                return re2.compile(pat)
            except re2.error:
                pass
        return remod.compile(pat, flags)

    @propertycache
    def escape(self):
        '''Return the version of escape corresponding to self.compile.

        This is imperfect because whether re2 or re is used for a particular
        function depends on the flags, etc, but it's the best we can do.
        '''
        global _re2
        if _re2 is None:
            self._checkre2()
        return re2.escape if _re2 else remod.escape

re = _re()
1295
1295
# maps a directory path -> {normcased name: on-disk name} for its entries
_fspathcache = {}
def fspath(name, root):
    '''Get name in the case stored in the filesystem

    The name should be relative to root, and be normcase-ed for efficiency.

    Note that this function is unnecessary, and should not be
    called, for case-sensitive filesystems (simply because it's expensive).

    The root should be normcase-ed, too.
    '''
    def _listdirmap(dir):
        return dict((normcase(n), n) for n in os.listdir(dir))

    seps = os.sep
    if os.altsep:
        seps = seps + os.altsep
    # Protect backslashes. This gets silly very quickly.
    # NOTE(review): the result of replace() is discarded here (str.replace
    # does not mutate in place); preserved as-is since the character class
    # below happens to tolerate a raw backslash.
    seps.replace('\\','\\\\')
    pattern = remod.compile(r'([^%s]+)|([%s]+)' % (seps, seps))
    dir = os.path.normpath(root)
    result = []
    for part, sep in pattern.findall(name):
        if sep:
            result.append(sep)
            continue

        if dir not in _fspathcache:
            _fspathcache[dir] = _listdirmap(dir)
        contents = _fspathcache[dir]

        found = contents.get(part)
        if not found:
            # retry "once per directory" per "dirstate.walk" which
            # may take place for each patches of "hg qpush", for example
            _fspathcache[dir] = contents = _listdirmap(dir)
            found = contents.get(part)

        result.append(found or part)
        dir = os.path.join(dir, part)

    return ''.join(result)
1338
1338
def checknlink(testfile):
    '''check whether hardlink count reporting works properly'''

    # testfile may be open, so we need a separate file for checking to
    # work around issue2543 (or testfile may get lost on Samba shares)
    probe1 = testfile + ".hgtmp1"
    if os.path.lexists(probe1):
        return False
    try:
        posixfile(probe1, 'w').close()
    except IOError:
        # could not create the probe file; clean up any partial state
        try:
            os.unlink(probe1)
        except OSError:
            pass
        return False

    probe2 = testfile + ".hgtmp2"
    held = None
    try:
        oslink(probe1, probe2)
        # nlinks() may behave differently for files on Windows shares if
        # the file is open.
        held = posixfile(probe2)
        return nlinks(probe2) > 1
    except OSError:
        return False
    finally:
        if held is not None:
            held.close()
        for leftover in (probe1, probe2):
            try:
                os.unlink(leftover)
            except OSError:
                pass
1374
1374
def endswithsep(path):
    '''Check path ends with os.sep or os.altsep.'''
    if path.endswith(os.sep):
        return True
    # may return None on platforms without an altsep; callers only rely
    # on truthiness
    return os.altsep and path.endswith(os.altsep)
1378
1378
def splitpath(path):
    '''Split path by os.sep.

    Note that this function does not use os.altsep because this is
    an alternative of simple "xxx.split(os.sep)".
    It is recommended to use os.path.normpath() before using this
    function if need.'''
    return path.split(os.sep)
1386
1386
def gui():
    '''Are we running in a GUI?'''
    if sys.platform != 'darwin':
        return os.name == "nt" or os.environ.get("DISPLAY")
    if 'SSH_CONNECTION' in os.environ:
        # handle SSH access to a box where the user is logged in
        return False
    if getattr(osutil, 'isgui', None):
        # check if a CoreGraphics session is available
        return osutil.isgui()
    # pure build; use a safe default
    return True
1401
1401
def mktempcopy(name, emptyok=False, createmode=None):
    """Create a temporary file with the same contents from name

    The permission bits are copied from the original file.

    If the temporary file is going to be truncated immediately, you
    can use emptyok=True as an optimization.

    Returns the name of the temporary file.
    """
    dirpart, base = os.path.split(name)
    fd, temp = tempfile.mkstemp(prefix='.%s-' % base, dir=dirpart)
    os.close(fd)
    # Temporary files are created with mode 0600, which is usually not
    # what we want. If the original file already exists, just copy
    # its mode. Otherwise, manually obey umask.
    copymode(name, temp, createmode)
    if emptyok:
        return temp
    try:
        try:
            src = posixfile(name, "rb")
        except IOError as inst:
            # original vanished: the fresh empty temp file is fine
            if inst.errno == errno.ENOENT:
                return temp
            if not getattr(inst, 'filename', None):
                inst.filename = name
            raise
        dst = posixfile(temp, "wb")
        for chunk in filechunkiter(src):
            dst.write(chunk)
        src.close()
        dst.close()
    except: # re-raises
        # don't leave a half-copied temp file behind
        try:
            os.unlink(temp)
        except OSError:
            pass
        raise
    return temp
1440
1440
class filestat(object):
    """help to exactly detect change of a file

    'stat' attribute is result of 'os.stat()' if specified 'path'
    exists. Otherwise, it is None. This can avoid preparative
    'exists()' examination on client side of this class.
    """
    def __init__(self, path):
        try:
            self.stat = os.stat(path)
        except OSError as err:
            if err.errno != errno.ENOENT:
                raise
            # missing file: record that explicitly instead of raising
            self.stat = None

    __hash__ = object.__hash__

    def __eq__(self, old):
        # if ambiguity between stat of new and old file is
        # avoided, comparison of size, ctime and mtime is enough
        # to exactly detect change of a file regardless of platform
        try:
            mine = (self.stat.st_size, self.stat.st_ctime,
                    self.stat.st_mtime)
            theirs = (old.stat.st_size, old.stat.st_ctime,
                      old.stat.st_mtime)
        except AttributeError:
            # one side has no stat (file missing): never equal
            return False
        return mine == theirs

    def isambig(self, old):
        """Examine whether new (= self) stat is ambiguous against old one

        "S[N]" below means stat of a file at N-th change:

        - S[n-1].ctime < S[n].ctime: can detect change of a file
        - S[n-1].ctime == S[n].ctime
          - S[n-1].ctime < S[n].mtime: means natural advancing (*1)
          - S[n-1].ctime == S[n].mtime: is ambiguous (*2)
          - S[n-1].ctime > S[n].mtime: never occurs naturally (don't care)
        - S[n-1].ctime > S[n].ctime: never occurs naturally (don't care)

        Case (*2) above means that a file was changed twice or more at
        same time in sec (= S[n-1].ctime), and comparison of timestamp
        is ambiguous.

        Base idea to avoid such ambiguity is "advance mtime 1 sec, if
        timestamp is ambiguous".

        But advancing mtime only in case (*2) doesn't work as
        expected, because naturally advanced S[n].mtime in case (*1)
        might be equal to manually advanced S[n-1 or earlier].mtime.

        Therefore, all "S[n-1].ctime == S[n].ctime" cases should be
        treated as ambiguous regardless of mtime, to avoid overlooking
        by confliction between such mtime.

        Advancing mtime "if isambig(oldstat)" ensures "S[n-1].mtime !=
        S[n].mtime", even if size of a file isn't changed.
        """
        try:
            return self.stat.st_ctime == old.stat.st_ctime
        except AttributeError:
            # no stat on either side: cannot be ambiguous
            return False

    def avoidambig(self, path, old):
        """Change file stat of specified path to avoid ambiguity

        'old' should be previous filestat of 'path'.

        This skips avoiding ambiguity, if a process doesn't have
        appropriate privileges for 'path'.
        """
        advanced = (old.stat.st_mtime + 1) & 0x7fffffff
        try:
            os.utime(path, (advanced, advanced))
        except OSError as inst:
            # utime() on the file created by another user causes EPERM,
            # if a process doesn't have appropriate privileges; skip
            # the adjustment in that case
            if inst.errno != errno.EPERM:
                raise

    def __ne__(self, other):
        return not self.__eq__(other)
1524
1524
class atomictempfile(object):
    '''writable file object that atomically updates a file

    All writes will go to a temporary copy of the original file. Call
    close() when you are done writing, and atomictempfile will rename
    the temporary copy to the original name, making the changes
    visible. If the object is destroyed without being closed, all your
    writes are discarded.

    checkambig argument of constructor is used with filestat, and is
    useful only if target file is guarded by any lock (e.g. repo.lock
    or repo.wlock).
    '''
    def __init__(self, name, mode='w+b', createmode=None, checkambig=False):
        self.__name = name # permanent name
        self._tempname = mktempcopy(name, emptyok=('w' in mode),
                                    createmode=createmode)
        self._fp = posixfile(self._tempname, mode)
        self._checkambig = checkambig

        # expose the underlying file's primitives directly
        for attr in ('read', 'write', 'seek', 'tell', 'fileno'):
            setattr(self, attr, getattr(self._fp, attr))

    def close(self):
        if self._fp.closed:
            return
        self._fp.close()
        filename = localpath(self.__name)
        oldstat = self._checkambig and filestat(filename)
        if oldstat and oldstat.stat:
            rename(self._tempname, filename)
            newstat = filestat(filename)
            if newstat.isambig(oldstat):
                # stat of changed file is ambiguous to original one;
                # bump mtime so readers can tell the versions apart
                advanced = (oldstat.stat.st_mtime + 1) & 0x7fffffff
                os.utime(filename, (advanced, advanced))
        else:
            rename(self._tempname, filename)

    def discard(self):
        if self._fp.closed:
            return
        try:
            os.unlink(self._tempname)
        except OSError:
            pass
        self._fp.close()

    def __del__(self):
        if safehasattr(self, '_fp'): # constructor actually did something
            self.discard()

    def __enter__(self):
        return self

    def __exit__(self, exctype, excvalue, traceback):
        if exctype is None:
            self.close()
        else:
            self.discard()
1587
1587
def makedirs(name, mode=None, notindexed=False):
    """recursive directory creation with parent mode inheritance

    Newly created directories are marked as "not to be indexed by
    the content indexing service", if ``notindexed`` is specified
    for "write" mode access.
    """
    try:
        makedir(name, notindexed)
    except OSError as exc:
        if exc.errno == errno.EEXIST:
            return
        if exc.errno != errno.ENOENT or not name:
            raise
        parent = os.path.dirname(os.path.abspath(name))
        if parent == name:
            # reached the filesystem root without success
            raise
        makedirs(parent, mode, notindexed)
        try:
            makedir(name, notindexed)
        except OSError as exc:
            # Catch EEXIST to handle races
            if exc.errno == errno.EEXIST:
                return
            raise
    if mode is not None:
        os.chmod(name, mode)
1615
1615
def readfile(path):
    """Return the entire binary contents of the file at path."""
    with open(path, 'rb') as fh:
        return fh.read()
1619
1619
def writefile(path, text):
    """Replace the file at path with the binary content text."""
    with open(path, 'wb') as fh:
        fh.write(text)
1623
1623
def appendfile(path, text):
    """Append the binary content text to the file at path."""
    with open(path, 'ab') as fh:
        fh.write(text)
1627
1627
1628 class chunkbuffer(object):
1628 class chunkbuffer(object):
1629 """Allow arbitrary sized chunks of data to be efficiently read from an
1629 """Allow arbitrary sized chunks of data to be efficiently read from an
1630 iterator over chunks of arbitrary size."""
1630 iterator over chunks of arbitrary size."""
1631
1631
1632 def __init__(self, in_iter):
1632 def __init__(self, in_iter):
1633 """in_iter is the iterator that's iterating over the input chunks.
1633 """in_iter is the iterator that's iterating over the input chunks.
1634 targetsize is how big a buffer to try to maintain."""
1634 targetsize is how big a buffer to try to maintain."""
1635 def splitbig(chunks):
1635 def splitbig(chunks):
1636 for chunk in chunks:
1636 for chunk in chunks:
1637 if len(chunk) > 2**20:
1637 if len(chunk) > 2**20:
1638 pos = 0
1638 pos = 0
1639 while pos < len(chunk):
1639 while pos < len(chunk):
1640 end = pos + 2 ** 18
1640 end = pos + 2 ** 18
1641 yield chunk[pos:end]
1641 yield chunk[pos:end]
1642 pos = end
1642 pos = end
1643 else:
1643 else:
1644 yield chunk
1644 yield chunk
1645 self.iter = splitbig(in_iter)
1645 self.iter = splitbig(in_iter)
1646 self._queue = collections.deque()
1646 self._queue = collections.deque()
1647 self._chunkoffset = 0
1647 self._chunkoffset = 0
1648
1648
1649 def read(self, l=None):
1649 def read(self, l=None):
1650 """Read L bytes of data from the iterator of chunks of data.
1650 """Read L bytes of data from the iterator of chunks of data.
1651 Returns less than L bytes if the iterator runs dry.
1651 Returns less than L bytes if the iterator runs dry.
1652
1652
1653 If size parameter is omitted, read everything"""
1653 If size parameter is omitted, read everything"""
1654 if l is None:
1654 if l is None:
1655 return ''.join(self.iter)
1655 return ''.join(self.iter)
1656
1656
1657 left = l
1657 left = l
1658 buf = []
1658 buf = []
1659 queue = self._queue
1659 queue = self._queue
1660 while left > 0:
1660 while left > 0:
1661 # refill the queue
1661 # refill the queue
1662 if not queue:
1662 if not queue:
1663 target = 2**18
1663 target = 2**18
1664 for chunk in self.iter:
1664 for chunk in self.iter:
1665 queue.append(chunk)
1665 queue.append(chunk)
1666 target -= len(chunk)
1666 target -= len(chunk)
1667 if target <= 0:
1667 if target <= 0:
1668 break
1668 break
1669 if not queue:
1669 if not queue:
1670 break
1670 break
1671
1671
1672 # The easy way to do this would be to queue.popleft(), modify the
1672 # The easy way to do this would be to queue.popleft(), modify the
1673 # chunk (if necessary), then queue.appendleft(). However, for cases
1673 # chunk (if necessary), then queue.appendleft(). However, for cases
1674 # where we read partial chunk content, this incurs 2 dequeue
1674 # where we read partial chunk content, this incurs 2 dequeue
1675 # mutations and creates a new str for the remaining chunk in the
1675 # mutations and creates a new str for the remaining chunk in the
1676 # queue. Our code below avoids this overhead.
1676 # queue. Our code below avoids this overhead.
1677
1677
1678 chunk = queue[0]
1678 chunk = queue[0]
1679 chunkl = len(chunk)
1679 chunkl = len(chunk)
1680 offset = self._chunkoffset
1680 offset = self._chunkoffset
1681
1681
1682 # Use full chunk.
1682 # Use full chunk.
1683 if offset == 0 and left >= chunkl:
1683 if offset == 0 and left >= chunkl:
1684 left -= chunkl
1684 left -= chunkl
1685 queue.popleft()
1685 queue.popleft()
1686 buf.append(chunk)
1686 buf.append(chunk)
1687 # self._chunkoffset remains at 0.
1687 # self._chunkoffset remains at 0.
1688 continue
1688 continue
1689
1689
1690 chunkremaining = chunkl - offset
1690 chunkremaining = chunkl - offset
1691
1691
1692 # Use all of unconsumed part of chunk.
1692 # Use all of unconsumed part of chunk.
1693 if left >= chunkremaining:
1693 if left >= chunkremaining:
1694 left -= chunkremaining
1694 left -= chunkremaining
1695 queue.popleft()
1695 queue.popleft()
1696 # offset == 0 is enabled by block above, so this won't merely
1696 # offset == 0 is enabled by block above, so this won't merely
1697 # copy via ``chunk[0:]``.
1697 # copy via ``chunk[0:]``.
1698 buf.append(chunk[offset:])
1698 buf.append(chunk[offset:])
1699 self._chunkoffset = 0
1699 self._chunkoffset = 0
1700
1700
1701 # Partial chunk needed.
1701 # Partial chunk needed.
1702 else:
1702 else:
1703 buf.append(chunk[offset:offset + left])
1703 buf.append(chunk[offset:offset + left])
1704 self._chunkoffset += left
1704 self._chunkoffset += left
1705 left -= chunkremaining
1705 left -= chunkremaining
1706
1706
1707 return ''.join(buf)
1707 return ''.join(buf)
1708
1708
def filechunkiter(f, size=131072, limit=None):
    """Create a generator that produces the data in the file size
    (default 131072) bytes at a time, up to optional limit (default is
    to read all data). Chunks may be less than size bytes if the
    chunk is the last chunk in the file, or the file is a socket or
    some other type of file that sometimes reads less data than is
    requested."""
    assert size >= 0
    assert limit is None or limit >= 0
    while True:
        # never request more than the remaining limit allows
        nbytes = size if limit is None else min(limit, size)
        # a zero-byte request short-circuits to 0 (falsy) and ends the loop
        data = nbytes and f.read(nbytes)
        if not data:
            break
        if limit:
            limit -= len(data)
        yield data
1729
1729
def makedate(timestamp=None):
    '''Return a unix timestamp (or the current time) as a (unixtime,
    offset) tuple based off the local timezone.'''
    if timestamp is None:
        timestamp = time.time()
    if timestamp < 0:
        # refuse clearly bogus clocks instead of emitting garbage dates
        hint = _("check your clock")
        raise Abort(_("negative timestamp: %d") % timestamp, hint=hint)
    # local offset == difference between naive UTC and naive local time
    skew = (datetime.datetime.utcfromtimestamp(timestamp) -
            datetime.datetime.fromtimestamp(timestamp))
    return timestamp, skew.days * 86400 + skew.seconds
1742
1742
def datestr(date=None, format='%a %b %d %H:%M:%S %Y %1%2'):
    """represent a (unixtime, offset) tuple as a localized time.
    unixtime is seconds since the epoch, and offset is the time zone's
    number of seconds away from UTC.

    >>> datestr((0, 0))
    'Thu Jan 01 00:00:00 1970 +0000'
    >>> datestr((42, 0))
    'Thu Jan 01 00:00:42 1970 +0000'
    >>> datestr((-42, 0))
    'Wed Dec 31 23:59:18 1969 +0000'
    >>> datestr((0x7fffffff, 0))
    'Tue Jan 19 03:14:07 2038 +0000'
    >>> datestr((-0x80000000, 0))
    'Fri Dec 13 20:45:52 1901 +0000'
    """
    t, tz = date or makedate()
    if "%1" in format or "%2" in format or "%z" in format:
        # expand the Mercurial-specific timezone escapes before strftime
        sign = "-" if tz > 0 else "+"
        tzhours, tzmins = divmod(abs(tz) // 60, 60)
        format = format.replace("%z", "%1%2")
        format = format.replace("%1", "%c%02d" % (sign, tzhours))
        format = format.replace("%2", "%02d" % tzmins)
    utcseconds = t - tz
    # clamp to the signed 32-bit time range
    if utcseconds > 0x7fffffff:
        utcseconds = 0x7fffffff
    elif utcseconds < -0x80000000:
        utcseconds = -0x80000000
    # Never use time.gmtime() and datetime.datetime.fromtimestamp()
    # because they use the gmtime() system call which is buggy on Windows
    # for negative values.
    when = datetime.datetime(1970, 1, 1) + datetime.timedelta(seconds=utcseconds)
    return when.strftime(format)
1778
1778
def shortdate(date=None):
    """turn (timestamp, tzoff) tuple into iso 8631 date."""
    # delegate to datestr with a fixed ISO-style day-granularity format
    return datestr(date, format='%Y-%m-%d')
1782
1782
def parsetimezone(s):
    """find a trailing timezone, if any, in string, and return a
    (offset, remainder) pair"""

    # the named zones we recognize are both aliases for UTC
    if s.endswith("GMT") or s.endswith("UTC"):
        return 0, s[:-3].rstrip()

    # Unix-style timezones [+-]hhmm
    if len(s) >= 5 and s[-5] in "+-" and s[-4:].isdigit():
        # offset is stored as seconds west of UTC, hence negated for '+'
        seconds = (int(s[-4:-2]) * 60 + int(s[-2:])) * 60
        if s[-5] == "+":
            seconds = -seconds
        return seconds, s[:-5].rstrip()

    # ISO8601 trailing Z
    if s.endswith("Z") and s[-2:-1].isdigit():
        return 0, s[:-1]

    # ISO8601-style [+-]hh:mm
    if (len(s) >= 6 and s[-6] in "+-" and s[-3] == ":" and
        s[-5:-3].isdigit() and s[-2:].isdigit()):
        seconds = (int(s[-5:-3]) * 60 + int(s[-2:])) * 60
        if s[-6] == "+":
            seconds = -seconds
        return seconds, s[:-6]

    # no timezone found
    return None, s
1810
1810
def strdate(string, format, defaults=None):
    """parse a localized time string and return a (unixtime, offset) tuple.
    if the string cannot be parsed, ValueError is raised.

    ``defaults`` maps each of the keys "S", "M", "HI", "d", "mb" and "yY"
    to a (biased, today) value pair used to fill in elements missing from
    ``format``. (The previous signature used a mutable ``[]`` default,
    which was both shared between calls and the wrong type - the value is
    indexed with string keys; ``None`` now stands in for an empty mapping.)
    """
    if defaults is None:
        defaults = {}
    # NOTE: unixtime = localunixtime + offset
    offset, date = parsetimezone(string)

    # add missing elements from defaults
    usenow = False # default to using biased defaults
    for part in ("S", "M", "HI", "d", "mb", "yY"): # decreasing specificity
        found = [True for p in part if ("%"+p) in format]
        if not found:
            # False/True index picks the biased or the today-relative value
            date += "@" + defaults[part][usenow]
            format += "@%" + part[0]
        else:
            # We've found a specific time element, less specific time
            # elements are relative to today
            usenow = True

    timetuple = time.strptime(date, format)
    localunixtime = int(calendar.timegm(timetuple))
    if offset is None:
        # local timezone
        unixtime = int(time.mktime(timetuple))
        offset = unixtime - localunixtime
    else:
        unixtime = localunixtime + offset
    return unixtime, offset
1838
1838
def parsedate(date, formats=None, bias=None):
    """parse a localized date/time and return a (unixtime, offset) tuple.

    The date may be a "unixtime offset" string or in one of the specified
    formats. If the date already is a (unixtime, offset) tuple, it is returned.

    >>> parsedate(' today ') == parsedate(\
                                  datetime.date.today().strftime('%b %d'))
    True
    >>> parsedate( 'yesterday ') == parsedate((datetime.date.today() -\
                                               datetime.timedelta(days=1)\
                                              ).strftime('%b %d'))
    True
    >>> now, tz = makedate()
    >>> strnow, strtz = parsedate('now')
    >>> (strnow - now) < 1
    True
    >>> tz == strtz
    True
    """
    if bias is None:
        bias = {}
    if not date:
        return 0, 0
    if isinstance(date, tuple) and len(date) == 2:
        # already parsed
        return date
    formats = formats or defaultdateformats
    date = date.strip()

    # symbolic dates
    if date == 'now' or date == _('now'):
        return makedate()
    if date == 'today' or date == _('today'):
        date = datetime.date.today().strftime('%b %d')
    elif date == 'yesterday' or date == _('yesterday'):
        date = (datetime.date.today() -
                datetime.timedelta(days=1)).strftime('%b %d')

    try:
        # the internal "unixtime offset" representation
        when, offset = map(int, date.split(' '))
    except ValueError:
        # not a plain pair: try each format, filling unspecified fields
        # from the bias (specific end) or today's date (generic end)
        now = makedate()
        defaults = {}
        for part in ("d", "mb", "yY", "HI", "M", "S"):
            # this piece is for rounding the specific end of unknowns
            b = bias.get(part)
            if b is None:
                b = "00" if part[0] in "HMS" else "0"

            # this piece is for matching the generic end to today's date
            defaults[part] = (b, datestr(now, "%" + part[0]))

        for format in formats:
            try:
                when, offset = strdate(date, format, defaults)
            except (ValueError, OverflowError):
                pass
            else:
                break
        else:
            raise Abort(_('invalid date: %r') % date)
    # validate explicit (probably user-specified) date and
    # time zone offset. values must fit in signed 32 bits for
    # current 32-bit linux runtimes. timezones go from UTC-12
    # to UTC+14
    if when < -0x80000000 or when > 0x7fffffff:
        raise Abort(_('date exceeds 32 bits: %d') % when)
    if offset < -50400 or offset > 43200:
        raise Abort(_('impossible time zone offset: %d') % offset)
    return when, offset
1915
1915
def matchdate(date):
    """Return a function that matches a given date match specifier

    Formats include:

    '{date}' match a given date to the accuracy provided

    '<{date}' on or before a given date

    '>{date}' on or after a given date

    >>> p1 = parsedate("10:29:59")
    >>> p2 = parsedate("10:30:00")
    >>> p3 = parsedate("10:30:59")
    >>> p4 = parsedate("10:31:00")
    >>> p5 = parsedate("Sep 15 10:30:00 1999")
    >>> f = matchdate("10:30")
    >>> f(p1[0])
    False
    >>> f(p2[0])
    True
    >>> f(p3[0])
    True
    >>> f(p4[0])
    False
    >>> f(p5[0])
    False
    """

    def lower(spec):
        # bias every unspecified field toward the earliest moment
        d = {'mb': "1", 'd': "1"}
        return parsedate(spec, extendeddateformats, d)[0]

    def upper(spec):
        # bias every unspecified field toward the latest moment; try
        # month lengths from longest to shortest until one parses
        d = {'mb': "12", 'HI': "23", 'M': "59", 'S': "59"}
        for days in ("31", "30", "29"):
            d["d"] = days
            try:
                return parsedate(spec, extendeddateformats, d)[0]
            except Abort:
                pass
        d["d"] = "28"
        return parsedate(spec, extendeddateformats, d)[0]

    date = date.strip()

    if not date:
        raise Abort(_("dates cannot consist entirely of whitespace"))

    if date[0] == "<":
        if not date[1:]:
            raise Abort(_("invalid day spec, use '<DATE'"))
        when = upper(date[1:])
        return lambda x: x <= when

    if date[0] == ">":
        if not date[1:]:
            raise Abort(_("invalid day spec, use '>DATE'"))
        when = lower(date[1:])
        return lambda x: x >= when

    if date[0] == "-":
        # "-N": everything within the last N days
        try:
            days = int(date[1:])
        except ValueError:
            raise Abort(_("invalid day spec: %s") % date[1:])
        if days < 0:
            raise Abort(_("%s must be nonnegative (see 'hg help dates')")
                % date[1:])
        when = makedate()[0] - days * 3600 * 24
        return lambda x: x >= when

    # a range ("A to B") or a single date matched to its own accuracy
    if " to " in date:
        a, b = date.split(" to ")
    else:
        a = b = date
    start, stop = lower(a), upper(b)
    return lambda x: start <= x <= stop
1991
1991
def stringmatcher(pattern):
    """
    accepts a string, possibly starting with 're:' or 'literal:' prefix.
    returns the matcher name, pattern, and matcher function.
    missing or unknown prefixes are treated as literal matches.

    helper for tests:
    >>> def test(pattern, *tests):
    ...     kind, pattern, matcher = stringmatcher(pattern)
    ...     return (kind, pattern, [bool(matcher(t)) for t in tests])

    exact matching (no prefix):
    >>> test('abcdefg', 'abc', 'def', 'abcdefg')
    ('literal', 'abcdefg', [False, False, True])

    regex matching ('re:' prefix)
    >>> test('re:a.+b', 'nomatch', 'fooadef', 'fooadefbar')
    ('re', 'a.+b', [False, False, True])

    force exact matches ('literal:' prefix)
    >>> test('literal:re:foobar', 'foobar', 're:foobar')
    ('literal', 're:foobar', [False, True])

    unknown prefixes are ignored and treated as literals
    >>> test('foo:bar', 'foo', 'bar', 'foo:bar')
    ('literal', 'foo:bar', [False, False, True])
    """
    if pattern.startswith('re:'):
        pattern = pattern[len('re:'):]
        try:
            regex = remod.compile(pattern)
        except remod.error as e:
            raise error.ParseError(_('invalid regular expression: %s')
                                   % e)
        return 're', pattern, regex.search
    if pattern.startswith('literal:'):
        pattern = pattern[len('literal:'):]
    # anything without a recognized prefix is an exact-match literal
    return 'literal', pattern, pattern.__eq__
2030
2030
def shortuser(user):
    """Return a short representation of a user name or email address."""
    # drop the domain, if any
    user = user.split('@', 1)[0]
    # drop anything up to and including the first '<'
    if '<' in user:
        user = user.split('<', 1)[1]
    # keep only the first word
    user = user.split(' ', 1)[0]
    # keep only the portion before the first '.'
    user = user.split('.', 1)[0]
    return user
2046
2046
def emailuser(user):
    """Return the user portion of an email address."""
    # drop the domain, then any "Real Name <" style prefix
    user = user.split('@', 1)[0]
    if '<' in user:
        user = user.split('<', 1)[1]
    return user
2056
2056
def email(author):
    '''get email of author.'''
    # take what lies between '<' and '>'; with no '<' the slice starts
    # at 0, with no '>' it runs to the end of the string
    start = author.find('<') + 1
    end = author.find('>')
    return author[start:] if end == -1 else author[start:end]
2063
2063
def ellipsis(text, maxlength=400):
    """Trim string to at most maxlength (default: 400) columns in display."""
    # column-width-aware trimming is implemented in the encoding module
    return encoding.trim(text, maxlength, ellipsis='...')
2067
2067
def unitcountfn(*unittable):
    '''return a function that renders a readable count of some quantity'''

    def render(count):
        # use the first row whose threshold (multiplier * divisor) is met
        for threshold, unit, fmt in unittable:
            if count >= unit * threshold:
                return fmt % (count / float(unit))
        # nothing matched: format with the last (smallest-unit) row
        return unittable[-1][2] % count

    return render
2078
2078
# Render byte counts with roughly three significant digits: within each
# unit, values >= 100 get no decimals, >= 10 one decimal, >= 1 two.
# Row order matters: the first matching threshold wins.
bytecount = unitcountfn(
    (100, 1 << 30, _('%.0f GB')),
    (10, 1 << 30, _('%.1f GB')),
    (1, 1 << 30, _('%.2f GB')),
    (100, 1 << 20, _('%.0f MB')),
    (10, 1 << 20, _('%.1f MB')),
    (1, 1 << 20, _('%.2f MB')),
    (100, 1 << 10, _('%.0f KB')),
    (10, 1 << 10, _('%.1f KB')),
    (1, 1 << 10, _('%.2f KB')),
    (1, 1, _('%.0f bytes')),
    )
2091
2091
def uirepr(s):
    """repr() a string with doubled backslashes collapsed to one.

    Avoids double backslash in Windows path repr().
    """
    return repr(s).replace('\\\\', '\\')
2095
2095
2096 # delay import of textwrap
2096 # delay import of textwrap
def MBTextWrapper(**kwargs):
    # Factory for a width-aware TextWrapper; on first call it defines the
    # class and rebinds the module-level name to it (see bottom), so the
    # class body is only executed once.
    class tw(textwrap.TextWrapper):
        """
        Extend TextWrapper for width-awareness.

        Neither number of 'bytes' in any encoding nor 'characters' is
        appropriate to calculate terminal columns for specified string.

        Original TextWrapper implementation uses built-in 'len()' directly,
        so overriding is needed to use width information of each characters.

        In addition, characters classified into 'ambiguous' width are
        treated as wide in East Asian area, but as narrow in other.

        This requires use decision to determine width of such characters.
        """
        def _cutdown(self, ucstr, space_left):
            # Split ucstr at the longest prefix whose display width
            # (per encoding.ucolwidth) still fits within space_left.
            l = 0
            colwidth = encoding.ucolwidth
            for i in xrange(len(ucstr)):
                l += colwidth(ucstr[i])
                if space_left < l:
                    return (ucstr[:i], ucstr[i:])
            return ucstr, ''

        # overriding of base class
        def _handle_long_word(self, reversed_chunks, cur_line, cur_len, width):
            # Like the base implementation, but breaks the word using the
            # column-aware _cutdown() above.
            space_left = max(width - cur_len, 1)

            if self.break_long_words:
                cut, res = self._cutdown(reversed_chunks[-1], space_left)
                cur_line.append(cut)
                reversed_chunks[-1] = res
            elif not cur_line:
                cur_line.append(reversed_chunks.pop())

        # this overriding code is imported from TextWrapper of Python 2.6
        # to calculate columns of string by 'encoding.ucolwidth()'
        def _wrap_chunks(self, chunks):
            colwidth = encoding.ucolwidth

            lines = []
            if self.width <= 0:
                raise ValueError("invalid width %r (must be > 0)" % self.width)

            # Arrange in reverse order so items can be efficiently popped
            # from a stack of chucks.
            chunks.reverse()

            while chunks:

                # Start the list of chunks that will make up the current line.
                # cur_len is just the length of all the chunks in cur_line.
                cur_line = []
                cur_len = 0

                # Figure out which static string will prefix this line.
                if lines:
                    indent = self.subsequent_indent
                else:
                    indent = self.initial_indent

                # Maximum width for this line.
                width = self.width - len(indent)

                # First chunk on line is whitespace -- drop it, unless this
                # is the very beginning of the text (i.e. no lines started yet).
                if self.drop_whitespace and chunks[-1].strip() == '' and lines:
                    del chunks[-1]

                while chunks:
                    l = colwidth(chunks[-1])

                    # Can at least squeeze this chunk onto the current line.
                    if cur_len + l <= width:
                        cur_line.append(chunks.pop())
                        cur_len += l

                    # Nope, this line is full.
                    else:
                        break

                # The current line is full, and the next chunk is too big to
                # fit on *any* line (not just this one).
                if chunks and colwidth(chunks[-1]) > width:
                    self._handle_long_word(chunks, cur_line, cur_len, width)

                # If the last chunk on this line is all whitespace, drop it.
                if (self.drop_whitespace and
                    cur_line and cur_line[-1].strip() == ''):
                    del cur_line[-1]

                # Convert current line back to a string and store it in list
                # of all lines (return value).
                if cur_line:
                    lines.append(indent + ''.join(cur_line))

            return lines

    # Cache the class: rebind the module-level name so subsequent callers
    # construct tw directly instead of re-running this function body.
    global MBTextWrapper
    MBTextWrapper = tw
    return tw(**kwargs)
2199
2199
def wrap(line, width, initindent='', hangindent=''):
    """Word-wrap ``line`` to ``width`` display columns.

    ``initindent`` prefixes the first output line and ``hangindent`` every
    following line. The byte string is decoded with the locale encoding,
    wrapped column-aware (wide characters count as two), and re-encoded.
    """
    maxindent = max(len(hangindent), len(initindent))
    if width <= maxindent:
        # adjust for weird terminal size
        width = max(78, maxindent + 1)
    # Wrap in unicode space so multi-byte characters are measured correctly.
    uline = line.decode(encoding.encoding, encoding.encodingmode)
    uinit = initindent.decode(encoding.encoding, encoding.encodingmode)
    uhang = hangindent.decode(encoding.encoding, encoding.encodingmode)
    tw = MBTextWrapper(width=width,
                       initial_indent=uinit,
                       subsequent_indent=uhang)
    return tw.fill(uline).encode(encoding.encoding)
2212
2212
if (pyplatform.python_implementation() == 'CPython' and
    sys.version_info < (3, 0)):
    # Some CPython file-object methods mishandle EINTR. Affected versions
    # (buggy: has the EINTR bug, okay: otherwise):
    #
    #              | < 2.7.4 | 2.7.4 to 2.7.12 | >= 3.0
    # --------------------------------------------------
    # fp.__iter__  | buggy   | buggy           | okay
    # fp.read*     | buggy   | okay [1]        | okay
    #
    # [1]: fixed by changeset 67dc99a989cd in the cpython hg repo.
    #
    # Only fp.__iter__ is worked around here; the "read*" family is left
    # alone since Python < 2.7.4 is a minority.
    #
    # The workaround costs speed: "for x in fp" is about 4x faster than
    # "for x in iter(fp.readline, '')" on CPython 2 because __iter__ has an
    # internal readahead buffer the read* methods lack. Since the "read"
    # syscall is not interruptible for on-disk files on modern systems,
    # regular (S_ISREG) files keep the fast unsafe path; only pipes,
    # sockets, ttys etc. take the safe one.
    if sys.version_info >= (2, 7, 4):
        def _safeiterfile(fp):
            # fp.readline deals with EINTR correctly, use it as a workaround.
            return iter(fp.readline, '')
    else:
        # fp.read* are broken too, manually deal with EINTR in a stupid way.
        # note: this may block longer than necessary because of bufsize.
        def _safeiterfile(fp, bufsize=4096):
            fd = fp.fileno()
            pending = ''
            while True:
                try:
                    data = os.read(fd, bufsize)
                except OSError as ex:
                    # os.read only raises EINTR before any data is read
                    if ex.errno != errno.EINTR:
                        raise
                    continue
                pending += data
                if '\n' in data:
                    pieces = pending.splitlines(True)
                    pending = ''
                    for piece in pieces:
                        if piece[-1] == '\n':
                            yield piece
                        else:
                            # trailing partial line: keep for next read
                            pending = piece
                if not data:
                    break
            if pending:
                yield pending

    def iterfile(fp):
        """Iterate fp, working around CPython 2's EINTR bugs when needed."""
        usefast = True
        if type(fp) is file:
            # real on-disk files are "fast" and keep the builtin iterator
            usefast = stat.S_ISREG(os.fstat(fp.fileno()).st_mode)
        if usefast:
            return fp
        return _safeiterfile(fp)
else:
    # PyPy and CPython 3 do not have the EINTR issue thus no workaround needed.
    def iterfile(fp):
        return fp
2284
2284
def iterlines(iterator):
    """Yield individual lines from an iterable of text chunks."""
    for data in iterator:
        for ln in data.splitlines():
            yield ln
2289
2289
def expandpath(path):
    """Expand environment variables, then a leading ~user, in path."""
    withvars = os.path.expandvars(path)
    return os.path.expanduser(withvars)
2292
2292
def hgcmd():
    """Return the command used to execute current hg

    This is different from hgexecutable() because on Windows we want
    to avoid things opening new shell windows like batch files, so we
    get either the python call or current executable.
    """
    if not mainfrozen():
        return gethgcmd()
    if getattr(sys, 'frozen', None) == 'macosx_app':
        # Env variable set by py2app
        return [os.environ['EXECUTABLEPATH']]
    return [sys.executable]
2307
2307
def rundetached(args, condfn):
    """Execute the argument list in a detached process.

    condfn is a callable which is called repeatedly and should return
    True once the child process is known to have started successfully.
    At this point, the child process PID is returned. If the child
    process fails to start or finishes before condfn() evaluates to
    True, return -1.
    """
    # Windows case is easier because the child process is either
    # successfully starting and validating the condition or exiting
    # on failure. We just poll on its PID. On Unix, if the child
    # process fails to start, it will be left in a zombie state until
    # the parent wait on it, which we cannot do since we expect a long
    # running process on success. Instead we listen for SIGCHLD telling
    # us our child process terminated.
    terminated = set()
    def handler(signum, frame):
        # reap the child and remember its (pid, status) so the poll loop
        # below can notice the termination
        terminated.add(os.wait())
    prevhandler = None
    # SIGCHLD may be absent (e.g. on Windows); only install a handler when
    # the platform provides the signal
    SIGCHLD = getattr(signal, 'SIGCHLD', None)
    if SIGCHLD is not None:
        prevhandler = signal.signal(SIGCHLD, handler)
    try:
        pid = spawndetached(args)
        while not condfn():
            # condfn() is re-checked after the liveness test to close the
            # race where the child satisfies the condition and then exits
            # between the two calls
            if ((pid in terminated or not testpid(pid))
                and not condfn()):
                return -1
            time.sleep(0.1)
        return pid
    finally:
        # always restore the previous SIGCHLD disposition
        if prevhandler is not None:
            signal.signal(signal.SIGCHLD, prevhandler)
2342
2342
def interpolate(prefix, mapping, s, fn=None, escape_prefix=False):
    """Return the result of interpolating items in the mapping into string s.

    prefix is a single character string, or a two character string with
    a backslash as the first character if the prefix needs to be escaped in
    a regular expression.

    fn is an optional function that will be applied to the replacement text
    just before replacement.

    escape_prefix is an optional flag that allows using doubled prefix for
    its escaping.
    """
    fn = fn or (lambda s: s)
    patterns = '|'.join(mapping.keys())
    if escape_prefix:
        patterns += '|' + prefix
        if len(prefix) > 1:
            # strip the leading backslash to get the literal prefix char
            prefix_char = prefix[1:]
        else:
            prefix_char = prefix
        # Work on a copy so the caller's mapping is not mutated by the
        # doubled-prefix escape entry (previously this leaked into the
        # caller's dict).
        mapping = mapping.copy()
        mapping[prefix_char] = prefix_char
    r = remod.compile(r'%s(%s)' % (prefix, patterns))
    # group() includes the prefix character; strip it before lookup
    return r.sub(lambda x: fn(mapping[x.group()[1:]]), s)
2367
2367
def getport(port):
    """Return the port for a given network service.

    If port is an integer, it's returned as is. If it's a string, it's
    looked up using socket.getservbyname(). If there's no matching
    service, error.Abort is raised.
    """
    try:
        num = int(port)
    except ValueError:
        # not numeric: fall back to a service-name lookup
        try:
            num = socket.getservbyname(port)
        except socket.error:
            raise Abort(_("no port number associated with service '%s'")
                        % port)
    return num
2384
2384
# Recognized spellings of boolean config values, case-insensitive.
_booleans = dict.fromkeys(('1', 'yes', 'true', 'on', 'always'), True)
_booleans.update(dict.fromkeys(('0', 'no', 'false', 'off', 'never'), False))

def parsebool(s):
    """Parse s into a boolean.

    If s is not a valid boolean, returns None.
    """
    return _booleans.get(s.lower(), None)
2395
2395
# Map every two-hex-digit string (all case combinations) to its byte value.
_hextochr = {a + b: chr(int(a + b, 16))
             for a in string.hexdigits for b in string.hexdigits}
2398
2398
2399 class url(object):
2399 class url(object):
2400 r"""Reliable URL parser.
2400 r"""Reliable URL parser.
2401
2401
2402 This parses URLs and provides attributes for the following
2402 This parses URLs and provides attributes for the following
2403 components:
2403 components:
2404
2404
2405 <scheme>://<user>:<passwd>@<host>:<port>/<path>?<query>#<fragment>
2405 <scheme>://<user>:<passwd>@<host>:<port>/<path>?<query>#<fragment>
2406
2406
2407 Missing components are set to None. The only exception is
2407 Missing components are set to None. The only exception is
2408 fragment, which is set to '' if present but empty.
2408 fragment, which is set to '' if present but empty.
2409
2409
2410 If parsefragment is False, fragment is included in query. If
2410 If parsefragment is False, fragment is included in query. If
2411 parsequery is False, query is included in path. If both are
2411 parsequery is False, query is included in path. If both are
2412 False, both fragment and query are included in path.
2412 False, both fragment and query are included in path.
2413
2413
2414 See http://www.ietf.org/rfc/rfc2396.txt for more information.
2414 See http://www.ietf.org/rfc/rfc2396.txt for more information.
2415
2415
2416 Note that for backward compatibility reasons, bundle URLs do not
2416 Note that for backward compatibility reasons, bundle URLs do not
2417 take host names. That means 'bundle://../' has a path of '../'.
2417 take host names. That means 'bundle://../' has a path of '../'.
2418
2418
2419 Examples:
2419 Examples:
2420
2420
2421 >>> url('http://www.ietf.org/rfc/rfc2396.txt')
2421 >>> url('http://www.ietf.org/rfc/rfc2396.txt')
2422 <url scheme: 'http', host: 'www.ietf.org', path: 'rfc/rfc2396.txt'>
2422 <url scheme: 'http', host: 'www.ietf.org', path: 'rfc/rfc2396.txt'>
2423 >>> url('ssh://[::1]:2200//home/joe/repo')
2423 >>> url('ssh://[::1]:2200//home/joe/repo')
2424 <url scheme: 'ssh', host: '[::1]', port: '2200', path: '/home/joe/repo'>
2424 <url scheme: 'ssh', host: '[::1]', port: '2200', path: '/home/joe/repo'>
2425 >>> url('file:///home/joe/repo')
2425 >>> url('file:///home/joe/repo')
2426 <url scheme: 'file', path: '/home/joe/repo'>
2426 <url scheme: 'file', path: '/home/joe/repo'>
2427 >>> url('file:///c:/temp/foo/')
2427 >>> url('file:///c:/temp/foo/')
2428 <url scheme: 'file', path: 'c:/temp/foo/'>
2428 <url scheme: 'file', path: 'c:/temp/foo/'>
2429 >>> url('bundle:foo')
2429 >>> url('bundle:foo')
2430 <url scheme: 'bundle', path: 'foo'>
2430 <url scheme: 'bundle', path: 'foo'>
2431 >>> url('bundle://../foo')
2431 >>> url('bundle://../foo')
2432 <url scheme: 'bundle', path: '../foo'>
2432 <url scheme: 'bundle', path: '../foo'>
2433 >>> url(r'c:\foo\bar')
2433 >>> url(r'c:\foo\bar')
2434 <url path: 'c:\\foo\\bar'>
2434 <url path: 'c:\\foo\\bar'>
2435 >>> url(r'\\blah\blah\blah')
2435 >>> url(r'\\blah\blah\blah')
2436 <url path: '\\\\blah\\blah\\blah'>
2436 <url path: '\\\\blah\\blah\\blah'>
2437 >>> url(r'\\blah\blah\blah#baz')
2437 >>> url(r'\\blah\blah\blah#baz')
2438 <url path: '\\\\blah\\blah\\blah', fragment: 'baz'>
2438 <url path: '\\\\blah\\blah\\blah', fragment: 'baz'>
2439 >>> url(r'file:///C:\users\me')
2439 >>> url(r'file:///C:\users\me')
2440 <url scheme: 'file', path: 'C:\\users\\me'>
2440 <url scheme: 'file', path: 'C:\\users\\me'>
2441
2441
2442 Authentication credentials:
2442 Authentication credentials:
2443
2443
2444 >>> url('ssh://joe:xyz@x/repo')
2444 >>> url('ssh://joe:xyz@x/repo')
2445 <url scheme: 'ssh', user: 'joe', passwd: 'xyz', host: 'x', path: 'repo'>
2445 <url scheme: 'ssh', user: 'joe', passwd: 'xyz', host: 'x', path: 'repo'>
2446 >>> url('ssh://joe@x/repo')
2446 >>> url('ssh://joe@x/repo')
2447 <url scheme: 'ssh', user: 'joe', host: 'x', path: 'repo'>
2447 <url scheme: 'ssh', user: 'joe', host: 'x', path: 'repo'>
2448
2448
2449 Query strings and fragments:
2449 Query strings and fragments:
2450
2450
2451 >>> url('http://host/a?b#c')
2451 >>> url('http://host/a?b#c')
2452 <url scheme: 'http', host: 'host', path: 'a', query: 'b', fragment: 'c'>
2452 <url scheme: 'http', host: 'host', path: 'a', query: 'b', fragment: 'c'>
2453 >>> url('http://host/a?b#c', parsequery=False, parsefragment=False)
2453 >>> url('http://host/a?b#c', parsequery=False, parsefragment=False)
2454 <url scheme: 'http', host: 'host', path: 'a?b#c'>
2454 <url scheme: 'http', host: 'host', path: 'a?b#c'>
2455
2455
2456 Empty path:
2456 Empty path:
2457
2457
2458 >>> url('')
2458 >>> url('')
2459 <url path: ''>
2459 <url path: ''>
2460 >>> url('#a')
2460 >>> url('#a')
2461 <url path: '', fragment: 'a'>
2461 <url path: '', fragment: 'a'>
2462 >>> url('http://host/')
2462 >>> url('http://host/')
2463 <url scheme: 'http', host: 'host', path: ''>
2463 <url scheme: 'http', host: 'host', path: ''>
2464 >>> url('http://host/#a')
2464 >>> url('http://host/#a')
2465 <url scheme: 'http', host: 'host', path: '', fragment: 'a'>
2465 <url scheme: 'http', host: 'host', path: '', fragment: 'a'>
2466
2466
2467 Only scheme:
2467 Only scheme:
2468
2468
2469 >>> url('http:')
2469 >>> url('http:')
2470 <url scheme: 'http'>
2470 <url scheme: 'http'>
2471 """
2471 """
2472
2472
2473 _safechars = "!~*'()+"
2473 _safechars = "!~*'()+"
2474 _safepchars = "/!~*'()+:\\"
2474 _safepchars = "/!~*'()+:\\"
2475 _matchscheme = remod.compile('^[a-zA-Z0-9+.\\-]+:').match
2475 _matchscheme = remod.compile('^[a-zA-Z0-9+.\\-]+:').match
2476
2476
    def __init__(self, path, parsequery=True, parsefragment=True):
        """Parse ``path`` into URL components (see the class docstring).

        parsequery/parsefragment control whether '?' and '#' are treated
        as component separators or left embedded in the path.
        """
        # We slowly chomp away at path until we have only the path left
        self.scheme = self.user = self.passwd = self.host = None
        self.port = self.path = self.query = self.fragment = None
        self._localpath = True  # stays True for plain local paths
        self._hostport = ''     # original "host:port" text before splitting
        self._origpath = path   # untouched input, kept for reference

        if parsefragment and '#' in path:
            path, self.fragment = path.split('#', 1)

        # special case for Windows drive letters and UNC paths
        if hasdriveletter(path) or path.startswith('\\\\'):
            self.path = path
            return

        # For compatibility reasons, we can't handle bundle paths as
        # normal URLS
        if path.startswith('bundle:'):
            self.scheme = 'bundle'
            path = path[7:]
            if path.startswith('//'):
                path = path[2:]
            self.path = path
            return

        # looks like "<scheme>:..."; split the scheme off and switch to
        # URL (non-local) interpretation
        if self._matchscheme(path):
            parts = path.split(':', 1)
            if parts[0]:
                self.scheme, path = parts
                self._localpath = False

        if not path:
            path = None
            if self._localpath:
                self.path = ''
                return
        else:
            if self._localpath:
                # no scheme: treat the whole thing as a filesystem path
                self.path = path
                return

        if parsequery and '?' in path:
            path, self.query = path.split('?', 1)
            if not path:
                path = None
            if not self.query:
                self.query = None

        # // is required to specify a host/authority
        if path and path.startswith('//'):
            parts = path[2:].split('/', 1)
            if len(parts) > 1:
                self.host, path = parts
            else:
                self.host = parts[0]
                path = None
            if not self.host:
                self.host = None
                # path of file:///d is /d
                # path of file:///d:/ is d:/, not /d:/
                if path and not hasdriveletter(path):
                    path = '/' + path

        if self.host and '@' in self.host:
            # rsplit so '@' may appear in the user/password part
            self.user, self.host = self.host.rsplit('@', 1)
            if ':' in self.user:
                self.user, self.passwd = self.user.split(':', 1)
            if not self.host:
                self.host = None

        # Don't split on colons in IPv6 addresses without ports
        if (self.host and ':' in self.host and
            not (self.host.startswith('[') and self.host.endswith(']'))):
            self._hostport = self.host
            self.host, self.port = self.host.rsplit(':', 1)
            if not self.host:
                self.host = None

        if (self.host and self.scheme == 'file' and
            self.host not in ('localhost', '127.0.0.1', '[::1]')):
            raise Abort(_('file:// URLs can only refer to localhost'))

        self.path = path

        # leave the query string escaped
        for a in ('user', 'passwd', 'host', 'port',
                  'path', 'fragment'):
            v = getattr(self, a)
            if v is not None:
                setattr(self, a, pycompat.urlunquote(v))
2568
2568
2569 def __repr__(self):
2569 def __repr__(self):
2570 attrs = []
2570 attrs = []
2571 for a in ('scheme', 'user', 'passwd', 'host', 'port', 'path',
2571 for a in ('scheme', 'user', 'passwd', 'host', 'port', 'path',
2572 'query', 'fragment'):
2572 'query', 'fragment'):
2573 v = getattr(self, a)
2573 v = getattr(self, a)
2574 if v is not None:
2574 if v is not None:
2575 attrs.append('%s: %r' % (a, v))
2575 attrs.append('%s: %r' % (a, v))
2576 return '<url %s>' % ', '.join(attrs)
2576 return '<url %s>' % ', '.join(attrs)
2577
2577
    def __str__(self):
        r"""Join the URL's components back into a URL string.

        Examples:

        >>> str(url('http://user:pw@host:80/c:/bob?fo:oo#ba:ar'))
        'http://user:pw@host:80/c:/bob?fo:oo#ba:ar'
        >>> str(url('http://user:pw@host:80/?foo=bar&baz=42'))
        'http://user:pw@host:80/?foo=bar&baz=42'
        >>> str(url('http://user:pw@host:80/?foo=bar%3dbaz'))
        'http://user:pw@host:80/?foo=bar%3dbaz'
        >>> str(url('ssh://user:pw@[::1]:2200//home/joe#'))
        'ssh://user:pw@[::1]:2200//home/joe#'
        >>> str(url('http://localhost:80//'))
        'http://localhost:80//'
        >>> str(url('http://localhost:80/'))
        'http://localhost:80/'
        >>> str(url('http://localhost:80'))
        'http://localhost:80/'
        >>> str(url('bundle:foo'))
        'bundle:foo'
        >>> str(url('bundle://../foo'))
        'bundle:../foo'
        >>> str(url('path'))
        'path'
        >>> str(url('file:///tmp/foo/bar'))
        'file:///tmp/foo/bar'
        >>> str(url('file:///c:/tmp/foo/bar'))
        'file:///c:/tmp/foo/bar'
        >>> print url(r'bundle:foo\bar')
        bundle:foo\bar
        >>> print url(r'file:///D:\data\hg')
        file:///D:\data\hg
        """
        # local paths round-trip with at most a bundle: prefix and fragment
        if self._localpath:
            s = self.path
            if self.scheme == 'bundle':
                s = 'bundle:' + s
            if self.fragment:
                s += '#' + self.fragment
            return s

        s = self.scheme + ':'
        if self.user or self.passwd or self.host:
            s += '//'
        elif self.scheme and (not self.path or self.path.startswith('/')
                              or hasdriveletter(self.path)):
            # hostless but rooted (e.g. file:///...) still gets '//'
            s += '//'
            if hasdriveletter(self.path):
                # extra '/' so 'c:/x' renders as file:///c:/x
                s += '/'
        if self.user:
            s += urlreq.quote(self.user, safe=self._safechars)
        if self.passwd:
            s += ':' + urlreq.quote(self.passwd, safe=self._safechars)
        if self.user or self.passwd:
            s += '@'
        if self.host:
            # bracketed IPv6 literals must not be percent-quoted
            if not (self.host.startswith('[') and self.host.endswith(']')):
                s += urlreq.quote(self.host)
            else:
                s += self.host
            if self.port:
                s += ':' + urlreq.quote(self.port)
        if self.host:
            s += '/'
        if self.path:
            # TODO: similar to the query string, we should not unescape the
            # path when we store it, the path might contain '%2f' = '/',
            # which we should *not* escape.
            s += urlreq.quote(self.path, safe=self._safepchars)
        if self.query:
            # we store the query in escaped form.
            s += '?' + self.query
        if self.fragment is not None:
            # empty-but-present fragment ('') still emits a trailing '#'
            s += '#' + urlreq.quote(self.fragment, safe=self._safepchars)
        return s
2654
2654
2655 def authinfo(self):
2655 def authinfo(self):
2656 user, passwd = self.user, self.passwd
2656 user, passwd = self.user, self.passwd
2657 try:
2657 try:
2658 self.user, self.passwd = None, None
2658 self.user, self.passwd = None, None
2659 s = str(self)
2659 s = str(self)
2660 finally:
2660 finally:
2661 self.user, self.passwd = user, passwd
2661 self.user, self.passwd = user, passwd
2662 if not self.user:
2662 if not self.user:
2663 return (s, None)
2663 return (s, None)
2664 # authinfo[1] is passed to urllib2 password manager, and its
2664 # authinfo[1] is passed to urllib2 password manager, and its
2665 # URIs must not contain credentials. The host is passed in the
2665 # URIs must not contain credentials. The host is passed in the
2666 # URIs list because Python < 2.4.3 uses only that to search for
2666 # URIs list because Python < 2.4.3 uses only that to search for
2667 # a password.
2667 # a password.
2668 return (s, (None, (s, self.host),
2668 return (s, (None, (s, self.host),
2669 self.user, self.passwd or ''))
2669 self.user, self.passwd or ''))
2670
2670
2671 def isabs(self):
2671 def isabs(self):
2672 if self.scheme and self.scheme != 'file':
2672 if self.scheme and self.scheme != 'file':
2673 return True # remote URL
2673 return True # remote URL
2674 if hasdriveletter(self.path):
2674 if hasdriveletter(self.path):
2675 return True # absolute for our purposes - can't be joined()
2675 return True # absolute for our purposes - can't be joined()
2676 if self.path.startswith(r'\\'):
2676 if self.path.startswith(r'\\'):
2677 return True # Windows UNC path
2677 return True # Windows UNC path
2678 if self.path.startswith('/'):
2678 if self.path.startswith('/'):
2679 return True # POSIX-style
2679 return True # POSIX-style
2680 return False
2680 return False
2681
2681
2682 def localpath(self):
2682 def localpath(self):
2683 if self.scheme == 'file' or self.scheme == 'bundle':
2683 if self.scheme == 'file' or self.scheme == 'bundle':
2684 path = self.path or '/'
2684 path = self.path or '/'
2685 # For Windows, we need to promote hosts containing drive
2685 # For Windows, we need to promote hosts containing drive
2686 # letters to paths with drive letters.
2686 # letters to paths with drive letters.
2687 if hasdriveletter(self._hostport):
2687 if hasdriveletter(self._hostport):
2688 path = self._hostport + '/' + self.path
2688 path = self._hostport + '/' + self.path
2689 elif (self.host is not None and self.path
2689 elif (self.host is not None and self.path
2690 and not hasdriveletter(path)):
2690 and not hasdriveletter(path)):
2691 path = '/' + path
2691 path = '/' + path
2692 return path
2692 return path
2693 return self._origpath
2693 return self._origpath
2694
2694
2695 def islocal(self):
2695 def islocal(self):
2696 '''whether localpath will return something that posixfile can open'''
2696 '''whether localpath will return something that posixfile can open'''
2697 return (not self.scheme or self.scheme == 'file'
2697 return (not self.scheme or self.scheme == 'file'
2698 or self.scheme == 'bundle')
2698 or self.scheme == 'bundle')
2699
2699
def hasscheme(path):
    """Return True if *path* parses as a URL with a scheme prefix."""
    parsed = url(path)
    return bool(parsed.scheme)
2702
2702
def hasdriveletter(path):
    """Return a truthy value if *path* starts with a Windows drive letter
    (e.g. ``c:``). Falsy inputs are returned unchanged, mirroring the
    short-circuit behavior of the original ``and`` chain."""
    if not path:
        return path
    return path[1:2] == ':' and path[0:1].isalpha()
2705
2705
def urllocalpath(path):
    """Parse *path* as a URL (ignoring query/fragment) and return its
    local filesystem form."""
    parsed = url(path, parsequery=False, parsefragment=False)
    return parsed.localpath()
2708
2708
def hidepassword(u):
    '''hide user credential in a url string'''
    parsed = url(u)
    if parsed.passwd:
        # Mask rather than drop, so the URL still shows a password was set.
        parsed.passwd = '***'
    return str(parsed)
2715
2715
def removeauth(u):
    '''remove all authentication information from a url string'''
    parsed = url(u)
    parsed.user = None
    parsed.passwd = None
    return str(parsed)
2721
2721
def isatty(fp):
    """Return ``fp.isatty()``, or False if the object has no such method.

    AttributeError raised from inside ``isatty()`` is also treated as
    "not a tty", matching the original try/except form.
    """
    try:
        result = fp.isatty()
    except AttributeError:
        return False
    return result
2727
2727
# Pretty-printer for durations measured in seconds, covering seconds
# down to nanoseconds with precision scaled to magnitude. Built from
# unitcountfn (defined earlier in this file) with (factor, scale,
# format) triples tried in order.
# NOTE(review): triple semantics inferred from the table shape --
# confirm against unitcountfn's definition.
timecount = unitcountfn(
    (1, 1e3, _('%.0f s')),
    (100, 1, _('%.1f s')),
    (10, 1, _('%.2f s')),
    (1, 1, _('%.3f s')),
    (100, 0.001, _('%.1f ms')),
    (10, 0.001, _('%.2f ms')),
    (1, 0.001, _('%.3f ms')),
    (100, 0.000001, _('%.1f us')),
    (10, 0.000001, _('%.2f us')),
    (1, 0.000001, _('%.3f us')),
    (100, 0.000000001, _('%.1f ns')),
    (10, 0.000000001, _('%.2f ns')),
    (1, 0.000000001, _('%.3f ns')),
    )
2743
2743
# Current indentation depth (in spaces) for timed()'s stderr output;
# kept in a one-element list so nested wrappers can mutate it in place.
_timenesting = [0]
2745
2745
def timed(func):
    '''Report the execution time of a function call to stderr.

    During development, use as a decorator when you need to measure
    the cost of a function, e.g. as follows:

    @util.timed
    def foo(a, b, c):
        pass
    '''

    def wrapper(*args, **kwargs):
        indent = 2
        starttime = time.time()
        # Deepen the indent so nested timed calls are visually nested.
        _timenesting[0] += indent
        try:
            return func(*args, **kwargs)
        finally:
            duration = time.time() - starttime
            _timenesting[0] -= indent
            sys.stderr.write('%s%s: %s\n' %
                             (' ' * _timenesting[0], func.__name__,
                              timecount(duration)))
    return wrapper
2770
2770
# Suffix -> byte-multiplier table for sizetoint(). Scanned in order with
# endswith(), so the bare single-letter entries must be checked first.
_sizeunits = (('m', 2**20), ('k', 2**10), ('g', 2**30),
              ('kb', 2**10), ('mb', 2**20), ('gb', 2**30), ('b', 1))

def sizetoint(s):
    '''Convert a space specifier to a byte count.

    >>> sizetoint('30')
    30
    >>> sizetoint('2.2kb')
    2252
    >>> sizetoint('6M')
    6291456
    '''
    t = s.strip().lower()
    try:
        for suffix, multiplier in _sizeunits:
            if t.endswith(suffix):
                return int(float(t[:-len(suffix)]) * multiplier)
        return int(t)
    except ValueError:
        raise error.ParseError(_("couldn't parse size: %s") % s)
2792
2792
class hooks(object):
    '''A collection of hook functions that can be used to extend a
    function's behavior. Hooks are called in lexicographic order,
    based on the names of their sources.'''

    def __init__(self):
        # list of (source, hookfn) pairs; sorted lazily at call time
        self._hooks = []

    def add(self, source, hook):
        """Register *hook* under the name *source*."""
        self._hooks.append((source, hook))

    def __call__(self, *args):
        # Sort by source so call order is deterministic regardless of
        # registration order.
        self._hooks.sort(key=lambda pair: pair[0])
        return [hookfn(*args) for _source, hookfn in self._hooks]
2810
2810
def getstackframes(skip=0, line=' %-*s in %s\n', fileline='%s:%s'):
    '''Yields lines for a nicely formatted stacktrace.
    Skips the 'skip' last entries.
    Each file+linenumber is formatted according to fileline.
    Each line is formatted according to line.
    If line is None, it yields:
      length of longest filepath+line number,
      filepath+linenumber,
      function

    Not be used in production code but very convenient while developing.
    '''
    stack = traceback.extract_stack()[:-skip - 1]
    entries = [(fileline % (fname, lineno), funcname)
               for fname, lineno, funcname, _text in stack]
    if not entries:
        return
    # Width of the widest location, for column alignment.
    width = max(len(location) for location, _func in entries)
    for location, func in entries:
        if line is None:
            yield (width, location, func)
        else:
            yield line % (width, location, func)
2832
2832
def debugstacktrace(msg='stacktrace', skip=0, f=sys.stderr, otherf=sys.stdout):
    '''Writes a message to f (stderr) with a nicely formatted stacktrace.
    Skips the 'skip' last entries. By default it will flush stdout first.
    It can be used everywhere and intentionally does not require an ui object.
    Not be used in production code but very convenient while developing.
    '''
    if otherf:
        # Flush the "other" stream first so stdout/stderr output does not
        # interleave badly in a terminal.
        otherf.flush()
    f.write('%s at:\n' % msg)
    # skip + 1 also hides this helper's own frame from the trace.
    for line in getstackframes(skip + 1):
        f.write(line)
    f.flush()
2845
2845
class dirs(object):
    '''a multiset of directory names from a dirstate or manifest'''

    def __init__(self, map, skip=None):
        # directory name -> number of tracked files beneath it
        self._dirs = {}
        addpath = self.addpath
        if skip is not None and safehasattr(map, 'iteritems'):
            # dirstate-style mapping: filter out entries whose state
            # matches 'skip' (e.g. removed files).
            for f, s in map.iteritems():
                if s[0] != skip:
                    addpath(f)
        else:
            for f in map:
                addpath(f)

    def addpath(self, path):
        """Count every ancestor directory of *path* once more."""
        counts = self._dirs
        for base in finddirs(path):
            if base in counts:
                # Ancestors of an already-seen directory are already
                # counted; bumping the first hit is enough.
                counts[base] += 1
                return
            counts[base] = 1

    def delpath(self, path):
        """Undo one addpath(path)."""
        counts = self._dirs
        for base in finddirs(path):
            if counts[base] > 1:
                counts[base] -= 1
                return
            del counts[base]

    def __iter__(self):
        return self._dirs.iterkeys()

    def __contains__(self, d):
        return d in self._dirs
2881
2881
# Prefer the C implementation of dirs from the parsers module when the
# extension provides one; it replaces the pure-Python class above.
if safehasattr(parsers, 'dirs'):
    dirs = parsers.dirs
2884
2884
def finddirs(path):
    """Yield the ancestor directories of *path*, deepest first.

    'a/b/c' yields 'a/b' then 'a'. A path with no '/' yields nothing.
    """
    remainder = path
    while True:
        cut = remainder.rfind('/')
        if cut == -1:
            return
        remainder = remainder[:cut]
        yield remainder
2890
2890
class ctxmanager(object):
    '''A context manager for use in 'with' blocks to allow multiple
    contexts to be entered at once. This is both safer and more
    flexible than contextlib.nested.

    Once Mercurial supports Python 2.7+, this will become mostly
    unnecessary.
    '''

    def __init__(self, *args):
        '''Accepts a list of no-argument functions that return context
        managers. These will be invoked at __call__ time.'''
        self._pending = args
        self._atexit = []

    def __enter__(self):
        return self

    def enter(self):
        '''Create and enter context managers in the order in which they were
        passed to the constructor.'''
        values = []
        for func in self._pending:
            obj = func()
            values.append(obj.__enter__())
            self._atexit.append(obj.__exit__)
        del self._pending
        return values

    def atexit(self, func, *args, **kwargs):
        '''Add a function to call when this context manager exits. The
        ordering of multiple atexit calls is unspecified, save that
        they will happen before any __exit__ functions.'''
        def wrapper(exc_type, exc_val, exc_tb):
            func(*args, **kwargs)
        self._atexit.append(wrapper)
        return func

    def __exit__(self, exc_type, exc_val, exc_tb):
        '''Context managers are exited in the reverse order from which
        they were created.'''
        received = exc_type is not None
        suppressed = False
        pending = None
        self._atexit.reverse()
        for exitfunc in self._atexit:
            try:
                if exitfunc(exc_type, exc_val, exc_tb):
                    # An exit function returning truthy suppresses the
                    # in-flight exception for subsequent handlers.
                    suppressed = True
                    exc_type = None
                    exc_val = None
                    exc_tb = None
            except BaseException:
                # Remember the latest exception raised by an exit
                # function; it is re-raised once all handlers have run.
                # (The original had a redundant extra sys.exc_info()
                # call and dead store here.)
                exc_type, exc_val, exc_tb = pending = sys.exc_info()
        del self._atexit
        if pending:
            raise exc_val
        return received and suppressed
2950
2950
2951 # compression code
2951 # compression code
2952
2952
class compressormanager(object):
    """Holds registrations of various compression engines.

    This class essentially abstracts the differences between compression
    engines to allow new compression formats to be added easily, possibly from
    extensions.

    Compressors are registered against the global instance by calling its
    ``register()`` method.
    """
    def __init__(self):
        self._engines = {}
        # Bundle spec human name to engine name.
        self._bundlenames = {}
        # Internal bundle identifier to engine name.
        self._bundletypes = {}

    def __getitem__(self, key):
        return self._engines[key]

    def __contains__(self, key):
        return key in self._engines

    def __iter__(self):
        return iter(self._engines.keys())

    def register(self, engine):
        """Register a compression engine with the manager.

        The argument must be a ``compressionengine`` instance.
        """
        if not isinstance(engine, compressionengine):
            raise ValueError(_('argument must be a compressionengine'))

        name = engine.name()

        if name in self._engines:
            raise error.Abort(_('compression engine %s already registered') %
                              name)

        bundleinfo = engine.bundletype()
        if bundleinfo:
            bundlename, bundletype = bundleinfo

            if bundlename in self._bundlenames:
                raise error.Abort(_('bundle name %s already registered') %
                                  bundlename)
            if bundletype in self._bundletypes:
                raise error.Abort(_('bundle type %s already registered by %s') %
                                  (bundletype, self._bundletypes[bundletype]))

            # A None bundlename means no external facing name was declared.
            if bundlename:
                self._bundlenames[bundlename] = name

            self._bundletypes[bundletype] = name

        self._engines[name] = engine

    @property
    def supportedbundlenames(self):
        return set(self._bundlenames.keys())

    @property
    def supportedbundletypes(self):
        return set(self._bundletypes.keys())

    def _checkedengine(self, name):
        """Return the engine registered under ``name``.

        Aborts if the engine is registered but reports itself unavailable
        (e.g. a missing optional C extension). Shared by forbundlename and
        forbundletype so the availability check lives in one place.
        """
        engine = self._engines[name]
        if not engine.available():
            raise error.Abort(_('compression engine %s could not be loaded') %
                              engine.name())
        return engine

    def forbundlename(self, bundlename):
        """Obtain a compression engine registered to a bundle name.

        Will raise KeyError if the bundle type isn't registered.

        Will abort if the engine is known but not available.
        """
        return self._checkedengine(self._bundlenames[bundlename])

    def forbundletype(self, bundletype):
        """Obtain a compression engine registered to a bundle type.

        Will raise KeyError if the bundle type isn't registered.

        Will abort if the engine is known but not available.
        """
        return self._checkedengine(self._bundletypes[bundletype])
3033
3045
# Global registry of compression engines; engines defined below (and by
# extensions) register themselves against this instance at import time.
compengines = compressormanager()
3035
3047
class compressionengine(object):
    """Base class for compression engines.

    Compression engines must implement the interface defined by this class.
    """
    def name(self):
        """Returns the name of the compression engine.

        This is the key the engine is registered under.

        This method must be implemented.
        """
        raise NotImplementedError()

    def available(self):
        """Whether the compression engine is available.

        The intent of this method is to allow optional compression engines
        that may not be available in all installations (such as engines relying
        on C extensions that may not be present).
        """
        # Default: always available. Optional engines override this.
        return True

    def bundletype(self):
        """Describes bundle identifiers for this engine.

        If this compression engine isn't supported for bundles, returns None.

        If this engine can be used for bundles, returns a 2-tuple of strings of
        the user-facing "bundle spec" compression name and an internal
        identifier used to denote the compression format within bundles. To
        exclude the name from external usage, set the first element to ``None``.

        If bundle compression is supported, the class must also implement
        ``compressstream`` and `decompressorreader`.
        """
        return None

    def compressstream(self, it, opts=None):
        """Compress an iterator of chunks.

        The method receives an iterator (ideally a generator) of chunks of
        bytes to be compressed. It returns an iterator (ideally a generator)
        of bytes of chunks representing the compressed output.

        Optionally accepts an argument defining how to perform compression.
        Each engine treats this argument differently.
        """
        raise NotImplementedError()

    def decompressorreader(self, fh):
        """Perform decompression on a file object.

        Argument is an object with a ``read(size)`` method that returns
        compressed data. Return value is an object with a ``read(size)`` that
        returns uncompressed data.
        """
        raise NotImplementedError()
3094
3106
class _zlibengine(compressionengine):
    """zlib compression; the 'gzip'/'GZ' bundle format."""

    def name(self):
        return 'zlib'

    def bundletype(self):
        return 'gzip', 'GZ'

    def compressstream(self, it, opts=None):
        opts = opts or {}
        # level -1 is zlib's default compromise between speed and size.
        compobj = zlib.compressobj(opts.get('level', -1))
        for chunk in it:
            compressed = compobj.compress(chunk)
            # compress() may buffer internally and emit nothing; skipping
            # empty results here is cheaper than yielding them.
            if compressed:
                yield compressed
        yield compobj.flush()

    def decompressorreader(self, fh):
        def gen():
            decompobj = zlib.decompressobj()
            for chunk in filechunkiter(fh):
                yield decompobj.decompress(chunk)
        return chunkbuffer(gen())
3122
3134
# Register zlib with the global compression engine manager.
compengines.register(_zlibengine())
3124
3136
class _bz2engine(compressionengine):
    """bz2 compression; the 'bzip2'/'BZ' bundle format."""

    def name(self):
        return 'bz2'

    def bundletype(self):
        return 'bzip2', 'BZ'

    def compressstream(self, it, opts=None):
        opts = opts or {}
        # 9 is bz2's maximum (and default) compression level.
        compobj = bz2.BZ2Compressor(opts.get('level', 9))
        for chunk in it:
            compressed = compobj.compress(chunk)
            # compress() may buffer and emit nothing; skip empty output.
            if compressed:
                yield compressed
        yield compobj.flush()

    def decompressorreader(self, fh):
        def gen():
            decompobj = bz2.BZ2Decompressor()
            for chunk in filechunkiter(fh):
                yield decompobj.decompress(chunk)
        return chunkbuffer(gen())
3149
3161
# Register bz2 with the global compression engine manager.
compengines.register(_bz2engine())
3151
3163
class _truncatedbz2engine(compressionengine):
    """Decompression-only engine for bz2 streams missing the 'BZ' magic.

    Internal use only (bundle identifier '_truncatedBZ'); it has no
    user-facing bundle spec name.
    """

    def name(self):
        return 'bz2truncated'

    def bundletype(self):
        return None, '_truncatedBZ'

    # We don't implement compressstream because it is hackily handled elsewhere.

    def decompressorreader(self, fh):
        def gen():
            # The input stream doesn't have the 'BZ' header. So add it back.
            decompobj = bz2.BZ2Decompressor()
            decompobj.decompress('BZ')
            for chunk in filechunkiter(fh):
                yield decompobj.decompress(chunk)
        return chunkbuffer(gen())
3170
3182
# Register the header-less bz2 decompressor (internal use only).
compengines.register(_truncatedbz2engine())
3172
3184
class _noopengine(compressionengine):
    """Pass-through engine; the 'none'/'UN' (uncompressed) bundle format."""

    def name(self):
        return 'none'

    def bundletype(self):
        return 'none', 'UN'

    def compressstream(self, it, opts=None):
        # Identity: the input chunks are already the output.
        return it

    def decompressorreader(self, fh):
        # Identity: read the file object directly.
        return fh
3185
3197
# Register the no-op (uncompressed) engine.
compengines.register(_noopengine())
3187
3199
# convenient shortcut for interactive/debugging sessions
dst = debugstacktrace
General Comments 0
You need to be logged in to leave comments. Login now