##// END OF EJS Templates
util: replace file I/O with readfile
Bryan O'Sullivan -
r27768:5ef99738 default
parent child Browse files
Show More
@@ -1,2728 +1,2725
1 # util.py - Mercurial utility functions and platform specific implementations
1 # util.py - Mercurial utility functions and platform specific implementations
2 #
2 #
3 # Copyright 2005 K. Thananchayan <thananck@yahoo.com>
3 # Copyright 2005 K. Thananchayan <thananck@yahoo.com>
4 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
5 # Copyright 2006 Vadim Gelfer <vadim.gelfer@gmail.com>
5 # Copyright 2006 Vadim Gelfer <vadim.gelfer@gmail.com>
6 #
6 #
7 # This software may be used and distributed according to the terms of the
7 # This software may be used and distributed according to the terms of the
8 # GNU General Public License version 2 or any later version.
8 # GNU General Public License version 2 or any later version.
9
9
10 """Mercurial utility functions and platform specific implementations.
10 """Mercurial utility functions and platform specific implementations.
11
11
12 This contains helper routines that are independent of the SCM core and
12 This contains helper routines that are independent of the SCM core and
13 hide platform-specific details from the core.
13 hide platform-specific details from the core.
14 """
14 """
15
15
16 from __future__ import absolute_import
16 from __future__ import absolute_import
17
17
18 import bz2
18 import bz2
19 import calendar
19 import calendar
20 import collections
20 import collections
21 import datetime
21 import datetime
22 import errno
22 import errno
23 import gc
23 import gc
24 import hashlib
24 import hashlib
25 import imp
25 import imp
26 import os
26 import os
27 import re as remod
27 import re as remod
28 import shutil
28 import shutil
29 import signal
29 import signal
30 import socket
30 import socket
31 import subprocess
31 import subprocess
32 import sys
32 import sys
33 import tempfile
33 import tempfile
34 import textwrap
34 import textwrap
35 import time
35 import time
36 import traceback
36 import traceback
37 import urllib
37 import urllib
38 import zlib
38 import zlib
39
39
40 from . import (
40 from . import (
41 encoding,
41 encoding,
42 error,
42 error,
43 i18n,
43 i18n,
44 osutil,
44 osutil,
45 parsers,
45 parsers,
46 )
46 )
47
47
48 if os.name == 'nt':
48 if os.name == 'nt':
49 from . import windows as platform
49 from . import windows as platform
50 else:
50 else:
51 from . import posix as platform
51 from . import posix as platform
52
52
# Hash constructors re-exported for convenience.
md5 = hashlib.md5
sha1 = hashlib.sha1
sha512 = hashlib.sha512
_ = i18n._

# Re-export the platform-specific implementations so the rest of the code
# base can say "util.<name>" without caring which OS it runs on.
cachestat = platform.cachestat
checkexec = platform.checkexec
checklink = platform.checklink
copymode = platform.copymode
executablepath = platform.executablepath
expandglobs = platform.expandglobs
explainexit = platform.explainexit
findexe = platform.findexe
gethgcmd = platform.gethgcmd
getuser = platform.getuser
groupmembers = platform.groupmembers
groupname = platform.groupname
hidewindow = platform.hidewindow
isexec = platform.isexec
isowner = platform.isowner
localpath = platform.localpath
lookupreg = platform.lookupreg
makedir = platform.makedir
nlinks = platform.nlinks
normpath = platform.normpath
normcase = platform.normcase
normcasespec = platform.normcasespec
normcasefallback = platform.normcasefallback
openhardlinks = platform.openhardlinks
oslink = platform.oslink
parsepatchoutput = platform.parsepatchoutput
pconvert = platform.pconvert
poll = platform.poll
popen = platform.popen
posixfile = platform.posixfile
quotecommand = platform.quotecommand
readpipe = platform.readpipe
rename = platform.rename
removedirs = platform.removedirs
samedevice = platform.samedevice
samefile = platform.samefile
samestat = platform.samestat
setbinary = platform.setbinary
setflags = platform.setflags
setsignalhandler = platform.setsignalhandler
shellquote = platform.shellquote
spawndetached = platform.spawndetached
split = platform.split
sshargs = platform.sshargs
# Prefer the C implementation from the osutil extension when it provides
# one; fall back to the pure-Python platform version otherwise.
statfiles = getattr(osutil, 'statfiles', platform.statfiles)
statisexec = platform.statisexec
statislink = platform.statislink
termwidth = platform.termwidth
testpid = platform.testpid
umask = platform.umask
unlink = platform.unlink
unlinkpath = platform.unlinkpath
username = platform.username

# Python compatibility

# Unique sentinel distinguishing "no value supplied" from a legitimate None.
_notset = object()

# disable Python's problematic floating point timestamps (issue4836)
# (Python hypocritically says you shouldn't change this behavior in
# libraries, and sure enough Mercurial is not a library.)
os.stat_float_times(False)
120
120
def safehasattr(thing, attr):
    """Return True if `thing` has an attribute named `attr`.

    Unlike the hasattr builtin on Python 2, this does not swallow
    arbitrary exceptions raised while fetching the attribute.
    """
    return getattr(thing, attr, _notset) is not _notset
123
123
# Supported digest algorithms, keyed by name.
DIGESTS = {
    'md5': md5,
    'sha1': sha1,
    'sha512': sha512,
}
# List of digest types from strongest to weakest
DIGESTS_BY_STRENGTH = ['sha512', 'sha1', 'md5']

# Sanity check: the preference list must only name supported digests.
for k in DIGESTS_BY_STRENGTH:
    assert k in DIGESTS

class digester(object):
    """helper to compute digests.

    This helper can be used to compute one or more digests given their name.

    >>> d = digester(['md5', 'sha1'])
    >>> d.update('foo')
    >>> [k for k in sorted(d)]
    ['md5', 'sha1']
    >>> d['md5']
    'acbd18db4cc2f85cedef654fccc4a4d8'
    >>> d['sha1']
    '0beec7b5ea3f0fdbc95d0dd47f3c5bc275da8a33'
    >>> digester.preferred(['md5', 'sha1'])
    'sha1'
    """

    def __init__(self, digests, s=''):
        self._hashes = {}
        for k in digests:
            if k not in DIGESTS:
                raise Abort(_('unknown digest type: %s') % k)
            self._hashes[k] = DIGESTS[k]()
        if s:
            self.update(s)

    def update(self, data):
        """Feed `data` into every tracked hash object."""
        for h in self._hashes.values():
            h.update(data)

    def __getitem__(self, key):
        if key not in DIGESTS:
            # fix: this previously interpolated the undefined name 'k',
            # turning the intended Abort into a NameError.
            raise Abort(_('unknown digest type: %s') % key)
        return self._hashes[key].hexdigest()

    def __iter__(self):
        return iter(self._hashes)

    @staticmethod
    def preferred(supported):
        """returns the strongest digest type in both supported and DIGESTS."""

        for k in DIGESTS_BY_STRENGTH:
            if k in supported:
                return k
        return None
181
181
class digestchecker(object):
    """file handle wrapper that additionally checks content against a given
    size and digests.

    d = digestchecker(fh, size, {'md5': '...'})

    When multiple digests are given, all of them are validated.
    """

    def __init__(self, fh, size, digests):
        self._fh = fh
        self._size = size
        self._got = 0
        self._digests = dict(digests)
        self._digester = digester(self._digests.keys())

    def read(self, length=-1):
        # read from the wrapped handle, keeping the running digests and the
        # byte counter up to date
        data = self._fh.read(length)
        self._digester.update(data)
        self._got += len(data)
        return data

    def validate(self):
        # the total number of bytes read must match the advertised size ...
        if self._size != self._got:
            raise Abort(_('size mismatch: expected %d, got %d') %
                        (self._size, self._got))
        # ... and every advertised digest must match what we computed
        for k, v in self._digests.items():
            if v != self._digester[k]:
                # i18n: first parameter is a digest name
                raise Abort(_('%s mismatch: expected %s, got %s') %
                            (k, v, self._digester[k]))
213
213
214 try:
214 try:
215 buffer = buffer
215 buffer = buffer
216 except NameError:
216 except NameError:
217 if sys.version_info[0] < 3:
217 if sys.version_info[0] < 3:
218 def buffer(sliceable, offset=0):
218 def buffer(sliceable, offset=0):
219 return sliceable[offset:]
219 return sliceable[offset:]
220 else:
220 else:
221 def buffer(sliceable, offset=0):
221 def buffer(sliceable, offset=0):
222 return memoryview(sliceable)[offset:]
222 return memoryview(sliceable)[offset:]
223
223
224 closefds = os.name == 'posix'
224 closefds = os.name == 'posix'
225
225
226 _chunksize = 4096
226 _chunksize = 4096
227
227
class bufferedinputpipe(object):
    """a manually buffered input pipe

    Python will not let us use buffered IO and lazy reading with 'polling' at
    the same time. We cannot probe the buffer state and select will not detect
    that data are ready to read if they are already buffered.

    This class let us work around that by implementing its own buffering
    (allowing efficient readline) while offering a way to know if the buffer is
    empty from the output (allowing collaboration of the buffer with polling).

    This class lives in the 'util' module because it makes use of the 'os'
    module from the python stdlib.
    """

    def __init__(self, input):
        self._input = input
        self._buffer = []       # list of buffered chunks
        self._eof = False       # True once the underlying pipe is exhausted
        self._lenbuf = 0        # total number of buffered bytes

    @property
    def hasbuffer(self):
        """True is any data is currently buffered

        This will be used externally a pre-step for polling IO. If there is
        already data then no polling should be set in place."""
        return bool(self._buffer)

    @property
    def closed(self):
        return self._input.closed

    def fileno(self):
        return self._input.fileno()

    def close(self):
        return self._input.close()

    def read(self, size):
        # keep filling until we can satisfy the request or hit EOF
        while not self._eof and self._lenbuf < size:
            self._fillbuffer()
        return self._frombuffer(size)

    def readline(self, *args, **kwargs):
        if len(self._buffer) > 1:
            # this should not happen because both read and readline end with a
            # _frombuffer call that collapse it.
            self._buffer = [''.join(self._buffer)]
            self._lenbuf = len(self._buffer[0])
        nlidx = -1
        if self._buffer:
            nlidx = self._buffer[-1].find('\n')
        # keep filling until a newline shows up or the pipe runs dry
        while not self._eof and nlidx < 0:
            self._fillbuffer()
            if self._buffer:
                nlidx = self._buffer[-1].find('\n')
        size = nlidx + 1
        if nlidx < 0: # end of file
            size = self._lenbuf
        elif len(self._buffer) > 1:
            # we need to take previous chunks into account
            size += self._lenbuf - len(self._buffer[-1])
        return self._frombuffer(size)

    def _frombuffer(self, size):
        """return at most 'size' data from the buffer

        The data are removed from the buffer."""
        if size == 0 or not self._buffer:
            return ''
        pending = self._buffer[0]
        if len(self._buffer) > 1:
            pending = ''.join(self._buffer)

        chunk = pending[:size]
        remainder = pending[len(chunk):]
        if remainder:
            self._buffer = [remainder]
            self._lenbuf = len(remainder)
        else:
            self._buffer = []
            self._lenbuf = 0
        return chunk

    def _fillbuffer(self):
        """read data to the buffer"""
        incoming = os.read(self._input.fileno(), _chunksize)
        if not incoming:
            self._eof = True
        else:
            self._lenbuf += len(incoming)
            self._buffer.append(incoming)
321
321
def popen2(cmd, env=None, newlines=False):
    """Spawn `cmd` through the shell; return its (stdin, stdout) pipes."""
    # Setting bufsize to -1 lets the system decide the buffer size.
    # The default for bufsize is 0, meaning unbuffered. This leads to
    # poor performance on Mac OS X: http://bugs.python.org/issue4194
    proc = subprocess.Popen(cmd, shell=True, bufsize=-1,
                            close_fds=closefds,
                            stdin=subprocess.PIPE, stdout=subprocess.PIPE,
                            universal_newlines=newlines,
                            env=env)
    return proc.stdin, proc.stdout
332
332
def popen3(cmd, env=None, newlines=False):
    """Spawn `cmd`; return its (stdin, stdout, stderr) pipes.

    Same as popen4, but the Popen object itself is not exposed.
    """
    return popen4(cmd, env, newlines)[:3]
336
336
def popen4(cmd, env=None, newlines=False, bufsize=-1):
    """Spawn `cmd` through the shell with all three stdio streams piped.

    Returns (stdin, stdout, stderr, Popen object).
    """
    proc = subprocess.Popen(cmd, shell=True, bufsize=bufsize,
                            close_fds=closefds,
                            stdin=subprocess.PIPE, stdout=subprocess.PIPE,
                            stderr=subprocess.PIPE,
                            universal_newlines=newlines,
                            env=env)
    return proc.stdin, proc.stdout, proc.stderr, proc
345
345
def version():
    """Return version information if available."""
    try:
        from . import __version__
    except ImportError:
        # not installed / no generated version module
        return 'unknown'
    return __version__.version
353
353
def versiontuple(v=None, n=4):
    """Parses a Mercurial version string into an N-tuple.

    The version string to be parsed is specified with the ``v`` argument.
    If it isn't defined, the current Mercurial version string will be parsed.

    ``n`` can be 2, 3, or 4. Here is how some version strings map to
    returned values:

    >>> v = '3.6.1+190-df9b73d2d444'
    >>> versiontuple(v, 2)
    (3, 6)
    >>> versiontuple(v, 3)
    (3, 6, 1)
    >>> versiontuple(v, 4)
    (3, 6, 1, '190-df9b73d2d444')

    >>> versiontuple('3.6.1+190-df9b73d2d444+20151118')
    (3, 6, 1, '190-df9b73d2d444+20151118')

    >>> v = '3.6'
    >>> versiontuple(v, 2)
    (3, 6)
    >>> versiontuple(v, 3)
    (3, 6, None)
    >>> versiontuple(v, 4)
    (3, 6, None, None)
    """
    if not v:
        v = version()
    # split off the "+extra" suffix, if any
    vparts, plus, extra = v.partition('+')
    if not plus:
        extra = None

    vints = []
    for piece in vparts.split('.'):
        try:
            vints.append(int(piece))
        except ValueError:
            # stop at the first non-numeric component
            break
    # pad to three numeric slots: (3, 6) -> (3, 6, None)
    while len(vints) < 3:
        vints.append(None)

    if n == 2:
        return (vints[0], vints[1])
    if n == 3:
        return (vints[0], vints[1], vints[2])
    if n == 4:
        return (vints[0], vints[1], vints[2], extra)
406
406
# used by parsedate
defaultdateformats = (
    '%Y-%m-%d %H:%M:%S',
    '%Y-%m-%d %I:%M:%S%p',
    '%Y-%m-%d %H:%M',
    '%Y-%m-%d %I:%M%p',
    '%Y-%m-%d',
    '%m-%d',
    '%m/%d',
    '%m/%d/%y',
    '%m/%d/%Y',
    '%a %b %d %H:%M:%S %Y',
    '%a %b %d %I:%M:%S%p %Y',
    '%a, %d %b %Y %H:%M:%S', # GNU coreutils "/bin/date --rfc-2822"
    '%b %d %H:%M:%S %Y',
    '%b %d %I:%M:%S%p %Y',
    '%b %d %H:%M:%S',
    '%b %d %I:%M:%S%p',
    '%b %d %H:%M',
    '%b %d %I:%M%p',
    '%b %d %Y',
    '%b %d',
    '%H:%M:%S',
    '%I:%M:%S%p',
    '%H:%M',
    '%I:%M%p',
)

# additional, more permissive formats (year/month only, etc.) accepted when
# extended parsing is requested
extendeddateformats = defaultdateformats + (
    "%Y",
    "%Y-%m",
    "%b",
    "%b %Y",
)
441
441
def cachefunc(func):
    '''cache the result of function calls'''
    # XXX doesn't handle keywords args
    # Use __code__ rather than the Python 2-only func_code alias: __code__
    # has existed since Python 2.6 and is the only spelling on Python 3.
    argcount = func.__code__.co_argcount
    if argcount == 0:
        # zero-argument function: a one-slot list is cheaper than a dict
        cache = []
        def f():
            if len(cache) == 0:
                cache.append(func())
            return cache[0]
        return f
    cache = {}
    if argcount == 1:
        # we gain a small amount of time because
        # we don't need to pack/unpack the list
        def f(arg):
            if arg not in cache:
                cache[arg] = func(arg)
            return cache[arg]
    else:
        def f(*args):
            if args not in cache:
                cache[args] = func(*args)
            return cache[args]

    return f
467
467
class sortdict(dict):
    '''a simple sorted dictionary

    Iteration follows insertion order; re-setting an existing key moves it
    to the end.
    '''
    def __init__(self, data=None):
        self._list = []
        if data:
            self.update(data)
    def copy(self):
        return sortdict(self)
    def __setitem__(self, key, val):
        if key in self:
            # move an existing key to the end
            self._list.remove(key)
        self._list.append(key)
        dict.__setitem__(self, key, val)
    def __iter__(self):
        return self._list.__iter__()
    def update(self, src):
        if isinstance(src, dict):
            # items() works on both Python 2 and 3; iteritems() does not
            src = src.items()
        for k, v in src:
            self[k] = v
    def clear(self):
        dict.clear(self)
        self._list = []
    def items(self):
        return [(k, self[k]) for k in self._list]
    def __delitem__(self, key):
        dict.__delitem__(self, key)
        self._list.remove(key)
    def pop(self, key, *args, **kwargs):
        # fix: the popped value used to be silently discarded; return it
        # like dict.pop does
        value = dict.pop(self, key, *args, **kwargs)
        try:
            self._list.remove(key)
        except ValueError:
            # key was absent and a default swallowed the KeyError
            pass
        return value
    def keys(self):
        return self._list
    def iterkeys(self):
        return self._list.__iter__()
    def iteritems(self):
        for k in self._list:
            yield k, self[k]
    def insert(self, index, key, val):
        self._list.insert(index, key)
        dict.__setitem__(self, key, val)
512
512
class _lrucachenode(object):
    """A node in a doubly linked list.

    Holds a reference to nodes on either side as well as a key-value
    pair for the dictionary entry.
    """
    # __slots__ keeps per-node memory low; caches can hold many nodes.
    __slots__ = ('next', 'prev', 'key', 'value')

    def __init__(self):
        # neighbors in the circular list; set by the owning cache
        self.next = None
        self.prev = None

        # _notset marks a slot that currently stores no entry
        self.key = _notset
        self.value = None

    def markempty(self):
        """Mark the node as emptied."""
        self.key = _notset
531
531
532 class lrucachedict(object):
532 class lrucachedict(object):
533 """Dict that caches most recent accesses and sets.
533 """Dict that caches most recent accesses and sets.
534
534
535 The dict consists of an actual backing dict - indexed by original
535 The dict consists of an actual backing dict - indexed by original
536 key - and a doubly linked circular list defining the order of entries in
536 key - and a doubly linked circular list defining the order of entries in
537 the cache.
537 the cache.
538
538
539 The head node is the newest entry in the cache. If the cache is full,
539 The head node is the newest entry in the cache. If the cache is full,
540 we recycle head.prev and make it the new head. Cache accesses result in
540 we recycle head.prev and make it the new head. Cache accesses result in
541 the node being moved to before the existing head and being marked as the
541 the node being moved to before the existing head and being marked as the
542 new head node.
542 new head node.
543 """
543 """
544 def __init__(self, max):
544 def __init__(self, max):
545 self._cache = {}
545 self._cache = {}
546
546
547 self._head = head = _lrucachenode()
547 self._head = head = _lrucachenode()
548 head.prev = head
548 head.prev = head
549 head.next = head
549 head.next = head
550 self._size = 1
550 self._size = 1
551 self._capacity = max
551 self._capacity = max
552
552
553 def __len__(self):
553 def __len__(self):
554 return len(self._cache)
554 return len(self._cache)
555
555
556 def __contains__(self, k):
556 def __contains__(self, k):
557 return k in self._cache
557 return k in self._cache
558
558
559 def __iter__(self):
559 def __iter__(self):
560 # We don't have to iterate in cache order, but why not.
560 # We don't have to iterate in cache order, but why not.
561 n = self._head
561 n = self._head
562 for i in range(len(self._cache)):
562 for i in range(len(self._cache)):
563 yield n.key
563 yield n.key
564 n = n.next
564 n = n.next
565
565
566 def __getitem__(self, k):
566 def __getitem__(self, k):
567 node = self._cache[k]
567 node = self._cache[k]
568 self._movetohead(node)
568 self._movetohead(node)
569 return node.value
569 return node.value
570
570
571 def __setitem__(self, k, v):
571 def __setitem__(self, k, v):
572 node = self._cache.get(k)
572 node = self._cache.get(k)
573 # Replace existing value and mark as newest.
573 # Replace existing value and mark as newest.
574 if node is not None:
574 if node is not None:
575 node.value = v
575 node.value = v
576 self._movetohead(node)
576 self._movetohead(node)
577 return
577 return
578
578
579 if self._size < self._capacity:
579 if self._size < self._capacity:
580 node = self._addcapacity()
580 node = self._addcapacity()
581 else:
581 else:
582 # Grab the last/oldest item.
582 # Grab the last/oldest item.
583 node = self._head.prev
583 node = self._head.prev
584
584
585 # At capacity. Kill the old entry.
585 # At capacity. Kill the old entry.
586 if node.key is not _notset:
586 if node.key is not _notset:
587 del self._cache[node.key]
587 del self._cache[node.key]
588
588
589 node.key = k
589 node.key = k
590 node.value = v
590 node.value = v
591 self._cache[k] = node
591 self._cache[k] = node
592 # And mark it as newest entry. No need to adjust order since it
592 # And mark it as newest entry. No need to adjust order since it
593 # is already self._head.prev.
593 # is already self._head.prev.
594 self._head = node
594 self._head = node
595
595
596 def __delitem__(self, k):
596 def __delitem__(self, k):
597 node = self._cache.pop(k)
597 node = self._cache.pop(k)
598 node.markempty()
598 node.markempty()
599
599
600 # Temporarily mark as newest item before re-adjusting head to make
600 # Temporarily mark as newest item before re-adjusting head to make
601 # this node the oldest item.
601 # this node the oldest item.
602 self._movetohead(node)
602 self._movetohead(node)
603 self._head = node.next
603 self._head = node.next
604
604
605 # Additional dict methods.
605 # Additional dict methods.
606
606
607 def get(self, k, default=None):
607 def get(self, k, default=None):
608 try:
608 try:
609 return self._cache[k]
609 return self._cache[k]
610 except KeyError:
610 except KeyError:
611 return default
611 return default
612
612
613 def clear(self):
613 def clear(self):
614 n = self._head
614 n = self._head
615 while n.key is not _notset:
615 while n.key is not _notset:
616 n.markempty()
616 n.markempty()
617 n = n.next
617 n = n.next
618
618
619 self._cache.clear()
619 self._cache.clear()
620
620
621 def copy(self):
621 def copy(self):
622 result = lrucachedict(self._capacity)
622 result = lrucachedict(self._capacity)
623 n = self._head.prev
623 n = self._head.prev
624 # Iterate in oldest-to-newest order, so the copy has the right ordering
624 # Iterate in oldest-to-newest order, so the copy has the right ordering
625 for i in range(len(self._cache)):
625 for i in range(len(self._cache)):
626 result[n.key] = n.value
626 result[n.key] = n.value
627 n = n.prev
627 n = n.prev
628 return result
628 return result
629
629
    def _movetohead(self, node):
        """Mark a node as the newest, making it the new head.

        When a node is accessed, it becomes the freshest entry in the LRU
        list, which is denoted by self._head.

        ``node`` must already be linked into the circular list.

        Visually, let's make ``N`` the new head node (* denotes head):

            previous/oldest <-> head <-> next/next newest

            ----<->--- A* ---<->-----
            |                       |
            E <-> D <-> N <-> C <-> B

        To:

            ----<->--- N* ---<->-----
            |                       |
            E <-> D <-> C <-> B <-> A

        This requires the following moves:

           C.next = D  (node.prev.next = node.next)
           D.prev = C  (node.next.prev = node.prev)
           E.next = N  (head.prev.next = node)
           N.prev = E  (node.prev = head.prev)
           N.next = A  (node.next = head)
           A.prev = N  (head.prev = node)
        """
        head = self._head
        # C.next = D
        node.prev.next = node.next
        # D.prev = C
        node.next.prev = node.prev
        # N.prev = E
        node.prev = head.prev
        # N.next = A
        # It is tempting to do just "head" here, however if node is
        # adjacent to head, this will do bad things.
        node.next = head.prev.next
        # E.next = N
        node.next.prev = node
        # A.prev = N
        node.prev.next = node

        self._head = node
676
676
677 def _addcapacity(self):
677 def _addcapacity(self):
678 """Add a node to the circular linked list.
678 """Add a node to the circular linked list.
679
679
680 The new node is inserted before the head node.
680 The new node is inserted before the head node.
681 """
681 """
682 head = self._head
682 head = self._head
683 node = _lrucachenode()
683 node = _lrucachenode()
684 head.prev.next = node
684 head.prev.next = node
685 node.prev = head.prev
685 node.prev = head.prev
686 node.next = head
686 node.next = head
687 head.prev = node
687 head.prev = node
688 self._size += 1
688 self._size += 1
689 return node
689 return node
690
690
def lrucachefunc(func):
    '''cache most recent results of function calls

    Wraps ``func`` with a small (at most ~21 entry) least-recently-used
    result cache keyed on the call arguments.  Arguments must therefore
    be hashable.  The wrapped function's signature is preserved in shape:
    single-argument callables get a single-argument wrapper, everything
    else a ``*args`` wrapper.
    '''
    cache = {}
    order = collections.deque()
    # func.__code__ is valid on Python 2.6+ as well as Python 3, unlike
    # the Python 2-only func.func_code spelling used previously.
    if func.__code__.co_argcount == 1:
        def f(arg):
            if arg not in cache:
                if len(cache) > 20:
                    # evict the least recently used entry
                    del cache[order.popleft()]
                cache[arg] = func(arg)
            else:
                order.remove(arg)
            order.append(arg)
            return cache[arg]
    else:
        def f(*args):
            if args not in cache:
                if len(cache) > 20:
                    # evict the least recently used entry
                    del cache[order.popleft()]
                cache[args] = func(*args)
            else:
                order.remove(args)
            order.append(args)
            return cache[args]

    return f
717
717
class propertycache(object):
    """Descriptor that computes an attribute's value once and caches it.

    The wrapped function runs on first access; its result is stored in
    the instance ``__dict__`` under the same name, so later lookups
    bypass this descriptor entirely.
    """
    def __init__(self, func):
        self.func = func
        self.name = func.__name__

    def __get__(self, obj, type=None):
        computed = self.func(obj)
        self.cachevalue(obj, computed)
        return computed

    def cachevalue(self, obj, value):
        # Write straight into __dict__ to bypass any custom __setattr__
        # on the owning class (eg: repoview).
        obj.__dict__[self.name] = value
730
730
def pipefilter(s, cmd):
    '''filter string S through command CMD, returning its output'''
    proc = subprocess.Popen(cmd, shell=True, close_fds=closefds,
                            stdin=subprocess.PIPE,
                            stdout=subprocess.PIPE)
    stdoutdata, _stderrdata = proc.communicate(s)
    return stdoutdata
737
737
def tempfilter(s, cmd):
    '''filter string S through a pair of temporary files with CMD.
    CMD is used as a template to create the real command to be run,
    with the strings INFILE and OUTFILE replaced by the real names of
    the temporary files generated.'''
    inname = None
    outname = None
    try:
        # write the input data to a temp file for the command to read
        infd, inname = tempfile.mkstemp(prefix='hg-filter-in-')
        infile = os.fdopen(infd, 'wb')
        infile.write(s)
        infile.close()
        # reserve an output file name for the command to write into
        outfd, outname = tempfile.mkstemp(prefix='hg-filter-out-')
        os.close(outfd)
        # substitute the real temp file names into the command template
        cmd = cmd.replace('INFILE', inname)
        cmd = cmd.replace('OUTFILE', outname)
        code = os.system(cmd)
        if sys.platform == 'OpenVMS' and code & 1:
            code = 0
        if code:
            raise Abort(_("command '%s' failed: %s") %
                        (cmd, explainexit(code)))
        return readfile(outname)
    finally:
        # best-effort cleanup of both temp files, in creation order
        for name in (inname, outname):
            try:
                if name:
                    os.unlink(name)
            except OSError:
                pass
774
771
# Map filter-spec prefixes to their implementations; filter() falls back
# to pipefilter for specs without a recognized prefix.
filtertable = {
    'tempfile:': tempfilter,
    'pipe:': pipefilter,
    }
779
776
def filter(s, cmd):
    "filter a string through a command that transforms its input to its output"
    for prefix, impl in filtertable.iteritems():
        if cmd.startswith(prefix):
            # strip the recognized prefix (and surrounding whitespace)
            # before handing the remainder to the implementation
            return impl(s, cmd[len(prefix):].lstrip())
    return pipefilter(s, cmd)
786
783
def binary(s):
    """return true if a string is binary data"""
    if not s:
        return False
    return '\0' in s
790
787
def increasingchunks(source, min=1024, max=65536):
    '''return no less than min bytes per chunk while data remains,
    doubling min after each chunk until it reaches max'''
    def _floorlog2(value):
        # floor(log2(value)); returns 0 for value == 0
        if not value:
            return 0
        bits = 0
        while value:
            value >>= 1
            bits += 1
        return bits - 1

    pending = []
    size = 0
    for piece in source:
        pending.append(piece)
        size += len(piece)
        if size >= min:
            if min < max:
                # double the threshold, or jump straight past the chunk
                # size just emitted, whichever is larger (capped at max)
                min = min << 1
                jump = 1 << _floorlog2(size)
                if jump > min:
                    min = jump
                if min > max:
                    min = max
            yield ''.join(pending)
            size = 0
            pending = []
    if pending:
        yield ''.join(pending)
821
818
# convenience alias: re-export error.Abort at module level
Abort = error.Abort
823
820
def always(fn):
    """Predicate that accepts any input (always returns True)."""
    return True
826
823
def never(fn):
    """Predicate that rejects any input (always returns False)."""
    return False
829
826
def nogc(func):
    """disable garbage collector

    Python's garbage collector triggers a GC each time a certain number of
    container objects (the number being defined by gc.get_threshold()) are
    allocated even when marked not to be tracked by the collector. Tracking has
    no effect on when GCs are triggered, only on what objects the GC looks
    into. As a workaround, disable GC while building complex (huge)
    containers.

    This garbage collector issue have been fixed in 2.7.
    """
    def wrapper(*args, **kwargs):
        wasenabled = gc.isenabled()
        gc.disable()
        try:
            return func(*args, **kwargs)
        finally:
            # only re-enable collection if the caller had it on
            if wasenabled:
                gc.enable()
    return wrapper
851
848
def pathto(root, n1, n2):
    '''return the relative path from one place to another.
    root should use os.sep to separate directories
    n1 should use os.sep to separate directories
    n2 should use "/" to separate directories
    returns an os.sep-separated path.

    If n1 is a relative path, it's assumed it's
    relative to root.
    n2 should always be relative to root.
    '''
    if not n1:
        return localpath(n2)
    if os.path.isabs(n1):
        if os.path.splitdrive(root)[0] != os.path.splitdrive(n1)[0]:
            # different drives: no relative path exists, fall back to
            # an absolute path under root
            return os.path.join(root, localpath(n2))
        n2 = '/'.join((pconvert(root), n2))
    fromparts = splitpath(n1)
    toparts = n2.split('/')
    fromparts.reverse()
    toparts.reverse()
    # strip the common ancestor components (compared at the reversed tails)
    while fromparts and toparts and fromparts[-1] == toparts[-1]:
        fromparts.pop()
        toparts.pop()
    toparts.reverse()
    # climb out of what remains of n1, then descend into n2
    return os.sep.join(['..'] * len(fromparts) + toparts) or '.'
877
874
def mainfrozen():
    """return True if we are a frozen executable.

    The code supports py2exe (most common, Windows only) and tools/freeze
    (portable, not much used).
    """
    if safehasattr(sys, "frozen"):       # new py2exe
        return True
    if safehasattr(sys, "importers"):    # old py2exe
        return True
    return imp.is_frozen("__main__")     # tools/freeze
887
884
# the location of data files matching the source code
if mainfrozen() and getattr(sys, 'frozen', None) != 'macosx_app':
    # executable version (py2exe) doesn't support __file__
    datapath = os.path.dirname(sys.executable)
else:
    datapath = os.path.dirname(__file__)

# make the data path known to the i18n subsystem
i18n.setdatapath(datapath)

# cached path of the 'hg' executable; lazily filled in by hgexecutable()
_hgexecutable = None
898
895
def hgexecutable():
    """return location of the 'hg' executable.

    Defaults to $HG or 'hg' in the search path.
    """
    if _hgexecutable is None:
        envhg = os.environ.get('HG')
        mainmod = sys.modules['__main__']
        if envhg:
            # explicit override via the HG environment variable
            _sethgexecutable(envhg)
        elif mainfrozen():
            if getattr(sys, 'frozen', None) == 'macosx_app':
                # Env variable set by py2app
                _sethgexecutable(os.environ['EXECUTABLEPATH'])
            else:
                _sethgexecutable(sys.executable)
        elif os.path.basename(getattr(mainmod, '__file__', '')) == 'hg':
            # running from the 'hg' script itself
            _sethgexecutable(mainmod.__file__)
        else:
            _sethgexecutable(findexe('hg') or os.path.basename(sys.argv[0]))
    return _hgexecutable
921
918
922 def _sethgexecutable(path):
919 def _sethgexecutable(path):
923 """set location of the 'hg' executable"""
920 """set location of the 'hg' executable"""
924 global _hgexecutable
921 global _hgexecutable
925 _hgexecutable = path
922 _hgexecutable = path
926
923
927 def _isstdout(f):
924 def _isstdout(f):
928 fileno = getattr(f, 'fileno', None)
925 fileno = getattr(f, 'fileno', None)
929 return fileno and fileno() == sys.__stdout__.fileno()
926 return fileno and fileno() == sys.__stdout__.fileno()
930
927
def system(cmd, environ=None, cwd=None, onerr=None, errprefix=None, out=None):
    '''enhanced shell command execution.
    run with environment maybe modified, maybe in different dir.

    if command fails and onerr is None, return status, else raise onerr
    object as exception.

    if out is specified, it is assumed to be a file-like object that has a
    write() method. stdout and stderr will be redirected to out.'''
    if environ is None:
        environ = {}
    try:
        # flush our own buffered output before the child writes, so the
        # two streams interleave sensibly
        sys.stdout.flush()
    except Exception:
        pass
    def py2shell(val):
        'convert python object into string that is useful to shell'
        if val is None or val is False:
            return '0'
        if val is True:
            return '1'
        return str(val)
    origcmd = cmd
    cmd = quotecommand(cmd)
    if sys.platform == 'plan9' and (sys.version_info[0] == 2
                                    and sys.version_info[1] < 7):
        # subprocess kludge to work around issues in half-baked Python
        # ports, notably bichued/python:
        if not cwd is None:
            os.chdir(cwd)
        rc = os.system(cmd)
    else:
        # child environment: inherited, with caller-supplied overrides
        # converted to shell-friendly strings
        env = dict(os.environ)
        env.update((k, py2shell(v)) for k, v in environ.iteritems())
        env['HG'] = hgexecutable()
        if out is None or _isstdout(out):
            rc = subprocess.call(cmd, shell=True, close_fds=closefds,
                                 env=env, cwd=cwd)
        else:
            proc = subprocess.Popen(cmd, shell=True, close_fds=closefds,
                                    env=env, cwd=cwd, stdout=subprocess.PIPE,
                                    stderr=subprocess.STDOUT)
            # stream the child's combined stdout/stderr line by line into
            # the caller-supplied file object
            while True:
                line = proc.stdout.readline()
                if not line:
                    break
                out.write(line)
            proc.wait()
            rc = proc.returncode
    if sys.platform == 'OpenVMS' and rc & 1:
        rc = 0
    if rc and onerr:
        errmsg = '%s %s' % (os.path.basename(origcmd.split(None, 1)[0]),
                            explainexit(rc)[0])
        if errprefix:
            errmsg = '%s: %s' % (errprefix, errmsg)
        raise onerr(errmsg)
    return rc
989
986
def checksignature(func):
    '''wrap a function with code to check for calling errors'''
    def checked(*args, **kwargs):
        try:
            return func(*args, **kwargs)
        except TypeError:
            # Translate to SignatureError only when the TypeError came
            # from the call itself (traceback depth 1), not from code
            # deeper inside func.
            if len(traceback.extract_tb(sys.exc_info()[2])) == 1:
                raise error.SignatureError
            raise

    return checked
1001
998
def copyfile(src, dest, hardlink=False, copystat=False):
    '''copy a file, preserving mode and optionally other stat info like
    atime/mtime'''
    if os.path.lexists(dest):
        unlink(dest)
    # hardlinks are problematic on CIFS, quietly ignore this flag
    # until we find a way to work around it cleanly (issue4546)
    if False and hardlink:
        try:
            oslink(src, dest)
            return
        except (IOError, OSError):
            pass # fall back to normal copy
    if os.path.islink(src):
        # recreate the symlink itself; copytime is ignored for symlinks,
        # but in general copytime isn't needed for them anyway
        os.symlink(os.readlink(src), dest)
        return
    try:
        shutil.copyfile(src, dest)
        if copystat:
            # copystat also copies mode
            shutil.copystat(src, dest)
        else:
            shutil.copymode(src, dest)
    except shutil.Error as inst:
        raise Abort(str(inst))
1029
1026
def copyfiles(src, dst, hardlink=None, progress=lambda t, pos: None):
    """Copy a directory tree using hardlinks if possible.

    Returns a (hardlink, num) pair: whether hardlinking was still in
    effect at the end, and how many files were copied/linked.
    """
    # number of files handled so far
    num = 0

    if hardlink is None:
        # default: only try hardlinking when src and dst's parent live
        # on the same device
        hardlink = (os.stat(src).st_dev ==
                    os.stat(os.path.dirname(dst)).st_dev)
    if hardlink:
        topic = _('linking')
    else:
        topic = _('copying')

    if os.path.isdir(src):
        os.mkdir(dst)
        for name, kind in osutil.listdir(src):
            srcname = os.path.join(src, name)
            dstname = os.path.join(dst, name)
            def nprog(t, pos):
                # offset child progress by the files already done here
                if pos is not None:
                    return progress(t, pos + num)
            hardlink, n = copyfiles(srcname, dstname, hardlink, progress=nprog)
            num += n
    else:
        if hardlink:
            try:
                oslink(src, dst)
            except (IOError, OSError):
                # linking failed (e.g. filesystem limitation): degrade to
                # copying for this file and all remaining ones
                hardlink = False
                shutil.copy(src, dst)
        else:
            shutil.copy(src, dst)
        num += 1
        progress(topic, num)
    # final report with pos=None signals completion to the callback
    progress(topic, None)

    return hardlink, num
1066
1063
# Path-component stems Windows reserves for devices, and the characters
# it forbids inside any path component; used by checkwinfilename().
_winreservednames = '''con prn aux nul
com1 com2 com3 com4 com5 com6 com7 com8 com9
lpt1 lpt2 lpt3 lpt4 lpt5 lpt6 lpt7 lpt8 lpt9'''.split()
_winreservedchars = ':*?"<>|'
def checkwinfilename(path):
    r'''Check that the base-relative path is a valid filename on Windows.
    Returns None if the path is ok, or a UI string describing the problem.

    >>> checkwinfilename("just/a/normal/path")
    >>> checkwinfilename("foo/bar/con.xml")
    "filename contains 'con', which is reserved on Windows"
    >>> checkwinfilename("foo/con.xml/bar")
    "filename contains 'con', which is reserved on Windows"
    >>> checkwinfilename("foo/bar/xml.con")
    >>> checkwinfilename("foo/bar/AUX/bla.txt")
    "filename contains 'AUX', which is reserved on Windows"
    >>> checkwinfilename("foo/bar/bla:.txt")
    "filename contains ':', which is reserved on Windows"
    >>> checkwinfilename("foo/bar/b\07la.txt")
    "filename contains '\\x07', which is invalid on Windows"
    >>> checkwinfilename("foo/bar/bla ")
    "filename ends with ' ', which is not allowed on Windows"
    >>> checkwinfilename("../bar")
    >>> checkwinfilename("foo\\")
    "filename ends with '\\', which is invalid on Windows"
    >>> checkwinfilename("foo\\/bar")
    "directory name ends with '\\', which is invalid on Windows"
    '''
    if path.endswith('\\'):
        return _("filename ends with '\\', which is invalid on Windows")
    if '\\/' in path:
        return _("directory name ends with '\\', which is invalid on Windows")
    # examine each path component individually
    for part in path.replace('\\', '/').split('/'):
        if not part:
            continue
        for ch in part:
            if ch in _winreservedchars:
                return _("filename contains '%s', which is reserved "
                         "on Windows") % ch
            if ord(ch) <= 31:
                return _("filename contains %r, which is invalid "
                         "on Windows") % ch
        # the stem before the first dot is what Windows matches against
        # device names (e.g. 'con.xml' is still reserved)
        stem = part.split('.')[0]
        if stem and stem.lower() in _winreservednames:
            return _("filename contains '%s', which is reserved "
                     "on Windows") % stem
        trailing = part[-1]
        if trailing in '. ' and part not in '..':
            return _("filename ends with '%s', which is not allowed "
                     "on Windows") % trailing
1117
1114
# On Windows the OS-specific filename check is exactly the Windows check;
# elsewhere defer to the platform module's implementation.
if os.name == 'nt':
    checkosfilename = checkwinfilename
else:
    checkosfilename = platform.checkosfilename
1122
1119
def makelock(info, pathname):
    """Create a lock at ``pathname`` whose payload is ``info``.

    Prefers a symlink (created atomically and readable without opening a
    file); falls back to an exclusively-created regular file when the
    platform has no symlinks or symlink creation fails for a reason
    other than the lock already existing.
    """
    try:
        return os.symlink(info, pathname)
    except OSError as why:
        if why.errno == errno.EEXIST:
            # somebody else holds the lock
            raise
    except AttributeError: # no symlink in os
        pass

    flags = os.O_CREAT | os.O_WRONLY | os.O_EXCL
    ld = os.open(pathname, flags)
    os.write(ld, info)
    os.close(ld)
1135
1132
def readlock(pathname):
    """Return the payload of a lock created by makelock().

    Tries the symlink form first; falls back to reading the regular
    lock file when the path is not a symlink (EINVAL), symlinks are
    unsupported (ENOSYS), or the os module has no readlink at all.
    """
    try:
        return os.readlink(pathname)
    except OSError as why:
        if why.errno not in (errno.EINVAL, errno.ENOSYS):
            raise
    except AttributeError: # no symlink in os
        pass
    fp = posixfile(pathname)
    data = fp.read()
    fp.close()
    return data
1148
1145
def fstat(fp):
    '''stat file object that may not have fileno method.'''
    if hasattr(fp, 'fileno'):
        return os.fstat(fp.fileno())
    # fall back to a path-based stat for file-like objects
    return os.stat(fp.name)
1155
1152
1156 # File system features
1153 # File system features
1157
1154
def checkcase(path):
    """
    Return true if the given path is on a case-sensitive filesystem

    Requires a path (like /foo/.hg) ending with a foldable final
    directory component.
    """
    st1 = os.lstat(path)
    dirname, base = os.path.split(path)
    folded = base.upper()
    if folded == base:
        folded = base.lower()
    if folded == base:
        # name has no letters to fold: no evidence against sensitivity
        return True
    try:
        st2 = os.lstat(os.path.join(dirname, folded))
    except OSError:
        # the case-swapped spelling does not exist: case-sensitive
        return True
    # identical stat under both spellings means case-insensitive
    return st2 != st1
1180
1177
# Probe for the optional re2 regexp engine.  _re2 is tri-state:
# False means unavailable, None means "imported but not yet verified"
# (verification happens lazily, since merely importing re2 does not
# prove it works -- see _re._checkre2 below in the original file).
try:
    import re2
    _re2 = None
except ImportError:
    _re2 = False
1186
1183
class _re(object):
    def _checkre2(self):
        """Verify lazily that the imported re2 module actually works."""
        global _re2
        try:
            # check if match works, see issue3964
            _re2 = bool(re2.match(r'\[([^\[]+)\]', '[ui]'))
        except ImportError:
            _re2 = False

    def compile(self, pat, flags=0):
        '''Compile a regular expression, using re2 if possible

        For best performance, use only re2-compatible regexp features. The
        only flags from the re module that are re2-compatible are
        IGNORECASE and MULTILINE.'''
        if _re2 is None:
            self._checkre2()
        usable = _re2 and not (flags & ~(remod.IGNORECASE | remod.MULTILINE))
        if usable:
            # re2 takes no flags argument; express them inline instead
            if flags & remod.IGNORECASE:
                pat = '(?i)' + pat
            if flags & remod.MULTILINE:
                pat = '(?m)' + pat
            try:
                return re2.compile(pat)
            except re2.error:
                pass
        return remod.compile(pat, flags)

    @propertycache
    def escape(self):
        '''Return the version of escape corresponding to self.compile.

        This is imperfect because whether re2 or re is used for a particular
        function depends on the flags, etc, but it's the best we can do.
        '''
        global _re2
        if _re2 is None:
            self._checkre2()
        return re2.escape if _re2 else remod.escape
1229
1226
# Shared module-level matcher; callers use util.re.compile() to pick up
# re2 acceleration transparently when it is available.
re = _re()
1231
1228
_fspathcache = {}
def fspath(name, root):
    '''Get name in the case stored in the filesystem

    The name should be relative to root, and be normcase-ed for efficiency.

    Note that this function is unnecessary, and should not be
    called, for case-sensitive filesystems (simply because it's expensive).

    The root should be normcase-ed, too.
    '''
    def _makefspathcacheentry(dir):
        # map normcase-ed entry name -> on-disk spelling for one directory
        return dict((normcase(n), n) for n in os.listdir(dir))

    seps = os.sep
    if os.altsep:
        seps = seps + os.altsep
    # Protect backslashes. This gets silly very quickly.
    # str.replace returns a new string; the result must be assigned back,
    # otherwise '\' (the Windows os.sep) is left unescaped inside the
    # regexp character classes built below.
    seps = seps.replace('\\', '\\\\')
    pattern = remod.compile(r'([^%s]+)|([%s]+)' % (seps, seps))
    dir = os.path.normpath(root)
    result = []
    for part, sep in pattern.findall(name):
        if sep:
            result.append(sep)
            continue

        if dir not in _fspathcache:
            _fspathcache[dir] = _makefspathcacheentry(dir)
        contents = _fspathcache[dir]

        found = contents.get(part)
        if not found:
            # retry "once per directory" per "dirstate.walk" which
            # may take place for each patches of "hg qpush", for example
            _fspathcache[dir] = contents = _makefspathcacheentry(dir)
            found = contents.get(part)

        result.append(found or part)
        dir = os.path.join(dir, part)

    return ''.join(result)
1274
1271
def checknlink(testfile):
    '''check whether hardlink count reporting works properly'''

    # testfile may be open, so we need a separate file for checking to
    # work around issue2543 (or testfile may get lost on Samba shares)
    probe = testfile + ".hgtmp1"
    if os.path.lexists(probe):
        return False
    try:
        posixfile(probe, 'w').close()
    except IOError:
        return False

    link = testfile + ".hgtmp2"
    fh = None
    try:
        oslink(probe, link)
        # nlinks() may behave differently for files on Windows shares if
        # the file is open.
        fh = posixfile(link)
        return nlinks(link) > 1
    except OSError:
        return False
    finally:
        if fh is not None:
            fh.close()
        for leftover in (probe, link):
            try:
                os.unlink(leftover)
            except OSError:
                pass
1306
1303
def endswithsep(path):
    '''Check path ends with os.sep or os.altsep.'''
    if path.endswith(os.sep):
        return True
    return os.altsep and path.endswith(os.altsep)
1310
1307
def splitpath(path):
    '''Split path on os.sep (deliberately ignoring os.altsep).

    This is the canonical spelling of "path.split(os.sep)"; apply
    os.path.normpath() to the argument first if needed.'''
    components = path.split(os.sep)
    return components
1318
1315
def gui():
    '''Are we running in a GUI?'''
    if sys.platform != 'darwin':
        return os.name == "nt" or os.environ.get("DISPLAY")
    if 'SSH_CONNECTION' in os.environ:
        # handle SSH access to a box where the user is logged in
        return False
    if getattr(osutil, 'isgui', None):
        # check if a CoreGraphics session is available
        return osutil.isgui()
    # pure build; use a safe default
    return True
1333
1330
def mktempcopy(name, emptyok=False, createmode=None):
    """Create a temporary file with the same contents from name

    The permission bits are copied from the original file.

    If the temporary file is going to be truncated immediately, you
    can use emptyok=True as an optimization.

    Returns the name of the temporary file.
    """
    d, fn = os.path.split(name)
    # place the temp file next to the original so the later rename is
    # within one filesystem (and therefore atomic)
    fd, temp = tempfile.mkstemp(prefix='.%s-' % fn, dir=d)
    os.close(fd)
    # Temporary files are created with mode 0600, which is usually not
    # what we want. If the original file already exists, just copy
    # its mode. Otherwise, manually obey umask.
    copymode(name, temp, createmode)
    if emptyok:
        return temp
    try:
        try:
            ifp = posixfile(name, "rb")
        except IOError as inst:
            # a missing source is fine: the (empty) temp file stands in
            if inst.errno == errno.ENOENT:
                return temp
            # annotate the error with the offending path if the OS didn't
            if not getattr(inst, 'filename', None):
                inst.filename = name
            raise
        ofp = posixfile(temp, "wb")
        for chunk in filechunkiter(ifp):
            ofp.write(chunk)
        ifp.close()
        ofp.close()
    except: # re-raises
        # any failure at all (including KeyboardInterrupt) must not
        # leave a stray temp file behind -- hence the bare except
        try: os.unlink(temp)
        except OSError: pass
        raise
    return temp
1372
1369
class atomictempfile(object):
    '''writable file object that atomically updates a file

    All writes will go to a temporary copy of the original file. Call
    close() when you are done writing, and atomictempfile will rename
    the temporary copy to the original name, making the changes
    visible. If the object is destroyed without being closed, all your
    writes are discarded.
    '''
    def __init__(self, name, mode='w+b', createmode=None):
        self.__name = name # permanent name
        # the temp copy starts out empty when opening for write
        self._tempname = mktempcopy(name, emptyok=('w' in mode),
                                    createmode=createmode)
        self._fp = posixfile(self._tempname, mode)

        # delegated methods
        self.write = self._fp.write
        self.seek = self._fp.seek
        self.tell = self._fp.tell
        self.fileno = self._fp.fileno

    def close(self):
        # publish the temp copy by renaming it over the original;
        # a second close() is a harmless no-op
        if not self._fp.closed:
            self._fp.close()
            rename(self._tempname, localpath(self.__name))

    def discard(self):
        # drop the temp copy without touching the original file
        if not self._fp.closed:
            try:
                os.unlink(self._tempname)
            except OSError:
                pass
            self._fp.close()

    def __del__(self):
        if safehasattr(self, '_fp'): # constructor actually did something
            self.discard()
1410
1407
def makedirs(name, mode=None, notindexed=False):
    """recursive directory creation with parent mode inheritance

    Missing ancestors are created first, then the leaf; a leaf that
    already exists is not an error.
    """
    try:
        makedir(name, notindexed)
    except OSError as exc:
        if exc.errno == errno.EEXIST:
            return
        if exc.errno != errno.ENOENT or not name:
            raise
        ancestor = os.path.dirname(os.path.abspath(name))
        if ancestor == name:
            raise
        # build the missing parents, then retry the leaf itself
        makedirs(ancestor, mode, notindexed)
        makedir(name, notindexed)
    if mode is not None:
        os.chmod(name, mode)
1427
1424
def ensuredirs(name, mode=None, notindexed=False):
    """race-safe recursive directory creation

    Newly created directories are marked as "not to be indexed by
    the content indexing service", if ``notindexed`` is specified
    for "write" mode access.
    """
    if os.path.isdir(name):
        return
    parent = os.path.dirname(os.path.abspath(name))
    if parent != name:
        ensuredirs(parent, mode, notindexed)
    try:
        makedir(name, notindexed)
    except OSError as err:
        # losing a creation race to another process is not an error
        if err.errno == errno.EEXIST and os.path.isdir(name):
            return
        raise
    if mode is not None:
        os.chmod(name, mode)
1449
1446
def readfile(path):
    """Return the entire contents of the file at path, read in binary mode."""
    with open(path, 'rb') as fp:
        return fp.read()
1456
1453
def writefile(path, text):
    """Replace the contents of the file at path with text (binary mode)."""
    with open(path, 'wb') as fp:
        fp.write(text)
1463
1460
def appendfile(path, text):
    """Append text to the file at path (binary mode), creating it if absent."""
    with open(path, 'ab') as fp:
        fp.write(text)
1470
1467
class chunkbuffer(object):
    """Allow arbitrary sized chunks of data to be efficiently read from an
    iterator over chunks of arbitrary size."""

    def __init__(self, in_iter):
        """in_iter is the iterator that's iterating over the input chunks.
        targetsize is how big a buffer to try to maintain."""
        def splitbig(chunks):
            # re-chunk anything larger than 1MB into 256KB pieces so a
            # single huge chunk cannot dominate memory use
            for chunk in chunks:
                if len(chunk) > 2**20:
                    pos = 0
                    while pos < len(chunk):
                        end = pos + 2 ** 18
                        yield chunk[pos:end]
                        pos = end
                else:
                    yield chunk
        self.iter = splitbig(in_iter)
        self._queue = collections.deque()
        # read position inside the chunk at the head of _queue
        self._chunkoffset = 0

    def read(self, l=None):
        """Read L bytes of data from the iterator of chunks of data.
        Returns less than L bytes if the iterator runs dry.

        If size parameter is omitted, read everything"""
        if l is None:
            # NOTE(review): this drains only the underlying iterator and
            # appears to skip chunks already buffered in _queue by earlier
            # sized reads -- confirm callers never mix the two styles.
            return ''.join(self.iter)

        left = l
        buf = []
        queue = self._queue
        while left > 0:
            # refill the queue
            if not queue:
                # pull roughly 256KB ahead of what was asked for
                target = 2**18
                for chunk in self.iter:
                    queue.append(chunk)
                    target -= len(chunk)
                    if target <= 0:
                        break
                if not queue:
                    break

            # The easy way to do this would be to queue.popleft(), modify the
            # chunk (if necessary), then queue.appendleft(). However, for cases
            # where we read partial chunk content, this incurs 2 dequeue
            # mutations and creates a new str for the remaining chunk in the
            # queue. Our code below avoids this overhead.

            chunk = queue[0]
            chunkl = len(chunk)
            offset = self._chunkoffset

            # Use full chunk.
            if offset == 0 and left >= chunkl:
                left -= chunkl
                queue.popleft()
                buf.append(chunk)
                # self._chunkoffset remains at 0.
                continue

            chunkremaining = chunkl - offset

            # Use all of unconsumed part of chunk.
            if left >= chunkremaining:
                left -= chunkremaining
                queue.popleft()
                # offset == 0 is enabled by block above, so this won't merely
                # copy via ``chunk[0:]``.
                buf.append(chunk[offset:])
                self._chunkoffset = 0

            # Partial chunk needed.
            else:
                buf.append(chunk[offset:offset + left])
                self._chunkoffset += left
                left -= chunkremaining

        return ''.join(buf)
1551
1548
def filechunkiter(f, size=65536, limit=None):
    """Create a generator that produces the data in the file size
    (default 65536) bytes at a time, up to optional limit (default is
    to read all data). Chunks may be less than size bytes if the
    chunk is the last chunk in the file, or the file is a socket or
    some other type of file that sometimes reads less data than is
    requested."""
    assert size >= 0
    assert limit is None or limit >= 0
    while True:
        nbytes = size if limit is None else min(limit, size)
        # nbytes == 0 (limit exhausted) short-circuits without reading
        s = nbytes and f.read(nbytes)
        if not s:
            return
        if limit:
            limit -= len(s)
        yield s
1572
1569
def makedate(timestamp=None):
    '''Return a unix timestamp (or the current time) as a (unixtime,
    offset) tuple based off the local timezone.'''
    if timestamp is None:
        timestamp = time.time()
    if timestamp < 0:
        hint = _("check your clock")
        raise Abort(_("negative timestamp: %d") % timestamp, hint=hint)
    # local-zone offset = UTC wall clock minus local wall clock
    utcwall = datetime.datetime.utcfromtimestamp(timestamp)
    localwall = datetime.datetime.fromtimestamp(timestamp)
    delta = utcwall - localwall
    tz = delta.days * 86400 + delta.seconds
    return timestamp, tz
1585
1582
def datestr(date=None, format='%a %b %d %H:%M:%S %Y %1%2'):
    """represent a (unixtime, offset) tuple as a localized time.
    unixtime is seconds since the epoch, and offset is the time zone's
    number of seconds away from UTC. if timezone is false, do not
    append time zone to string."""
    t, tz = date or makedate()
    if t < 0:
        t = 0 # time.gmtime(lt) fails on Windows for lt < -43200
        tz = 0
    wantzone = "%1" in format or "%2" in format or "%z" in format
    if wantzone:
        sign = "-" if tz > 0 else "+"
        minutes = abs(tz) // 60
        q, r = divmod(minutes, 60)
        # expand the private %z/%1/%2 placeholders into a literal
        # "+HHMM"-style zone before handing the format to strftime
        format = format.replace("%z", "%1%2")
        format = format.replace("%1", "%c%02d" % (sign, q))
        format = format.replace("%2", "%02d" % r)
    try:
        t = time.gmtime(float(t) - tz)
    except ValueError:
        # time was out of range (sys.maxint: this file still targets py2)
        t = time.gmtime(sys.maxint)
    return time.strftime(format, t)
1609
1606
def shortdate(date=None):
    """turn (timestamp, tzoff) tuple into iso 8631 date."""
    fmt = '%Y-%m-%d'
    return datestr(date, format=fmt)
1613
1610
def parsetimezone(tz):
    """parse a timezone string and return an offset integer"""
    if tz[0] in "+-" and len(tz) == 5 and tz[1:].isdigit():
        # "+HHMM"/"-HHMM": east of UTC yields a negative offset
        offset = (int(tz[1:3]) * 60 + int(tz[3:5])) * 60
        return -offset if tz[0] == "+" else offset
    if tz in ("GMT", "UTC"):
        return 0
    return None
1624
1621
def strdate(string, format, defaults=[]):
    """parse a localized time string and return a (unixtime, offset) tuple.
    if the string cannot be parsed, ValueError is raised."""
    # NOTE: unixtime = localunixtime + offset
    offset = parsetimezone(string.split()[-1])
    if offset is None:
        date = string
    else:
        # strip the recognized trailing timezone token
        date = " ".join(string.split()[:-1])

    # fill in elements the format does not cover, taken from defaults;
    # once any more-specific field has been seen, the coarser fields
    # default to "now" instead of the biased values
    usenow = False
    for part in ("S", "M", "HI", "d", "mb", "yY"): # decreasing specificity
        if any(("%" + p) in format for p in part):
            usenow = True
        else:
            date += "@" + defaults[part][usenow]
            format += "@%" + part[0]

    timetuple = time.strptime(date, format)
    localunixtime = int(calendar.timegm(timetuple))
    if offset is None:
        # local timezone
        unixtime = int(time.mktime(timetuple))
        offset = unixtime - localunixtime
    else:
        unixtime = localunixtime + offset
    return unixtime, offset
1654
1651
def parsedate(date, formats=None, bias=None):
    """parse a localized date/time and return a (unixtime, offset) tuple.

    The date may be a "unixtime offset" string or in one of the specified
    formats. If the date already is a (unixtime, offset) tuple, it is returned.

    >>> parsedate(' today ') == parsedate(\
                                  datetime.date.today().strftime('%b %d'))
    True
    >>> parsedate( 'yesterday ') == parsedate((datetime.date.today() -\
                                               datetime.timedelta(days=1)\
                                              ).strftime('%b %d'))
    True
    >>> now, tz = makedate()
    >>> strnow, strtz = parsedate('now')
    >>> (strnow - now) < 1
    True
    >>> tz == strtz
    True
    """
    if bias is None:
        bias = {}
    if not date:
        # empty/None date means the epoch in UTC
        return 0, 0
    if isinstance(date, tuple) and len(date) == 2:
        # already parsed; pass through unchanged
        return date
    if not formats:
        formats = defaultdateformats
    date = date.strip()

    # symbolic dates, honoring both the literal English word and its
    # translation via _()
    if date == 'now' or date == _('now'):
        return makedate()
    if date == 'today' or date == _('today'):
        date = datetime.date.today().strftime('%b %d')
    elif date == 'yesterday' or date == _('yesterday'):
        date = (datetime.date.today() -
                datetime.timedelta(days=1)).strftime('%b %d')

    try:
        # fast path: the internal "unixtime offset" representation
        when, offset = map(int, date.split(' '))
    except ValueError:
        # fill out defaults
        now = makedate()
        defaults = {}
        for part in ("d", "mb", "yY", "HI", "M", "S"):
            # this piece is for rounding the specific end of unknowns
            b = bias.get(part)
            if b is None:
                if part[0] in "HMS":
                    b = "00"
                else:
                    b = "0"

            # this piece is for matching the generic end to today's date
            n = datestr(now, "%" + part[0])

            defaults[part] = (b, n)

        # try each candidate format until one parses cleanly
        for format in formats:
            try:
                when, offset = strdate(date, format, defaults)
            except (ValueError, OverflowError):
                pass
            else:
                break
        else:
            raise Abort(_('invalid date: %r') % date)
    # validate explicit (probably user-specified) date and
    # time zone offset. values must fit in signed 32 bits for
    # current 32-bit linux runtimes. timezones go from UTC-12
    # to UTC+14
    if abs(when) > 0x7fffffff:
        raise Abort(_('date exceeds 32 bits: %d') % when)
    if when < 0:
        raise Abort(_('negative date value: %d') % when)
    if offset < -50400 or offset > 43200:
        raise Abort(_('impossible time zone offset: %d') % offset)
    return when, offset
1733
1730
def matchdate(date):
    """Return a function that matches a given date match specifier

    Formats include:

    '{date}' match a given date to the accuracy provided

    '<{date}' on or before a given date

    '>{date}' on or after a given date

    >>> p1 = parsedate("10:29:59")
    >>> p2 = parsedate("10:30:00")
    >>> p3 = parsedate("10:30:59")
    >>> p4 = parsedate("10:31:00")
    >>> p5 = parsedate("Sep 15 10:30:00 1999")
    >>> f = matchdate("10:30")
    >>> f(p1[0])
    False
    >>> f(p2[0])
    True
    >>> f(p3[0])
    True
    >>> f(p4[0])
    False
    >>> f(p5[0])
    False
    """

    def lower(date):
        # round unspecified fields down: earliest instant the spec allows
        d = {'mb': "1", 'd': "1"}
        return parsedate(date, extendeddateformats, d)[0]

    def upper(date):
        # round unspecified fields up: latest instant the spec allows;
        # try progressively shorter month lengths until one parses
        d = {'mb': "12", 'HI': "23", 'M': "59", 'S': "59"}
        for days in ("31", "30", "29"):
            try:
                d["d"] = days
                return parsedate(date, extendeddateformats, d)[0]
            except Abort:
                pass
        d["d"] = "28"
        return parsedate(date, extendeddateformats, d)[0]

    date = date.strip()

    if not date:
        raise Abort(_("dates cannot consist entirely of whitespace"))
    elif date[0] == "<":
        if not date[1:]:
            raise Abort(_("invalid day spec, use '<DATE'"))
        when = upper(date[1:])
        return lambda x: x <= when
    elif date[0] == ">":
        if not date[1:]:
            raise Abort(_("invalid day spec, use '>DATE'"))
        when = lower(date[1:])
        return lambda x: x >= when
    elif date[0] == "-":
        # '-N' means within the last N days
        try:
            days = int(date[1:])
        except ValueError:
            raise Abort(_("invalid day spec: %s") % date[1:])
        if days < 0:
            raise Abort(_('%s must be nonnegative (see "hg help dates")')
                % date[1:])
        when = makedate()[0] - days * 3600 * 24
        return lambda x: x >= when
    elif " to " in date:
        # inclusive range between two specs
        a, b = date.split(" to ")
        start, stop = lower(a), upper(b)
        return lambda x: x >= start and x <= stop
    else:
        # bare date: match anywhere inside its implied range
        start, stop = lower(date), upper(date)
        return lambda x: x >= start and x <= stop
1809
1806
def stringmatcher(pattern):
    """
    accepts a string, possibly starting with 're:' or 'literal:' prefix.
    returns the matcher name, pattern, and matcher function.
    missing or unknown prefixes are treated as literal matches.

    helper for tests:
    >>> def test(pattern, *tests):
    ...     kind, pattern, matcher = stringmatcher(pattern)
    ...     return (kind, pattern, [bool(matcher(t)) for t in tests])

    exact matching (no prefix):
    >>> test('abcdefg', 'abc', 'def', 'abcdefg')
    ('literal', 'abcdefg', [False, False, True])

    regex matching ('re:' prefix)
    >>> test('re:a.+b', 'nomatch', 'fooadef', 'fooadefbar')
    ('re', 'a.+b', [False, False, True])

    force exact matches ('literal:' prefix)
    >>> test('literal:re:foobar', 'foobar', 're:foobar')
    ('literal', 're:foobar', [False, True])

    unknown prefixes are ignored and treated as literals
    >>> test('foo:bar', 'foo', 'bar', 'foo:bar')
    ('literal', 'foo:bar', [False, False, True])
    """
    if pattern.startswith('re:'):
        body = pattern[3:]
        try:
            compiled = remod.compile(body)
        except remod.error as e:
            raise error.ParseError(_('invalid regular expression: %s')
                                   % e)
        return 're', body, compiled.search
    if pattern.startswith('literal:'):
        # strip the marker; anything else falls through as a literal too
        pattern = pattern[8:]
    return 'literal', pattern, pattern.__eq__
1848
1845
def shortuser(user):
    """Return a short representation of a user name or email address."""
    # drop the host part of an address
    pos = user.find('@')
    if pos >= 0:
        user = user[:pos]
    # keep only what follows an opening angle bracket, if any
    pos = user.find('<')
    if pos >= 0:
        user = user[pos + 1:]
    # truncate at the first space, then at the first dot
    for sep in (' ', '.'):
        pos = user.find(sep)
        if pos >= 0:
            user = user[:pos]
    return user
1864
1861
def emailuser(user):
    """Return the user portion of an email address."""
    # cut off everything from '@' onwards
    at = user.find('@')
    if at >= 0:
        user = user[:at]
    # then keep only what follows a '<', if present
    lt = user.find('<')
    if lt >= 0:
        user = user[lt + 1:]
    return user
1874
1871
def email(author):
    '''get email of author.'''
    # take the text between the first '<' and the first '>'; either
    # bracket may be missing, in which case slicing degrades gracefully
    start = author.find('<') + 1
    stop = author.find('>')
    if stop < 0:
        return author[start:]
    return author[start:stop]
1881
1878
def ellipsis(text, maxlength=400):
    """Trim string to at most maxlength (default: 400) columns in display."""
    # delegate to encoding.trim; NOTE(review): assumed to measure display
    # columns (per its use here) rather than bytes -- confirm in encoding.py
    return encoding.trim(text, maxlength, ellipsis='...')
1885
1882
def unitcountfn(*unittable):
    '''return a function that renders a readable count of some quantity'''
    # each row is (multiplier, divisor, format); the first row whose
    # threshold (multiplier * divisor) the count reaches wins, and the
    # last row acts as the unconditional fallback
    def render(count):
        for multiplier, divisor, fmt in unittable:
            if count >= divisor * multiplier:
                return fmt % (count / float(divisor))
        return unittable[-1][2] % count

    return render
1896
1893
# Render a byte count as a human-readable string. Rows are ordered from
# the largest threshold down, so the most specific unit that fits is
# chosen; the final row is the plain-bytes fallback.
bytecount = unitcountfn(
    (100, 1 << 30, _('%.0f GB')),
    (10, 1 << 30, _('%.1f GB')),
    (1, 1 << 30, _('%.2f GB')),
    (100, 1 << 20, _('%.0f MB')),
    (10, 1 << 20, _('%.1f MB')),
    (1, 1 << 20, _('%.2f MB')),
    (100, 1 << 10, _('%.0f KB')),
    (10, 1 << 10, _('%.1f KB')),
    (1, 1 << 10, _('%.2f KB')),
    (1, 1, _('%.0f bytes')),
    )
1909
1906
def uirepr(s):
    """Return repr(s) adjusted for user display."""
    raw = repr(s)
    # Avoid double backslash in Windows path repr()
    return raw.replace('\\\\', '\\')
1913
1910
1914 # delay import of textwrap
1911 # delay import of textwrap
def MBTextWrapper(**kwargs):
    # Defines the wrapper class on first use (textwrap import is delayed),
    # then rebinds this module-level name to the class itself so later
    # calls construct instances directly.
    class tw(textwrap.TextWrapper):
        """
        Extend TextWrapper for width-awareness.

        Neither number of 'bytes' in any encoding nor 'characters' is
        appropriate to calculate terminal columns for specified string.

        Original TextWrapper implementation uses built-in 'len()' directly,
        so overriding is needed to use width information of each characters.

        In addition, characters classified into 'ambiguous' width are
        treated as wide in East Asian area, but as narrow in other.

        This requires use decision to determine width of such characters.
        """
        def _cutdown(self, ucstr, space_left):
            # Split ucstr so the head fits within space_left display
            # columns; returns (head, remainder).
            l = 0
            colwidth = encoding.ucolwidth
            for i in xrange(len(ucstr)):
                l += colwidth(ucstr[i])
                if space_left < l:
                    return (ucstr[:i], ucstr[i:])
            return ucstr, ''

        # overriding of base class
        def _handle_long_word(self, reversed_chunks, cur_line, cur_len, width):
            space_left = max(width - cur_len, 1)

            if self.break_long_words:
                cut, res = self._cutdown(reversed_chunks[-1], space_left)
                cur_line.append(cut)
                reversed_chunks[-1] = res
            elif not cur_line:
                cur_line.append(reversed_chunks.pop())

        # this overriding code is imported from TextWrapper of Python 2.6
        # to calculate columns of string by 'encoding.ucolwidth()'
        def _wrap_chunks(self, chunks):
            colwidth = encoding.ucolwidth

            lines = []
            if self.width <= 0:
                raise ValueError("invalid width %r (must be > 0)" % self.width)

            # Arrange in reverse order so items can be efficiently popped
            # from a stack of chucks.
            chunks.reverse()

            while chunks:

                # Start the list of chunks that will make up the current line.
                # cur_len is just the length of all the chunks in cur_line.
                cur_line = []
                cur_len = 0

                # Figure out which static string will prefix this line.
                if lines:
                    indent = self.subsequent_indent
                else:
                    indent = self.initial_indent

                # Maximum width for this line.
                width = self.width - len(indent)

                # First chunk on line is whitespace -- drop it, unless this
                # is the very beginning of the text (i.e. no lines started yet).
                if self.drop_whitespace and chunks[-1].strip() == '' and lines:
                    del chunks[-1]

                while chunks:
                    l = colwidth(chunks[-1])

                    # Can at least squeeze this chunk onto the current line.
                    if cur_len + l <= width:
                        cur_line.append(chunks.pop())
                        cur_len += l

                    # Nope, this line is full.
                    else:
                        break

                # The current line is full, and the next chunk is too big to
                # fit on *any* line (not just this one).
                if chunks and colwidth(chunks[-1]) > width:
                    self._handle_long_word(chunks, cur_line, cur_len, width)

                # If the last chunk on this line is all whitespace, drop it.
                if (self.drop_whitespace and
                    cur_line and cur_line[-1].strip() == ''):
                    del cur_line[-1]

                # Convert current line back to a string and store it in list
                # of all lines (return value).
                if cur_line:
                    lines.append(indent + ''.join(cur_line))

            return lines

    # Cache the class: subsequent MBTextWrapper(...) calls go straight to
    # tw(**kwargs) without re-executing the class body.
    global MBTextWrapper
    MBTextWrapper = tw
    return tw(**kwargs)
2017
2014
def wrap(line, width, initindent='', hangindent=''):
    """Word-wrap line to the given display width, applying initindent to
    the first output line and hangindent to the rest."""
    maxindent = max(len(hangindent), len(initindent))
    if width <= maxindent:
        # adjust for weird terminal size
        width = max(78, maxindent + 1)
    enc = encoding.encoding
    mode = encoding.encodingmode
    uline = line.decode(enc, mode)
    uinit = initindent.decode(enc, mode)
    uhang = hangindent.decode(enc, mode)
    wrapper = MBTextWrapper(width=width,
                            initial_indent=uinit,
                            subsequent_indent=uhang)
    return wrapper.fill(uline).encode(enc)
2030
2027
def iterlines(iterator):
    """Yield each text line from an iterable of multi-line string chunks."""
    for block in iterator:
        for ln in block.splitlines():
            yield ln
2035
2032
def expandpath(path):
    """Expand environment variables, then '~' constructs, in path."""
    withvars = os.path.expandvars(path)
    return os.path.expanduser(withvars)
2038
2035
def hgcmd():
    """Return the command used to execute current hg

    This is different from hgexecutable() because on Windows we want
    to avoid things opening new shell windows like batch files, so we
    get either the python call or current executable.
    """
    if not mainfrozen():
        return gethgcmd()
    # frozen binary: invoke the executable itself, not an interpreter
    if getattr(sys, 'frozen', None) == 'macosx_app':
        # Env variable set by py2app
        return [os.environ['EXECUTABLEPATH']]
    return [sys.executable]
2053
2050
def rundetached(args, condfn):
    """Execute the argument list in a detached process.

    condfn is a callable which is called repeatedly and should return
    True once the child process is known to have started successfully.
    At this point, the child process PID is returned. If the child
    process fails to start or finishes before condfn() evaluates to
    True, return -1.
    """
    # Windows case is easier because the child process is either
    # successfully starting and validating the condition or exiting
    # on failure. We just poll on its PID. On Unix, if the child
    # process fails to start, it will be left in a zombie state until
    # the parent wait on it, which we cannot do since we expect a long
    # running process on success. Instead we listen for SIGCHLD telling
    # us our child process terminated.
    terminated = set()
    def handler(signum, frame):
        # reap the child and record its (pid, status) so the poll loop
        # below can notice the termination
        terminated.add(os.wait())
    prevhandler = None
    SIGCHLD = getattr(signal, 'SIGCHLD', None)
    if SIGCHLD is not None:
        prevhandler = signal.signal(SIGCHLD, handler)
    try:
        pid = spawndetached(args)
        while not condfn():
            # re-check condfn() after learning the child is gone, to
            # close the race where the condition became true just as
            # the child exited
            if ((pid in terminated or not testpid(pid))
                and not condfn()):
                return -1
            time.sleep(0.1)
        return pid
    finally:
        # always restore the previous SIGCHLD handler, if we set one
        if prevhandler is not None:
            signal.signal(signal.SIGCHLD, prevhandler)
2088
2085
def interpolate(prefix, mapping, s, fn=None, escape_prefix=False):
    """Return the result of interpolating items in the mapping into string s.

    prefix is a single character string, or a two character string with
    a backslash as the first character if the prefix needs to be escaped in
    a regular expression.

    fn is an optional function that will be applied to the replacement text
    just before replacement.

    escape_prefix is an optional flag that allows using doubled prefix for
    its escaping.
    """
    if fn is None:
        fn = lambda s: s
    patterns = '|'.join(mapping.keys())
    if escape_prefix:
        # also match the prefix itself so a doubled prefix escapes to one
        patterns += '|' + prefix
        if len(prefix) > 1:
            prefix_char = prefix[1:]
        else:
            prefix_char = prefix
        mapping[prefix_char] = prefix_char
    matcher = remod.compile(r'%s(%s)' % (prefix, patterns))

    def substitute(m):
        # strip the one-character prefix from the matched text to get the key
        return fn(mapping[m.group()[1:]])

    return matcher.sub(substitute, s)
2113
2110
def getport(port):
    """Return the port for a given network service.

    If port is an integer, it's returned as is. If it's a string, it's
    looked up using socket.getservbyname(). If there's no matching
    service, error.Abort is raised.
    """
    try:
        return int(port)
    except ValueError:
        # not numeric: fall back to a service-name lookup
        try:
            return socket.getservbyname(port)
        except socket.error:
            raise Abort(_("no port number associated with service '%s'")
                        % port)
2130
2127
# Canonical spellings accepted by parsebool(), mapped to their boolean
# value; lookups are done on the lowercased input, and anything absent
# from this table parses to None.
_booleans = {'1': True, 'yes': True, 'true': True, 'on': True, 'always': True,
             '0': False, 'no': False, 'false': False, 'off': False,
             'never': False}
2134
2131
def parsebool(s):
    """Parse s into a boolean.

    If s is not a valid boolean, returns None.
    """
    # unknown spellings yield None rather than raising
    return _booleans.get(s.lower())
2141
2138
2142 _hexdig = '0123456789ABCDEFabcdef'
2139 _hexdig = '0123456789ABCDEFabcdef'
2143 _hextochr = dict((a + b, chr(int(a + b, 16)))
2140 _hextochr = dict((a + b, chr(int(a + b, 16)))
2144 for a in _hexdig for b in _hexdig)
2141 for a in _hexdig for b in _hexdig)
2145
2142
def _urlunquote(s):
    """Decode HTTP/HTML % encoding.

    >>> _urlunquote('abc%20def')
    'abc def'
    """
    pieces = s.split('%')
    # fastpath: no '%' present, nothing to decode
    if len(pieces) == 1:
        return s
    out = pieces[0]
    for piece in pieces[1:]:
        try:
            out += _hextochr[piece[:2]] + piece[2:]
        except KeyError:
            # not a valid hex pair: keep the '%' literally
            out += '%' + piece
        except UnicodeDecodeError:
            out += unichr(int(piece[:2], 16)) + piece[2:]
    return out
2165
2162
class url(object):
    r"""Reliable URL parser.

    This parses URLs and provides attributes for the following
    components:

    <scheme>://<user>:<passwd>@<host>:<port>/<path>?<query>#<fragment>

    Missing components are set to None. The only exception is
    fragment, which is set to '' if present but empty.

    If parsefragment is False, fragment is included in query. If
    parsequery is False, query is included in path. If both are
    False, both fragment and query are included in path.

    See http://www.ietf.org/rfc/rfc2396.txt for more information.

    Note that for backward compatibility reasons, bundle URLs do not
    take host names. That means 'bundle://../' has a path of '../'.

    Examples:

    >>> url('http://www.ietf.org/rfc/rfc2396.txt')
    <url scheme: 'http', host: 'www.ietf.org', path: 'rfc/rfc2396.txt'>
    >>> url('ssh://[::1]:2200//home/joe/repo')
    <url scheme: 'ssh', host: '[::1]', port: '2200', path: '/home/joe/repo'>
    >>> url('file:///home/joe/repo')
    <url scheme: 'file', path: '/home/joe/repo'>
    >>> url('file:///c:/temp/foo/')
    <url scheme: 'file', path: 'c:/temp/foo/'>
    >>> url('bundle:foo')
    <url scheme: 'bundle', path: 'foo'>
    >>> url('bundle://../foo')
    <url scheme: 'bundle', path: '../foo'>
    >>> url(r'c:\foo\bar')
    <url path: 'c:\\foo\\bar'>
    >>> url(r'\\blah\blah\blah')
    <url path: '\\\\blah\\blah\\blah'>
    >>> url(r'\\blah\blah\blah#baz')
    <url path: '\\\\blah\\blah\\blah', fragment: 'baz'>
    >>> url(r'file:///C:\users\me')
    <url scheme: 'file', path: 'C:\\users\\me'>

    Authentication credentials:

    >>> url('ssh://joe:xyz@x/repo')
    <url scheme: 'ssh', user: 'joe', passwd: 'xyz', host: 'x', path: 'repo'>
    >>> url('ssh://joe@x/repo')
    <url scheme: 'ssh', user: 'joe', host: 'x', path: 'repo'>

    Query strings and fragments:

    >>> url('http://host/a?b#c')
    <url scheme: 'http', host: 'host', path: 'a', query: 'b', fragment: 'c'>
    >>> url('http://host/a?b#c', parsequery=False, parsefragment=False)
    <url scheme: 'http', host: 'host', path: 'a?b#c'>
    """

    # characters left unescaped by __str__ in user/passwd components
    _safechars = "!~*'()+"
    # characters left unescaped by __str__ in path/fragment components
    _safepchars = "/!~*'()+:\\"
    _matchscheme = remod.compile(r'^[a-zA-Z0-9+.\-]+:').match

    def __init__(self, path, parsequery=True, parsefragment=True):
        # We slowly chomp away at path until we have only the path left
        self.scheme = self.user = self.passwd = self.host = None
        self.port = self.path = self.query = self.fragment = None
        self._localpath = True
        self._hostport = ''
        self._origpath = path

        if parsefragment and '#' in path:
            path, self.fragment = path.split('#', 1)
            if not path:
                path = None

        # special case for Windows drive letters and UNC paths
        if hasdriveletter(path) or path.startswith(r'\\'):
            self.path = path
            return

        # For compatibility reasons, we can't handle bundle paths as
        # normal URLS
        if path.startswith('bundle:'):
            self.scheme = 'bundle'
            path = path[7:]
            if path.startswith('//'):
                path = path[2:]
            self.path = path
            return

        if self._matchscheme(path):
            parts = path.split(':', 1)
            if parts[0]:
                self.scheme, path = parts
                self._localpath = False

        if not path:
            path = None
            if self._localpath:
                self.path = ''
                return
        else:
            if self._localpath:
                self.path = path
                return

            if parsequery and '?' in path:
                path, self.query = path.split('?', 1)
                if not path:
                    path = None
                if not self.query:
                    self.query = None

            # // is required to specify a host/authority
            if path and path.startswith('//'):
                parts = path[2:].split('/', 1)
                if len(parts) > 1:
                    self.host, path = parts
                else:
                    self.host = parts[0]
                    path = None
                if not self.host:
                    self.host = None
                    # path of file:///d is /d
                    # path of file:///d:/ is d:/, not /d:/
                    if path and not hasdriveletter(path):
                        path = '/' + path

            if self.host and '@' in self.host:
                self.user, self.host = self.host.rsplit('@', 1)
                if ':' in self.user:
                    self.user, self.passwd = self.user.split(':', 1)
                if not self.host:
                    self.host = None

            # Don't split on colons in IPv6 addresses without ports
            if (self.host and ':' in self.host and
                not (self.host.startswith('[') and self.host.endswith(']'))):
                self._hostport = self.host
                self.host, self.port = self.host.rsplit(':', 1)
                if not self.host:
                    self.host = None

            if (self.host and self.scheme == 'file' and
                self.host not in ('localhost', '127.0.0.1', '[::1]')):
                raise Abort(_('file:// URLs can only refer to localhost'))

        self.path = path

        # leave the query string escaped
        for a in ('user', 'passwd', 'host', 'port',
                  'path', 'fragment'):
            v = getattr(self, a)
            if v is not None:
                setattr(self, a, _urlunquote(v))

    def __repr__(self):
        # show only the components that are actually set
        attrs = []
        for a in ('scheme', 'user', 'passwd', 'host', 'port', 'path',
                  'query', 'fragment'):
            v = getattr(self, a)
            if v is not None:
                attrs.append('%s: %r' % (a, v))
        return '<url %s>' % ', '.join(attrs)

    def __str__(self):
        r"""Join the URL's components back into a URL string.

        Examples:

        >>> str(url('http://user:pw@host:80/c:/bob?fo:oo#ba:ar'))
        'http://user:pw@host:80/c:/bob?fo:oo#ba:ar'
        >>> str(url('http://user:pw@host:80/?foo=bar&baz=42'))
        'http://user:pw@host:80/?foo=bar&baz=42'
        >>> str(url('http://user:pw@host:80/?foo=bar%3dbaz'))
        'http://user:pw@host:80/?foo=bar%3dbaz'
        >>> str(url('ssh://user:pw@[::1]:2200//home/joe#'))
        'ssh://user:pw@[::1]:2200//home/joe#'
        >>> str(url('http://localhost:80//'))
        'http://localhost:80//'
        >>> str(url('http://localhost:80/'))
        'http://localhost:80/'
        >>> str(url('http://localhost:80'))
        'http://localhost:80/'
        >>> str(url('bundle:foo'))
        'bundle:foo'
        >>> str(url('bundle://../foo'))
        'bundle:../foo'
        >>> str(url('path'))
        'path'
        >>> str(url('file:///tmp/foo/bar'))
        'file:///tmp/foo/bar'
        >>> str(url('file:///c:/tmp/foo/bar'))
        'file:///c:/tmp/foo/bar'
        >>> print url(r'bundle:foo\bar')
        bundle:foo\bar
        >>> print url(r'file:///D:\data\hg')
        file:///D:\data\hg
        """
        if self._localpath:
            s = self.path
            if self.scheme == 'bundle':
                s = 'bundle:' + s
            if self.fragment:
                s += '#' + self.fragment
            return s

        s = self.scheme + ':'
        if self.user or self.passwd or self.host:
            s += '//'
        elif self.scheme and (not self.path or self.path.startswith('/')
                              or hasdriveletter(self.path)):
            s += '//'
            if hasdriveletter(self.path):
                s += '/'
        if self.user:
            s += urllib.quote(self.user, safe=self._safechars)
        if self.passwd:
            s += ':' + urllib.quote(self.passwd, safe=self._safechars)
        if self.user or self.passwd:
            s += '@'
        if self.host:
            if not (self.host.startswith('[') and self.host.endswith(']')):
                s += urllib.quote(self.host)
            else:
                s += self.host
        if self.port:
            s += ':' + urllib.quote(self.port)
        if self.host:
            s += '/'
        if self.path:
            # TODO: similar to the query string, we should not unescape the
            # path when we store it, the path might contain '%2f' = '/',
            # which we should *not* escape.
            s += urllib.quote(self.path, safe=self._safepchars)
        if self.query:
            # we store the query in escaped form.
            s += '?' + self.query
        if self.fragment is not None:
            s += '#' + urllib.quote(self.fragment, safe=self._safepchars)
        return s

    def authinfo(self):
        '''Return (url-without-credentials, authinfo-or-None) suitable for
        feeding to a urllib2 password manager.'''
        user, passwd = self.user, self.passwd
        try:
            # temporarily strip credentials so str(self) omits them
            self.user, self.passwd = None, None
            s = str(self)
        finally:
            self.user, self.passwd = user, passwd
        if not self.user:
            return (s, None)
        # authinfo[1] is passed to urllib2 password manager, and its
        # URIs must not contain credentials. The host is passed in the
        # URIs list because Python < 2.4.3 uses only that to search for
        # a password.
        return (s, (None, (s, self.host),
                    self.user, self.passwd or ''))

    def isabs(self):
        '''Return True if this URL cannot be joined onto a base path.'''
        if self.scheme and self.scheme != 'file':
            return True # remote URL
        if hasdriveletter(self.path):
            return True # absolute for our purposes - can't be joined()
        if self.path.startswith(r'\\'):
            return True # Windows UNC path
        if self.path.startswith('/'):
            return True # POSIX-style
        return False

    def localpath(self):
        '''Return a filesystem path for file:/bundle: URLs, else the
        original string the URL was parsed from.'''
        if self.scheme == 'file' or self.scheme == 'bundle':
            path = self.path or '/'
            # For Windows, we need to promote hosts containing drive
            # letters to paths with drive letters.
            if hasdriveletter(self._hostport):
                path = self._hostport + '/' + self.path
            elif (self.host is not None and self.path
                  and not hasdriveletter(path)):
                path = '/' + path
            return path
        return self._origpath

    def islocal(self):
        '''whether localpath will return something that posixfile can open'''
        return (not self.scheme or self.scheme == 'file'
                or self.scheme == 'bundle')
2452
2449
def hasscheme(path):
    '''Return True if path parses as a URL with an explicit scheme.'''
    u = url(path)
    return bool(u.scheme)
2455
2452
def hasdriveletter(path):
    '''Return whether path starts with a Windows drive letter ("x:...").

    A falsy path (None or '') is returned unchanged, so the result is
    only guaranteed to be truthy/falsy, not strictly boolean.'''
    if not path:
        return path
    return path[1:2] == ':' and path[0:1].isalpha()
2458
2455
def urllocalpath(path):
    '''Return the local filesystem path for path, treating any query or
    fragment markers as literal path characters.'''
    u = url(path, parsequery=False, parsefragment=False)
    return u.localpath()
2461
2458
def hidepassword(u):
    '''hide user credential in a url string'''
    parsed = url(u)
    if parsed.passwd:
        # keep the structure visible but mask the secret
        parsed.passwd = '***'
    return str(parsed)
2468
2465
def removeauth(u):
    '''remove all authentication information from a url string'''
    parsed = url(u)
    parsed.user = parsed.passwd = None
    return str(parsed)
2474
2471
def isatty(fp):
    '''Return fp.isatty(), or False when fp provides no isatty method.'''
    isattyfn = getattr(fp, 'isatty', None)
    if isattyfn is None:
        return False
    return isattyfn()
2480
2477
# render a duration (in seconds) with three significant digits and a
# human-friendly unit; rows are (threshold, divisor, format) scanned in
# order by unitcountfn
timecount = unitcountfn(
    (1, 1e3, _('%.0f s')),
    (100, 1, _('%.1f s')),
    (10, 1, _('%.2f s')),
    (1, 1, _('%.3f s')),
    (100, 0.001, _('%.1f ms')),
    (10, 0.001, _('%.2f ms')),
    (1, 0.001, _('%.3f ms')),
    (100, 0.000001, _('%.1f us')),
    (10, 0.000001, _('%.2f us')),
    (1, 0.000001, _('%.3f us')),
    (100, 0.000000001, _('%.1f ns')),
    (10, 0.000000001, _('%.2f ns')),
    (1, 0.000000001, _('%.3f ns')),
    )
2496
2493
# current indentation depth for nested @timed reports; a one-element
# list so the closure below can mutate it in place
_timenesting = [0]

def timed(func):
    '''Report the execution time of a function call to stderr.

    During development, use as a decorator when you need to measure
    the cost of a function, e.g. as follows:

    @util.timed
    def foo(a, b, c):
        pass
    '''

    def wrapper(*args, **kwargs):
        start = time.time()
        indent = 2
        # deepen the indent while the wrapped call runs so nested
        # @timed calls report inside their caller
        _timenesting[0] += indent
        try:
            return func(*args, **kwargs)
        finally:
            elapsed = time.time() - start
            _timenesting[0] -= indent
            sys.stderr.write('%s%s: %s\n' %
                             (' ' * _timenesting[0], func.__name__,
                              timecount(elapsed)))
    return wrapper
2523
2520
# recognized size suffixes and their byte multipliers; order matters:
# a suffix is accepted at the first match
_sizeunits = (('m', 2**20), ('k', 2**10), ('g', 2**30),
              ('kb', 2**10), ('mb', 2**20), ('gb', 2**30), ('b', 1))

def sizetoint(s):
    '''Convert a space specifier to a byte count.

    >>> sizetoint('30')
    30
    >>> sizetoint('2.2kb')
    2252
    >>> sizetoint('6M')
    6291456
    '''
    spec = s.strip().lower()
    try:
        for suffix, multiplier in _sizeunits:
            if spec.endswith(suffix):
                return int(float(spec[:-len(suffix)]) * multiplier)
        return int(spec)
    except ValueError:
        raise error.ParseError(_("couldn't parse size: %s") % s)
2545
2542
class hooks(object):
    '''A collection of hook functions that can be used to extend a
    function's behavior. Hooks are called in lexicographic order,
    based on the names of their sources.'''

    def __init__(self):
        # list of (source, hookfn) pairs; sorted lazily at call time
        self._hooks = []

    def add(self, source, hook):
        '''Register hook; source determines its position in call order.'''
        self._hooks.append((source, hook))

    def __call__(self, *args):
        '''Invoke every registered hook with args and return their
        results in source order.'''
        self._hooks.sort(key=lambda entry: entry[0])
        return [hookfn(*args) for _source, hookfn in self._hooks]
2563
2560
def debugstacktrace(msg='stacktrace', skip=0, f=sys.stderr, otherf=sys.stdout):
    '''Writes a message to f (stderr) with a nicely formatted stacktrace.
    Skips the 'skip' last entries. By default it will flush stdout first.
    It can be used everywhere and do intentionally not require an ui object.
    Not be used in production code but very convenient while developing.
    '''
    if otherf:
        # flush the other stream first so interleaved output stays ordered
        otherf.flush()
    f.write('%s at:\n' % msg)
    # drop this function's own frame plus the 'skip' innermost callers
    entries = [('%s:%s' % (fn, ln), func)
               for fn, ln, func, _text in traceback.extract_stack()[:-skip - 1]]
    if entries:
        # pad the file:line column so the 'in <function>' parts line up
        fnmax = max(len(entry[0]) for entry in entries)
        for fnln, func in entries:
            f.write(' %-*s in %s\n' % (fnmax, fnln, func))
    f.flush()
2580
2577
class dirs(object):
    '''a multiset of directory names from a dirstate or manifest'''

    def __init__(self, map, skip=None):
        # counts of how many tracked paths live under each directory
        self._dirs = {}
        addpath = self.addpath
        if safehasattr(map, 'iteritems') and skip is not None:
            # dirstate-style input: skip entries in state 'skip'
            for fname, entry in map.iteritems():
                if entry[0] != skip:
                    addpath(fname)
        else:
            # plain iterable of paths (e.g. a manifest)
            for fname in map:
                addpath(fname)

    def addpath(self, path):
        counts = self._dirs
        for ancestor in finddirs(path):
            if ancestor in counts:
                # every shallower ancestor is already counted too
                counts[ancestor] += 1
                return
            counts[ancestor] = 1

    def delpath(self, path):
        counts = self._dirs
        for ancestor in finddirs(path):
            remaining = counts[ancestor] - 1
            if remaining > 0:
                # still referenced by other paths; shallower ancestors
                # are too, so stop here
                counts[ancestor] = remaining
                return
            del counts[ancestor]

    def __iter__(self):
        return self._dirs.iterkeys()

    def __contains__(self, d):
        return d in self._dirs
2616
2613
# prefer the C implementation of dirs when the parsers extension
# module provides one
if safehasattr(parsers, 'dirs'):
    dirs = parsers.dirs
2619
2616
def finddirs(path):
    '''Yield each ancestor directory of a '/'-separated path, deepest
    first (e.g. 'a/b/c' yields 'a/b' then 'a').'''
    remaining = path
    while True:
        cut = remaining.rfind('/')
        if cut == -1:
            return
        remaining = remaining[:cut]
        yield remaining
2625
2622
2626 # compression utility
2623 # compression utility
2627
2624
class nocompress(object):
    """Compressor with the same interface as bz2/zlib compressors that
    stores data unmodified."""
    def compress(self, x):
        # pass-through: the 'compressed' form is the input itself
        return x
    def flush(self):
        # stateless, so there is never buffered output to drain
        return ""
2633
2630
# map of bundle compression type to compressor factory; values are
# zero-argument callables so the heavy modules are only exercised when
# a compressor is actually requested
compressors = {
    None: nocompress,
    # lambda to prevent early import
    'BZ': lambda: bz2.BZ2Compressor(),
    'GZ': lambda: zlib.compressobj(),
    }
# also support the old form by courtesies
compressors['UN'] = compressors[None]
2642
2639
def _makedecompressor(decompcls):
    '''Given a factory for decompressor objects, return a function that
    wraps a file-like object into a file-like object yielding the
    decompressed stream.'''
    def func(fh):
        def generator(f):
            decompressor = decompcls()
            # decompress chunk by chunk so large streams never have to
            # fit in memory at once
            for chunk in filechunkiter(f):
                yield decompressor.decompress(chunk)
        return chunkbuffer(generator(fh))
    return func
2651
2648
class ctxmanager(object):
    '''A context manager for use in 'with' blocks to allow multiple
    contexts to be entered at once. This is both safer and more
    flexible than contextlib.nested.

    Once Mercurial supports Python 2.7+, this will become mostly
    unnecessary.
    '''

    def __init__(self, *args):
        '''Accepts a list of no-argument functions that return context
        managers. These will be invoked at __call__ time.'''
        self._pending = args
        self._atexit = []

    def __enter__(self):
        return self

    def __call__(self):
        '''Create and enter context managers in the order in which they were
        passed to the constructor.

        Returns the list of values produced by each manager's
        __enter__, in the same order.'''
        values = []
        for func in self._pending:
            obj = func()
            values.append(obj.__enter__())
            self._atexit.append(obj.__exit__)
        del self._pending
        return values

    def atexit(self, func, *args, **kwargs):
        '''Add a function to call when this context manager exits. The
        ordering of multiple atexit calls is unspecified, save that
        they will happen before any __exit__ functions.'''
        def wrapper(exc_type, exc_val, exc_tb):
            func(*args, **kwargs)
        self._atexit.append(wrapper)
        return func

    def __exit__(self, exc_type, exc_val, exc_tb):
        '''Context managers are exited in the reverse order from which
        they were created.'''
        received = exc_type is not None
        suppressed = False
        pending = None
        self._atexit.reverse()
        for exitfunc in self._atexit:
            try:
                if exitfunc(exc_type, exc_val, exc_tb):
                    # this exit handler suppressed the exception; don't
                    # pass it on to the remaining (outer) handlers
                    suppressed = True
                    exc_type = None
                    exc_val = None
                    exc_tb = None
            except BaseException:
                # an exit handler itself failed: remember the failure
                # (it becomes the active exception for outer handlers)
                # but keep unwinding the remaining handlers.
                # NOTE: a redundant duplicate 'pending = sys.exc_info()'
                # assignment preceding this line was removed; the value
                # was immediately overwritten.
                exc_type, exc_val, exc_tb = pending = sys.exc_info()
        del self._atexit
        if pending:
            raise exc_val
        return received and suppressed
2711
2708
def _bz2():
    # build a decompressor for the '_truncatedBZ' bundle type, whose
    # stream has had its two-byte magic removed
    d = bz2.BZ2Decompressor()
    # Bzip2 stream start with BZ, but we stripped it.
    # we put it back for good measure.
    d.decompress('BZ')
    return d
2718
2715
# map of bundle compression type to a wrapper taking a file-like object
# and returning a file-like object producing the decompressed stream
decompressors = {None: lambda fh: fh,
                 '_truncatedBZ': _makedecompressor(_bz2),
                 'BZ': _makedecompressor(lambda: bz2.BZ2Decompressor()),
                 'GZ': _makedecompressor(lambda: zlib.decompressobj()),
                 }
# also support the old form by courtesies
decompressors['UN'] = decompressors[None]
2726
2723
# convenient shortcut
dst = debugstacktrace
General Comments 0
You need to be logged in to leave comments. Login now