##// END OF EJS Templates
util: use __code__ (available since py2.6)
timeless -
r28832:f5ff10f6 default
parent child Browse files
Show More
@@ -1,2740 +1,2740 b''
1 # util.py - Mercurial utility functions and platform specific implementations
1 # util.py - Mercurial utility functions and platform specific implementations
2 #
2 #
3 # Copyright 2005 K. Thananchayan <thananck@yahoo.com>
3 # Copyright 2005 K. Thananchayan <thananck@yahoo.com>
4 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
5 # Copyright 2006 Vadim Gelfer <vadim.gelfer@gmail.com>
5 # Copyright 2006 Vadim Gelfer <vadim.gelfer@gmail.com>
6 #
6 #
7 # This software may be used and distributed according to the terms of the
7 # This software may be used and distributed according to the terms of the
8 # GNU General Public License version 2 or any later version.
8 # GNU General Public License version 2 or any later version.
9
9
10 """Mercurial utility functions and platform specific implementations.
10 """Mercurial utility functions and platform specific implementations.
11
11
12 This contains helper routines that are independent of the SCM core and
12 This contains helper routines that are independent of the SCM core and
13 hide platform-specific details from the core.
13 hide platform-specific details from the core.
14 """
14 """
15
15
16 from __future__ import absolute_import
16 from __future__ import absolute_import
17
17
18 import bz2
18 import bz2
19 import calendar
19 import calendar
20 import collections
20 import collections
21 import datetime
21 import datetime
22 import errno
22 import errno
23 import gc
23 import gc
24 import hashlib
24 import hashlib
25 import imp
25 import imp
26 import os
26 import os
27 import re as remod
27 import re as remod
28 import shutil
28 import shutil
29 import signal
29 import signal
30 import socket
30 import socket
31 import subprocess
31 import subprocess
32 import sys
32 import sys
33 import tempfile
33 import tempfile
34 import textwrap
34 import textwrap
35 import time
35 import time
36 import traceback
36 import traceback
37 import urllib
37 import urllib
38 import zlib
38 import zlib
39
39
40 from . import (
40 from . import (
41 encoding,
41 encoding,
42 error,
42 error,
43 i18n,
43 i18n,
44 osutil,
44 osutil,
45 parsers,
45 parsers,
46 pycompat,
46 pycompat,
47 )
47 )
48
48
49 for attr in (
49 for attr in (
50 'empty',
50 'empty',
51 'queue',
51 'queue',
52 ):
52 ):
53 globals()[attr] = getattr(pycompat, attr)
53 globals()[attr] = getattr(pycompat, attr)
54
54
55 if os.name == 'nt':
55 if os.name == 'nt':
56 from . import windows as platform
56 from . import windows as platform
57 else:
57 else:
58 from . import posix as platform
58 from . import posix as platform
59
59
60 md5 = hashlib.md5
60 md5 = hashlib.md5
61 sha1 = hashlib.sha1
61 sha1 = hashlib.sha1
62 sha512 = hashlib.sha512
62 sha512 = hashlib.sha512
63 _ = i18n._
63 _ = i18n._
64
64
65 cachestat = platform.cachestat
65 cachestat = platform.cachestat
66 checkexec = platform.checkexec
66 checkexec = platform.checkexec
67 checklink = platform.checklink
67 checklink = platform.checklink
68 copymode = platform.copymode
68 copymode = platform.copymode
69 executablepath = platform.executablepath
69 executablepath = platform.executablepath
70 expandglobs = platform.expandglobs
70 expandglobs = platform.expandglobs
71 explainexit = platform.explainexit
71 explainexit = platform.explainexit
72 findexe = platform.findexe
72 findexe = platform.findexe
73 gethgcmd = platform.gethgcmd
73 gethgcmd = platform.gethgcmd
74 getuser = platform.getuser
74 getuser = platform.getuser
75 getpid = os.getpid
75 getpid = os.getpid
76 groupmembers = platform.groupmembers
76 groupmembers = platform.groupmembers
77 groupname = platform.groupname
77 groupname = platform.groupname
78 hidewindow = platform.hidewindow
78 hidewindow = platform.hidewindow
79 isexec = platform.isexec
79 isexec = platform.isexec
80 isowner = platform.isowner
80 isowner = platform.isowner
81 localpath = platform.localpath
81 localpath = platform.localpath
82 lookupreg = platform.lookupreg
82 lookupreg = platform.lookupreg
83 makedir = platform.makedir
83 makedir = platform.makedir
84 nlinks = platform.nlinks
84 nlinks = platform.nlinks
85 normpath = platform.normpath
85 normpath = platform.normpath
86 normcase = platform.normcase
86 normcase = platform.normcase
87 normcasespec = platform.normcasespec
87 normcasespec = platform.normcasespec
88 normcasefallback = platform.normcasefallback
88 normcasefallback = platform.normcasefallback
89 openhardlinks = platform.openhardlinks
89 openhardlinks = platform.openhardlinks
90 oslink = platform.oslink
90 oslink = platform.oslink
91 parsepatchoutput = platform.parsepatchoutput
91 parsepatchoutput = platform.parsepatchoutput
92 pconvert = platform.pconvert
92 pconvert = platform.pconvert
93 poll = platform.poll
93 poll = platform.poll
94 popen = platform.popen
94 popen = platform.popen
95 posixfile = platform.posixfile
95 posixfile = platform.posixfile
96 quotecommand = platform.quotecommand
96 quotecommand = platform.quotecommand
97 readpipe = platform.readpipe
97 readpipe = platform.readpipe
98 rename = platform.rename
98 rename = platform.rename
99 removedirs = platform.removedirs
99 removedirs = platform.removedirs
100 samedevice = platform.samedevice
100 samedevice = platform.samedevice
101 samefile = platform.samefile
101 samefile = platform.samefile
102 samestat = platform.samestat
102 samestat = platform.samestat
103 setbinary = platform.setbinary
103 setbinary = platform.setbinary
104 setflags = platform.setflags
104 setflags = platform.setflags
105 setsignalhandler = platform.setsignalhandler
105 setsignalhandler = platform.setsignalhandler
106 shellquote = platform.shellquote
106 shellquote = platform.shellquote
107 spawndetached = platform.spawndetached
107 spawndetached = platform.spawndetached
108 split = platform.split
108 split = platform.split
109 sshargs = platform.sshargs
109 sshargs = platform.sshargs
110 statfiles = getattr(osutil, 'statfiles', platform.statfiles)
110 statfiles = getattr(osutil, 'statfiles', platform.statfiles)
111 statisexec = platform.statisexec
111 statisexec = platform.statisexec
112 statislink = platform.statislink
112 statislink = platform.statislink
113 termwidth = platform.termwidth
113 termwidth = platform.termwidth
114 testpid = platform.testpid
114 testpid = platform.testpid
115 umask = platform.umask
115 umask = platform.umask
116 unlink = platform.unlink
116 unlink = platform.unlink
117 unlinkpath = platform.unlinkpath
117 unlinkpath = platform.unlinkpath
118 username = platform.username
118 username = platform.username
119
119
120 # Python compatibility
120 # Python compatibility
121
121
# Sentinel distinct from every real attribute value, so safehasattr() below
# can detect "attribute missing" even when the attribute's value is
# None/False/0.
_notset = object()

# disable Python's problematic floating point timestamps (issue4836)
# (Python hypocritically says you shouldn't change this behavior in
# libraries, and sure enough Mercurial is not a library.)
os.stat_float_times(False)
128
128
def safehasattr(thing, attr):
    """Return True if `thing` has the attribute `attr`.

    Only a missing attribute (AttributeError inside getattr) counts as
    absence; any other exception raised by attribute access propagates,
    unlike Python 2's builtin hasattr() which swallows everything.
    """
    sentinel = object()
    return getattr(thing, attr, sentinel) is not sentinel
131
131
# Supported digest algorithms, keyed by the names used on the wire and in
# bundle specifications. The values are the hashlib constructors aliased
# near the top of this module.
DIGESTS = {
    'md5': md5,
    'sha1': sha1,
    'sha512': sha512,
}
# List of digest types from strongest to weakest
DIGESTS_BY_STRENGTH = ['sha512', 'sha1', 'md5']

# Sanity check at import time: every entry in the preference order must
# have an implementation in DIGESTS.
for k in DIGESTS_BY_STRENGTH:
    assert k in DIGESTS
142
142
class digester(object):
    """helper to compute digests.

    This helper can be used to compute one or more digests given their name.

    >>> d = digester(['md5', 'sha1'])
    >>> d.update('foo')
    >>> [k for k in sorted(d)]
    ['md5', 'sha1']
    >>> d['md5']
    'acbd18db4cc2f85cedef654fccc4a4d8'
    >>> d['sha1']
    '0beec7b5ea3f0fdbc95d0dd47f3c5bc275da8a33'
    >>> digester.preferred(['md5', 'sha1'])
    'sha1'
    """

    def __init__(self, digests, s=''):
        # digests: iterable of names that must appear in DIGESTS
        # s: optional initial data to feed to all digests
        self._hashes = {}
        for k in digests:
            if k not in DIGESTS:
                raise Abort(_('unknown digest type: %s') % k)
            self._hashes[k] = DIGESTS[k]()
        if s:
            self.update(s)

    def update(self, data):
        """feed data to every requested digest"""
        for h in self._hashes.values():
            h.update(data)

    def __getitem__(self, key):
        """return the hex digest computed so far for digest type `key`"""
        if key not in DIGESTS:
            # fix: this previously formatted with '% k', but 'k' is not
            # bound in this scope (it is only a loop variable in __init__),
            # so the error path itself raised NameError; use 'key'.
            raise Abort(_('unknown digest type: %s') % key)
        return self._hashes[key].hexdigest()

    def __iter__(self):
        return iter(self._hashes)

    @staticmethod
    def preferred(supported):
        """returns the strongest digest type in both supported and DIGESTS."""

        for k in DIGESTS_BY_STRENGTH:
            if k in supported:
                return k
        return None
189
189
class digestchecker(object):
    """file handle wrapper that additionally checks content against a given
    size and digests.

    d = digestchecker(fh, size, {'md5': '...'})

    When multiple digests are given, all of them are validated.
    """

    def __init__(self, fh, size, digests):
        # wrapped handle, expected byte count, and expected hex digests
        self._fh = fh
        self._size = size
        self._got = 0
        self._digests = dict(digests)
        self._digester = digester(self._digests.keys())

    def read(self, length=-1):
        """read from the wrapped handle, feeding the digests as a side
        effect and counting the bytes seen so far"""
        content = self._fh.read(length)
        self._digester.update(content)
        self._got += len(content)
        return content

    def validate(self):
        """raise Abort unless exactly the expected number of bytes was read
        and every supplied digest matches"""
        if self._got != self._size:
            raise Abort(_('size mismatch: expected %d, got %d') %
                        (self._size, self._got))
        for name, expected in self._digests.items():
            actual = self._digester[name]
            if expected != actual:
                # i18n: first parameter is a digest name
                raise Abort(_('%s mismatch: expected %s, got %s') %
                            (name, expected, actual))
221
221
# Compatibility shim: Python 3 has no builtin buffer(). Provide a
# substitute that slices (py2) or wraps in a zero-copy memoryview (py3).
try:
    buffer = buffer
except NameError:
    if sys.version_info[0] < 3:
        def buffer(sliceable, offset=0):
            # plain slice copy on Python 2 without the builtin
            return sliceable[offset:]
    else:
        def buffer(sliceable, offset=0):
            # zero-copy view on Python 3
            return memoryview(sliceable)[offset:]

# close_fds is only safe/cheap to request on POSIX; see subprocess docs.
closefds = os.name == 'posix'

# read size used by bufferedinputpipe._fillbuffer
_chunksize = 4096
235
235
class bufferedinputpipe(object):
    """a manually buffered input pipe

    Python will not let us use buffered IO and lazy reading with 'polling' at
    the same time. We cannot probe the buffer state and select will not detect
    that data are ready to read if they are already buffered.

    This class let us work around that by implementing its own buffering
    (allowing efficient readline) while offering a way to know if the buffer is
    empty from the output (allowing collaboration of the buffer with polling).

    This class lives in the 'util' module because it makes use of the 'os'
    module from the python stdlib.
    """

    def __init__(self, input):
        # input: underlying file-like object with fileno()/close()/closed
        self._input = input
        # list of raw chunks read but not yet consumed
        self._buffer = []
        # set once os.read() returns an empty string (EOF on the fd)
        self._eof = False
        # total number of buffered bytes across all chunks in _buffer
        self._lenbuf = 0

    @property
    def hasbuffer(self):
        """True is any data is currently buffered

        This will be used externally a pre-step for polling IO. If there is
        already data then no polling should be set in place."""
        return bool(self._buffer)

    @property
    def closed(self):
        return self._input.closed

    def fileno(self):
        return self._input.fileno()

    def close(self):
        return self._input.close()

    def read(self, size):
        # keep filling until we have `size` bytes or hit EOF
        while (not self._eof) and (self._lenbuf < size):
            self._fillbuffer()
        return self._frombuffer(size)

    def readline(self, *args, **kwargs):
        if 1 < len(self._buffer):
            # this should not happen because both read and readline end with a
            # _frombuffer call that collapse it.
            self._buffer = [''.join(self._buffer)]
            self._lenbuf = len(self._buffer[0])
        # lfi: index of the first '\n' in the most recent chunk, or -1
        lfi = -1
        if self._buffer:
            lfi = self._buffer[-1].find('\n')
        # read more chunks until a newline shows up or EOF
        while (not self._eof) and lfi < 0:
            self._fillbuffer()
            if self._buffer:
                lfi = self._buffer[-1].find('\n')
        size = lfi + 1
        if lfi < 0: # end of file
            size = self._lenbuf
        elif 1 < len(self._buffer):
            # we need to take previous chunks into account
            size += self._lenbuf - len(self._buffer[-1])
        return self._frombuffer(size)

    def _frombuffer(self, size):
        """return at most 'size' data from the buffer

        The data are removed from the buffer."""
        if size == 0 or not self._buffer:
            return ''
        buf = self._buffer[0]
        if 1 < len(self._buffer):
            # collapse all chunks into one before slicing
            buf = ''.join(self._buffer)

        data = buf[:size]
        buf = buf[len(data):]
        # keep the unconsumed remainder (if any) as the single buffered chunk
        if buf:
            self._buffer = [buf]
            self._lenbuf = len(buf)
        else:
            self._buffer = []
            self._lenbuf = 0
        return data

    def _fillbuffer(self):
        """read data to the buffer"""
        # unbuffered os.read so select()/poll() state stays accurate
        data = os.read(self._input.fileno(), _chunksize)
        if not data:
            self._eof = True
        else:
            self._lenbuf += len(data)
            self._buffer.append(data)
329
329
def popen2(cmd, env=None, newlines=False):
    """Run cmd through the shell; return its (stdin, stdout) pipe handles."""
    # Setting bufsize to -1 lets the system decide the buffer size.
    # The default for bufsize is 0, meaning unbuffered. This leads to
    # poor performance on Mac OS X: http://bugs.python.org/issue4194
    proc = subprocess.Popen(cmd,
                            shell=True,
                            bufsize=-1,
                            close_fds=closefds,
                            stdin=subprocess.PIPE,
                            stdout=subprocess.PIPE,
                            universal_newlines=newlines,
                            env=env)
    return proc.stdin, proc.stdout
340
340
def popen3(cmd, env=None, newlines=False):
    """Like popen4(), but drop the Popen object and return only the three
    pipe handles (stdin, stdout, stderr)."""
    stdin, stdout, stderr, _proc = popen4(cmd, env, newlines)
    return stdin, stdout, stderr
344
344
def popen4(cmd, env=None, newlines=False, bufsize=-1):
    """Run cmd through the shell with all three std streams piped.

    Returns (stdin, stdout, stderr, proc) where proc is the Popen object,
    so callers can wait on or poll the child.
    """
    proc = subprocess.Popen(cmd,
                            shell=True,
                            bufsize=bufsize,
                            close_fds=closefds,
                            stdin=subprocess.PIPE,
                            stdout=subprocess.PIPE,
                            stderr=subprocess.PIPE,
                            universal_newlines=newlines,
                            env=env)
    return proc.stdin, proc.stdout, proc.stderr, proc
353
353
def version():
    """Return version information if available."""
    try:
        from . import __version__
    except ImportError:
        # no generated __version__ module (e.g. running from a source
        # checkout that was never built)
        return 'unknown'
    return __version__.version
361
361
def versiontuple(v=None, n=4):
    """Parses a Mercurial version string into an N-tuple.

    The version string to be parsed is specified with the ``v`` argument.
    If it isn't defined, the current Mercurial version string will be parsed.

    ``n`` can be 2, 3, or 4. Here is how some version strings map to
    returned values:

    >>> v = '3.6.1+190-df9b73d2d444'
    >>> versiontuple(v, 2)
    (3, 6)
    >>> versiontuple(v, 3)
    (3, 6, 1)
    >>> versiontuple(v, 4)
    (3, 6, 1, '190-df9b73d2d444')

    >>> versiontuple('3.6.1+190-df9b73d2d444+20151118')
    (3, 6, 1, '190-df9b73d2d444+20151118')

    >>> v = '3.6'
    >>> versiontuple(v, 2)
    (3, 6)
    >>> versiontuple(v, 3)
    (3, 6, None)
    >>> versiontuple(v, 4)
    (3, 6, None, None)
    """
    if not v:
        v = version()

    # Everything after the first '+' is the "extra" local-build suffix.
    if '+' in v:
        vparts, extra = v.split('+', 1)
    else:
        vparts, extra = v, None

    # Collect leading numeric components; stop at the first non-integer
    # piece (e.g. '1rc1').
    vints = []
    for piece in vparts.split('.'):
        try:
            vints.append(int(piece))
        except ValueError:
            break
    # (3, 6) -> (3, 6, None)
    while len(vints) < 3:
        vints.append(None)

    if n == 2:
        return (vints[0], vints[1])
    elif n == 3:
        return (vints[0], vints[1], vints[2])
    elif n == 4:
        return (vints[0], vints[1], vints[2], extra)
414
414
# used by parsedate
# The formats are tried in order, so the most specific patterns come first.
defaultdateformats = (
    # ISO-style date plus time
    '%Y-%m-%d %H:%M:%S',
    '%Y-%m-%d %I:%M:%S%p',
    '%Y-%m-%d %H:%M',
    '%Y-%m-%d %I:%M%p',
    '%Y-%m-%d',
    # month/day shorthands
    '%m-%d',
    '%m/%d',
    '%m/%d/%y',
    '%m/%d/%Y',
    # asctime-style
    '%a %b %d %H:%M:%S %Y',
    '%a %b %d %I:%M:%S%p %Y',
    '%a, %d %b %Y %H:%M:%S', # GNU coreutils "/bin/date --rfc-2822"
    '%b %d %H:%M:%S %Y',
    '%b %d %I:%M:%S%p %Y',
    '%b %d %H:%M:%S',
    '%b %d %I:%M:%S%p',
    '%b %d %H:%M',
    '%b %d %I:%M%p',
    '%b %d %Y',
    '%b %d',
    # time-only
    '%H:%M:%S',
    '%I:%M:%S%p',
    '%H:%M',
    '%I:%M%p',
)

# Extra, more ambiguous formats accepted when an extended parse is wanted
# (e.g. date ranges): bare years, year-month, and month names.
extendeddateformats = defaultdateformats + (
    "%Y",
    "%Y-%m",
    "%b",
    "%b %Y",
    )
449
449
def cachefunc(func):
    '''cache the result of function calls'''
    # XXX doesn't handle keywords args
    argcount = func.__code__.co_argcount
    if argcount == 0:
        # nullary function: a one-slot list is enough
        results = []
        def wrapper():
            if not results:
                results.append(func())
            return results[0]
        return wrapper
    memo = {}
    if argcount == 1:
        # we gain a small amount of time because
        # we don't need to pack/unpack the list
        def wrapper(arg):
            if arg not in memo:
                memo[arg] = func(arg)
            return memo[arg]
    else:
        def wrapper(*args):
            if args not in memo:
                memo[args] = func(*args)
            return memo[args]

    return wrapper
475
475
class sortdict(dict):
    '''a simple sorted dictionary

    Keys iterate in insertion order; re-assigning an existing key moves it
    to the end. The ordering lives in self._list, kept in sync with the
    underlying dict.
    '''
    def __init__(self, data=None):
        self._list = []
        if data:
            self.update(data)
    def copy(self):
        return sortdict(self)
    def __setitem__(self, key, val):
        # a re-inserted key moves to the end of the iteration order
        if key in self:
            self._list.remove(key)
        self._list.append(key)
        dict.__setitem__(self, key, val)
    def __iter__(self):
        return self._list.__iter__()
    def update(self, src):
        if isinstance(src, dict):
            # fix: use items() instead of iteritems() so this works on both
            # Python 2 and Python 3 (semantics are identical here)
            src = src.items()
        for k, v in src:
            self[k] = v
    def clear(self):
        dict.clear(self)
        self._list = []
    def items(self):
        return [(k, self[k]) for k in self._list]
    def __delitem__(self, key):
        dict.__delitem__(self, key)
        self._list.remove(key)
    def pop(self, key, *args, **kwargs):
        # fix: the result of dict.pop() was previously discarded, so pop()
        # always returned None; return it to honor the dict contract
        value = dict.pop(self, key, *args, **kwargs)
        try:
            self._list.remove(key)
        except ValueError:
            # key was absent and a default was returned by dict.pop
            pass
        return value
    def keys(self):
        return self._list
    def iterkeys(self):
        return self._list.__iter__()
    def iteritems(self):
        for k in self._list:
            yield k, self[k]
    def insert(self, index, key, val):
        self._list.insert(index, key)
        dict.__setitem__(self, key, val)
520
520
class _lrucachenode(object):
    """A node in a doubly linked list.

    Holds a reference to nodes on either side as well as a key-value
    pair for the dictionary entry.
    """
    # __slots__ keeps the many cache nodes small and attribute access fast
    __slots__ = ('next', 'prev', 'key', 'value')

    def __init__(self):
        # neighbor links; the owning lrucachedict wires these into a
        # circular list
        self.next = None
        self.prev = None

        # _notset marks an empty/recycled slot (None is a legal value)
        self.key = _notset
        self.value = None

    def markempty(self):
        """Mark the node as emptied."""
        self.key = _notset
539
539
540 class lrucachedict(object):
540 class lrucachedict(object):
541 """Dict that caches most recent accesses and sets.
541 """Dict that caches most recent accesses and sets.
542
542
543 The dict consists of an actual backing dict - indexed by original
543 The dict consists of an actual backing dict - indexed by original
544 key - and a doubly linked circular list defining the order of entries in
544 key - and a doubly linked circular list defining the order of entries in
545 the cache.
545 the cache.
546
546
547 The head node is the newest entry in the cache. If the cache is full,
547 The head node is the newest entry in the cache. If the cache is full,
548 we recycle head.prev and make it the new head. Cache accesses result in
548 we recycle head.prev and make it the new head. Cache accesses result in
549 the node being moved to before the existing head and being marked as the
549 the node being moved to before the existing head and being marked as the
550 new head node.
550 new head node.
551 """
551 """
552 def __init__(self, max):
552 def __init__(self, max):
553 self._cache = {}
553 self._cache = {}
554
554
555 self._head = head = _lrucachenode()
555 self._head = head = _lrucachenode()
556 head.prev = head
556 head.prev = head
557 head.next = head
557 head.next = head
558 self._size = 1
558 self._size = 1
559 self._capacity = max
559 self._capacity = max
560
560
561 def __len__(self):
561 def __len__(self):
562 return len(self._cache)
562 return len(self._cache)
563
563
564 def __contains__(self, k):
564 def __contains__(self, k):
565 return k in self._cache
565 return k in self._cache
566
566
567 def __iter__(self):
567 def __iter__(self):
568 # We don't have to iterate in cache order, but why not.
568 # We don't have to iterate in cache order, but why not.
569 n = self._head
569 n = self._head
570 for i in range(len(self._cache)):
570 for i in range(len(self._cache)):
571 yield n.key
571 yield n.key
572 n = n.next
572 n = n.next
573
573
574 def __getitem__(self, k):
574 def __getitem__(self, k):
575 node = self._cache[k]
575 node = self._cache[k]
576 self._movetohead(node)
576 self._movetohead(node)
577 return node.value
577 return node.value
578
578
    def __setitem__(self, k, v):
        """Insert or replace ``k``, making it the most recently used entry.

        When the cache is full, the oldest entry (head.prev) is evicted
        and its node is recycled for the new key.
        """
        node = self._cache.get(k)
        # Replace existing value and mark as newest.
        if node is not None:
            node.value = v
            self._movetohead(node)
            return

        if self._size < self._capacity:
            # Still below capacity: grow the list by one node, which is
            # inserted just before the head (i.e. in the oldest slot).
            node = self._addcapacity()
        else:
            # Grab the last/oldest item.
            node = self._head.prev

        # At capacity. Kill the old entry.
        if node.key is not _notset:
            del self._cache[node.key]

        node.key = k
        node.value = v
        self._cache[k] = node
        # And mark it as newest entry. No need to adjust order since it
        # is already self._head.prev.
        self._head = node
603
603
604 def __delitem__(self, k):
604 def __delitem__(self, k):
605 node = self._cache.pop(k)
605 node = self._cache.pop(k)
606 node.markempty()
606 node.markempty()
607
607
608 # Temporarily mark as newest item before re-adjusting head to make
608 # Temporarily mark as newest item before re-adjusting head to make
609 # this node the oldest item.
609 # this node the oldest item.
610 self._movetohead(node)
610 self._movetohead(node)
611 self._head = node.next
611 self._head = node.next
612
612
613 # Additional dict methods.
613 # Additional dict methods.
614
614
615 def get(self, k, default=None):
615 def get(self, k, default=None):
616 try:
616 try:
617 return self._cache[k]
617 return self._cache[k]
618 except KeyError:
618 except KeyError:
619 return default
619 return default
620
620
621 def clear(self):
621 def clear(self):
622 n = self._head
622 n = self._head
623 while n.key is not _notset:
623 while n.key is not _notset:
624 n.markempty()
624 n.markempty()
625 n = n.next
625 n = n.next
626
626
627 self._cache.clear()
627 self._cache.clear()
628
628
629 def copy(self):
629 def copy(self):
630 result = lrucachedict(self._capacity)
630 result = lrucachedict(self._capacity)
631 n = self._head.prev
631 n = self._head.prev
632 # Iterate in oldest-to-newest order, so the copy has the right ordering
632 # Iterate in oldest-to-newest order, so the copy has the right ordering
633 for i in range(len(self._cache)):
633 for i in range(len(self._cache)):
634 result[n.key] = n.value
634 result[n.key] = n.value
635 n = n.prev
635 n = n.prev
636 return result
636 return result
637
637
    def _movetohead(self, node):
        """Mark a node as the newest, making it the new head.

        When a node is accessed, it becomes the freshest entry in the LRU
        list, which is denoted by self._head.

        Visually, let's make ``N`` the new head node (* denotes head):

            previous/oldest <-> head <-> next/next newest

            ----<->--- A* ---<->-----
            |                       |
            E <-> D <-> N <-> C <-> B

        To:

            ----<->--- N* ---<->-----
            |                       |
            E <-> D <-> C <-> B <-> A

        This requires the following moves:

           C.next = D  (node.prev.next = node.next)
           D.prev = C  (node.next.prev = node.prev)
           E.next = N  (head.prev.next = node)
           N.prev = E  (node.prev = head.prev)
           N.next = A  (node.next = head)
           A.prev = N  (head.prev = node)
        """
        head = self._head
        # C.next = D
        node.prev.next = node.next
        # D.prev = C
        node.next.prev = node.prev
        # N.prev = E
        node.prev = head.prev
        # N.next = A
        # It is tempting to do just "head" here, however if node is
        # adjacent to head, this will do bad things.
        node.next = head.prev.next
        # E.next = N
        node.next.prev = node
        # A.prev = N
        node.prev.next = node

        self._head = node
684
684
685 def _addcapacity(self):
685 def _addcapacity(self):
686 """Add a node to the circular linked list.
686 """Add a node to the circular linked list.
687
687
688 The new node is inserted before the head node.
688 The new node is inserted before the head node.
689 """
689 """
690 head = self._head
690 head = self._head
691 node = _lrucachenode()
691 node = _lrucachenode()
692 head.prev.next = node
692 head.prev.next = node
693 node.prev = head.prev
693 node.prev = head.prev
694 node.next = head
694 node.next = head
695 head.prev = node
695 head.prev = node
696 self._size += 1
696 self._size += 1
697 return node
697 return node
698
698
def lrucachefunc(func):
    '''cache most recent results of function calls'''
    cache = {}
    order = collections.deque()
    # Single-argument functions are keyed directly on the argument;
    # everything else is keyed on the args tuple. At most 21 results are
    # retained; the least recently used one is evicted first.
    if func.__code__.co_argcount == 1:
        def f(arg):
            if arg in cache:
                order.remove(arg)
            else:
                if len(cache) > 20:
                    del cache[order.popleft()]
                cache[arg] = func(arg)
            order.append(arg)
            return cache[arg]
    else:
        def f(*args):
            if args in cache:
                order.remove(args)
            else:
                if len(cache) > 20:
                    del cache[order.popleft()]
                cache[args] = func(*args)
            order.append(args)
            return cache[args]

    return f
725
725
class propertycache(object):
    """Descriptor that caches a computed attribute on first access.

    On first read the wrapped function is invoked and its result stored
    in the instance ``__dict__`` under the same name, so subsequent
    reads hit the plain attribute and bypass this descriptor entirely.
    """
    def __init__(self, func):
        self.func = func
        self.name = func.__name__

    def __get__(self, obj, type=None):
        value = self.func(obj)
        self.cachevalue(obj, value)
        return value

    def cachevalue(self, obj, value):
        # __dict__ assignment required to bypass __setattr__ (eg: repoview)
        obj.__dict__[self.name] = value
738
738
def pipefilter(s, cmd):
    '''filter string S through command CMD, returning its output'''
    proc = subprocess.Popen(cmd, shell=True, close_fds=closefds,
                            stdin=subprocess.PIPE,
                            stdout=subprocess.PIPE)
    stdout, _stderr = proc.communicate(s)
    return stdout
745
745
def tempfilter(s, cmd):
    '''filter string S through a pair of temporary files with CMD.
    CMD is used as a template to create the real command to be run,
    with the strings INFILE and OUTFILE replaced by the real names of
    the temporary files generated.'''
    inname, outname = None, None
    try:
        # Write the input to a temp file, reserve a second temp file for
        # the output, then substitute both names into the command.
        infd, inname = tempfile.mkstemp(prefix='hg-filter-in-')
        fp = os.fdopen(infd, 'wb')
        fp.write(s)
        fp.close()
        outfd, outname = tempfile.mkstemp(prefix='hg-filter-out-')
        os.close(outfd)
        cmd = cmd.replace('INFILE', inname)
        cmd = cmd.replace('OUTFILE', outname)
        code = os.system(cmd)
        if sys.platform == 'OpenVMS' and code & 1:
            # On OpenVMS an odd status signals success.
            code = 0
        if code:
            raise Abort(_("command '%s' failed: %s") %
                        (cmd, explainexit(code)))
        return readfile(outname)
    finally:
        # Best-effort removal of whichever temp files were created.
        try:
            if inname:
                os.unlink(inname)
        except OSError:
            pass
        try:
            if outname:
                os.unlink(outname)
        except OSError:
            pass
779
779
# Dispatch table mapping filter-command prefixes to implementations;
# commands without a matching prefix fall through to pipefilter (see
# filter() below).
filtertable = {
    'tempfile:': tempfilter,
    'pipe:': pipefilter,
    }
784
784
def filter(s, cmd):
    "filter a string through a command that transforms its input to its output"
    for prefix, handler in filtertable.iteritems():
        if cmd.startswith(prefix):
            # Strip the prefix (and any following whitespace) before
            # handing the remainder to the specific filter.
            return handler(s, cmd[len(prefix):].lstrip())
    return pipefilter(s, cmd)
791
791
def binary(s):
    """return true if a string is binary data"""
    # Presence of a NUL byte is the heuristic for binary content.
    if not s:
        return False
    return '\0' in s
795
795
def increasingchunks(source, min=1024, max=65536):
    '''return no less than min bytes per chunk while data remains,
    doubling min after each chunk until it reaches max'''
    def log2(x):
        if not x:
            return 0
        bits = 0
        while x:
            x >>= 1
            bits += 1
        return bits - 1

    pending = []
    pendinglen = 0
    for chunk in source:
        pending.append(chunk)
        pendinglen += len(chunk)
        if pendinglen >= min:
            if min < max:
                # Double the threshold, or jump to the largest power of
                # two not exceeding what we are about to emit, whichever
                # is bigger — capped at max.
                min = min << 1
                nmin = 1 << log2(pendinglen)
                if nmin > min:
                    min = nmin
                if min > max:
                    min = max
            yield ''.join(pending)
            pendinglen = 0
            pending = []
    if pending:
        yield ''.join(pending)
826
826
# Re-exported here so util users need not import the error module directly.
Abort = error.Abort
828
828
def always(fn):
    """Predicate that is true for any argument."""
    return True
831
831
def never(fn):
    """Predicate that is false for any argument."""
    return False
834
834
def nogc(func):
    """disable garbage collector

    Python's garbage collector triggers a GC each time a certain number of
    container objects (the number being defined by gc.get_threshold()) are
    allocated even when marked not to be tracked by the collector. Tracking has
    no effect on when GCs are triggered, only on what objects the GC looks
    into. As a workaround, disable GC while building complex (huge)
    containers.

    This garbage collector issue have been fixed in 2.7.
    """
    def wrapper(*args, **kwargs):
        wasenabled = gc.isenabled()
        gc.disable()
        try:
            return func(*args, **kwargs)
        finally:
            # Re-enable only if it was on when we started: never turn
            # GC on behind a caller's back.
            if wasenabled:
                gc.enable()
    return wrapper
856
856
def pathto(root, n1, n2):
    '''return the relative path from one place to another.
    root should use os.sep to separate directories
    n1 should use os.sep to separate directories
    n2 should use "/" to separate directories
    returns an os.sep-separated path.

    If n1 is a relative path, it's assumed it's
    relative to root.
    n2 should always be relative to root.
    '''
    if not n1:
        return localpath(n2)
    if os.path.isabs(n1):
        if os.path.splitdrive(root)[0] != os.path.splitdrive(n1)[0]:
            # Different drives: no relative path is possible, anchor at
            # root instead.
            return os.path.join(root, localpath(n2))
        n2 = '/'.join((pconvert(root), n2))
    a, b = splitpath(n1), n2.split('/')
    a.reverse()
    b.reverse()
    # Pop the shared leading components (compared via the reversed
    # lists' tails).
    while a and b and a[-1] == b[-1]:
        a.pop()
        b.pop()
    b.reverse()
    return os.sep.join(['..'] * len(a) + b) or '.'
882
882
def mainfrozen():
    """return True if we are a frozen executable.

    The code supports py2exe (most common, Windows only) and tools/freeze
    (portable, not much used).
    """
    if safehasattr(sys, "frozen"):     # new py2exe
        return True
    if safehasattr(sys, "importers"):  # old py2exe
        return True
    return imp.is_frozen("__main__")   # tools/freeze
892
892
# the location of data files matching the source code
if mainfrozen() and getattr(sys, 'frozen', None) != 'macosx_app':
    # executable version (py2exe) doesn't support __file__
    datapath = os.path.dirname(sys.executable)
else:
    datapath = os.path.dirname(__file__)

i18n.setdatapath(datapath)

# Cached path of the 'hg' executable; resolved lazily by hgexecutable().
_hgexecutable = None
903
903
def hgexecutable():
    """return location of the 'hg' executable.

    Defaults to $HG or 'hg' in the search path.
    """
    if _hgexecutable is None:
        envhg = os.environ.get('HG')
        mainmod = sys.modules['__main__']
        if envhg:
            _sethgexecutable(envhg)
        elif mainfrozen():
            if getattr(sys, 'frozen', None) == 'macosx_app':
                # Env variable set by py2app
                _sethgexecutable(os.environ['EXECUTABLEPATH'])
            else:
                _sethgexecutable(sys.executable)
        elif os.path.basename(getattr(mainmod, '__file__', '')) == 'hg':
            _sethgexecutable(mainmod.__file__)
        else:
            exe = findexe('hg') or os.path.basename(sys.argv[0])
            _sethgexecutable(exe)
    return _hgexecutable
926
926
def _sethgexecutable(path):
    """set location of the 'hg' executable"""
    # Module-level cache read back by hgexecutable().
    global _hgexecutable
    _hgexecutable = path
931
931
932 def _isstdout(f):
932 def _isstdout(f):
933 fileno = getattr(f, 'fileno', None)
933 fileno = getattr(f, 'fileno', None)
934 return fileno and fileno() == sys.__stdout__.fileno()
934 return fileno and fileno() == sys.__stdout__.fileno()
935
935
def system(cmd, environ=None, cwd=None, onerr=None, errprefix=None, out=None):
    '''enhanced shell command execution.
    run with environment maybe modified, maybe in different dir.

    if command fails and onerr is None, return status, else raise onerr
    object as exception.

    if out is specified, it is assumed to be a file-like object that has a
    write() method. stdout and stderr will be redirected to out.'''
    if environ is None:
        environ = {}
    try:
        sys.stdout.flush()
    except Exception:
        pass
    def py2shell(val):
        'convert python object into string that is useful to shell'
        if val is None or val is False:
            return '0'
        if val is True:
            return '1'
        return str(val)
    origcmd = cmd
    cmd = quotecommand(cmd)
    if sys.platform == 'plan9' and (sys.version_info[0] == 2
                                    and sys.version_info[1] < 7):
        # subprocess kludge to work around issues in half-baked Python
        # ports, notably bichued/python:
        if not cwd is None:
            os.chdir(cwd)
        rc = os.system(cmd)
    else:
        env = dict(os.environ)
        # Environment overrides are stringified for the shell.
        env.update((k, py2shell(v)) for k, v in environ.iteritems())
        env['HG'] = hgexecutable()
        if out is None or _isstdout(out):
            rc = subprocess.call(cmd, shell=True, close_fds=closefds,
                                 env=env, cwd=cwd)
        else:
            # Caller wants output captured: stream combined stdout and
            # stderr line by line into the provided file-like object.
            proc = subprocess.Popen(cmd, shell=True, close_fds=closefds,
                                    env=env, cwd=cwd, stdout=subprocess.PIPE,
                                    stderr=subprocess.STDOUT)
            while True:
                line = proc.stdout.readline()
                if not line:
                    break
                out.write(line)
            proc.wait()
            rc = proc.returncode
    if sys.platform == 'OpenVMS' and rc & 1:
        # On OpenVMS an odd status signals success.
        rc = 0
    if rc and onerr:
        errmsg = '%s %s' % (os.path.basename(origcmd.split(None, 1)[0]),
                            explainexit(rc)[0])
        if errprefix:
            errmsg = '%s: %s' % (errprefix, errmsg)
        raise onerr(errmsg)
    return rc
994
994
def checksignature(func):
    '''wrap a function with code to check for calling errors'''
    def check(*args, **kwargs):
        try:
            return func(*args, **kwargs)
        except TypeError:
            # A traceback of depth one means the TypeError was raised at
            # the call site itself (i.e. a signature mismatch), not from
            # somewhere inside func's body.
            if len(traceback.extract_tb(sys.exc_info()[2])) == 1:
                raise error.SignatureError
            raise

    return check
1006
1006
def copyfile(src, dest, hardlink=False, copystat=False):
    '''copy a file, preserving mode and optionally other stat info like
    atime/mtime'''
    if os.path.lexists(dest):
        unlink(dest)
    # hardlinks are problematic on CIFS, quietly ignore this flag
    # until we find a way to work around it cleanly (issue4546)
    if False and hardlink:
        try:
            oslink(src, dest)
            return
        except (IOError, OSError):
            pass # fall back to normal copy
    if os.path.islink(src):
        # Recreate the link itself rather than copying its target.
        # copytime is ignored for symlinks, but in general copytime isn't
        # needed for them anyway.
        os.symlink(os.readlink(src), dest)
        return
    try:
        shutil.copyfile(src, dest)
        if copystat:
            # copystat also copies mode
            shutil.copystat(src, dest)
        else:
            shutil.copymode(src, dest)
    except shutil.Error as inst:
        raise Abort(str(inst))
1034
1034
def copyfiles(src, dst, hardlink=None, progress=lambda t, pos: None):
    """Copy a directory tree using hardlinks if possible."""
    num = 0

    if hardlink is None:
        # Hardlinking only works within a single filesystem.
        hardlink = (os.stat(src).st_dev ==
                    os.stat(os.path.dirname(dst)).st_dev)
    if hardlink:
        topic = _('linking')
    else:
        topic = _('copying')

    if os.path.isdir(src):
        os.mkdir(dst)
        for name, kind in osutil.listdir(src):
            srcname = os.path.join(src, name)
            dstname = os.path.join(dst, name)
            def nprog(t, pos):
                # Offset the child's progress by files already processed
                # at this level.
                if pos is not None:
                    return progress(t, pos + num)
            hardlink, n = copyfiles(srcname, dstname, hardlink, progress=nprog)
            num += n
    else:
        if hardlink:
            try:
                oslink(src, dst)
            except (IOError, OSError):
                # Hardlink failed: copy this file and stop trying to
                # hardlink for the rest of the tree.
                hardlink = False
                shutil.copy(src, dst)
        else:
            shutil.copy(src, dst)
        num += 1
        progress(topic, num)
    progress(topic, None)

    return hardlink, num
1071
1071
# Base filename components Windows refuses regardless of extension
# (DOS device names).
_winreservednames = '''con prn aux nul
com1 com2 com3 com4 com5 com6 com7 com8 com9
lpt1 lpt2 lpt3 lpt4 lpt5 lpt6 lpt7 lpt8 lpt9'''.split()
# Characters that may not appear anywhere in a Windows filename.
_winreservedchars = ':*?"<>|'
def checkwinfilename(path):
    r'''Check that the base-relative path is a valid filename on Windows.
    Returns None if the path is ok, or a UI string describing the problem.

    >>> checkwinfilename("just/a/normal/path")
    >>> checkwinfilename("foo/bar/con.xml")
    "filename contains 'con', which is reserved on Windows"
    >>> checkwinfilename("foo/con.xml/bar")
    "filename contains 'con', which is reserved on Windows"
    >>> checkwinfilename("foo/bar/xml.con")
    >>> checkwinfilename("foo/bar/AUX/bla.txt")
    "filename contains 'AUX', which is reserved on Windows"
    >>> checkwinfilename("foo/bar/bla:.txt")
    "filename contains ':', which is reserved on Windows"
    >>> checkwinfilename("foo/bar/b\07la.txt")
    "filename contains '\\x07', which is invalid on Windows"
    >>> checkwinfilename("foo/bar/bla ")
    "filename ends with ' ', which is not allowed on Windows"
    >>> checkwinfilename("../bar")
    >>> checkwinfilename("foo\\")
    "filename ends with '\\', which is invalid on Windows"
    >>> checkwinfilename("foo\\/bar")
    "directory name ends with '\\', which is invalid on Windows"
    '''
    if path.endswith('\\'):
        return _("filename ends with '\\', which is invalid on Windows")
    if '\\/' in path:
        return _("directory name ends with '\\', which is invalid on Windows")
    # Examine each path component individually.
    for n in path.replace('\\', '/').split('/'):
        if not n:
            continue
        for c in n:
            if c in _winreservedchars:
                return _("filename contains '%s', which is reserved "
                         "on Windows") % c
            if ord(c) <= 31:
                # Control characters are invalid in Windows filenames.
                return _("filename contains %r, which is invalid "
                         "on Windows") % c
        # Device names are reserved regardless of extension ("con.xml").
        base = n.split('.')[0]
        if base and base.lower() in _winreservednames:
            return _("filename contains '%s', which is reserved "
                     "on Windows") % base
        t = n[-1]
        if t in '. ' and n not in '..':
            return _("filename ends with '%s', which is not allowed "
                     "on Windows") % t
1122
# Pick the filename validator for the current OS: the Windows rules
# above, or the platform module's implementation elsewhere.
if os.name == 'nt':
    checkosfilename = checkwinfilename
else:
    checkosfilename = platform.checkosfilename
1127
1127
def makelock(info, pathname):
    """Create a lock at ``pathname`` whose contents are ``info``.

    A symlink is preferred (creation is atomic and the info can be read
    without opening a file).  On platforms without symlink support, an
    exclusively-created regular file is used instead.

    Raises OSError(EEXIST) if the lock already exists.
    """
    try:
        return os.symlink(info, pathname)
    except OSError as why:
        # Any other symlink failure falls through to the file fallback.
        if why.errno == errno.EEXIST:
            raise
    except AttributeError: # no symlink in os
        pass

    ld = os.open(pathname, os.O_CREAT | os.O_WRONLY | os.O_EXCL)
    try:
        # Close the descriptor even if the write fails (the original
        # code leaked ``ld`` on a failed os.write).
        os.write(ld, info)
    finally:
        os.close(ld)
1140
1140
def readlock(pathname):
    """Return the contents of the lock at ``pathname``.

    Reads the symlink target when the lock is a symlink, otherwise the
    contents of the regular lock file.
    """
    try:
        return os.readlink(pathname)
    except OSError as why:
        if why.errno not in (errno.EINVAL, errno.ENOSYS):
            raise
    except AttributeError: # no symlink in os
        pass
    # Not a symlink (or no symlink support): fall back to a plain file.
    fp = posixfile(pathname)
    contents = fp.read()
    fp.close()
    return contents
1153
1153
def fstat(fp):
    '''stat file object that may not have fileno method.'''
    fileno = getattr(fp, 'fileno', None)
    if fileno is None:
        # no file descriptor available; stat the path instead
        return os.stat(fp.name)
    return os.fstat(fileno())
1160
1160
1161 # File system features
1161 # File system features
1162
1162
def checkcase(path):
    """
    Return true if the given path is on a case-sensitive filesystem

    Requires a path (like /foo/.hg) ending with a foldable final
    directory component.
    """
    st1 = os.lstat(path)
    dirpart, base = os.path.split(path)
    folded = base.upper()
    if folded == base:
        folded = base.lower()
    if folded == base:
        # the name has no case to fold; report case-sensitive
        return True
    try:
        st2 = os.lstat(os.path.join(dirpart, folded))
    except OSError:
        # the case-folded twin doesn't exist: case is significant here
        return True
    # identical stat results mean both spellings name the same entry,
    # i.e. the filesystem is case-insensitive
    return st1 != st2
1185
1185
try:
    import re2
    # _re2 is tri-state: None means "re2 importable but not yet probed"
    # (the probe happens lazily in _re._checkre2); False means unusable.
    _re2 = None
except ImportError:
    _re2 = False
1191
1191
class _re(object):
    # Facade over the stdlib re module (imported as remod) that
    # transparently uses google-re2 when it is installed and the
    # pattern/flags allow it.

    def _checkre2(self):
        # probe re2 once and cache the verdict in the module-level _re2
        global _re2
        try:
            # check if match works, see issue3964
            _re2 = bool(re2.match(r'\[([^\[]+)\]', '[ui]'))
        except ImportError:
            _re2 = False

    def compile(self, pat, flags=0):
        '''Compile a regular expression, using re2 if possible

        For best performance, use only re2-compatible regexp features. The
        only flags from the re module that are re2-compatible are
        IGNORECASE and MULTILINE.'''
        if _re2 is None:
            self._checkre2()
        if _re2 and (flags & ~(remod.IGNORECASE | remod.MULTILINE)) == 0:
            # re2 takes flags as inline pattern syntax, not as arguments
            if flags & remod.IGNORECASE:
                pat = '(?i)' + pat
            if flags & remod.MULTILINE:
                pat = '(?m)' + pat
            try:
                return re2.compile(pat)
            except re2.error:
                # pattern uses a feature re2 lacks; fall back to re
                pass
        return remod.compile(pat, flags)

    @propertycache
    def escape(self):
        '''Return the version of escape corresponding to self.compile.

        This is imperfect because whether re2 or re is used for a particular
        function depends on the flags, etc, but it's the best we can do.
        '''
        global _re2
        if _re2 is None:
            self._checkre2()
        if _re2:
            return re2.escape
        else:
            return remod.escape

# module-level singleton; callers use util.re.compile / util.re.escape
re = _re()
1236
1236
_fspathcache = {}
def fspath(name, root):
    '''Get name in the case stored in the filesystem

    The name should be relative to root, and be normcase-ed for efficiency.

    Note that this function is unnecessary, and should not be
    called, for case-sensitive filesystems (simply because it's expensive).

    The root should be normcase-ed, too.
    '''
    def _makefspathcacheentry(dir):
        # map normcased entry name -> on-disk spelling for one directory
        return dict((normcase(n), n) for n in os.listdir(dir))

    seps = os.sep
    if os.altsep:
        seps = seps + os.altsep
    # Protect backslashes for the regexp character classes below.
    # Previously the result of str.replace() was discarded, so on
    # Windows the backslash separator was never escaped and '\' was not
    # recognized as a separator by the pattern; assign the result.
    seps = seps.replace('\\', '\\\\')
    pattern = remod.compile(r'([^%s]+)|([%s]+)' % (seps, seps))
    curdir = os.path.normpath(root)
    result = []
    for part, sep in pattern.findall(name):
        if sep:
            # separator runs are passed through unchanged
            result.append(sep)
            continue

        if curdir not in _fspathcache:
            _fspathcache[curdir] = _makefspathcacheentry(curdir)
        contents = _fspathcache[curdir]

        found = contents.get(part)
        if not found:
            # retry "once per directory" per "dirstate.walk" which
            # may take place for each patches of "hg qpush", for example
            _fspathcache[curdir] = contents = _makefspathcacheentry(curdir)
            found = contents.get(part)

        # fall back to the caller's spelling if the entry is not on disk
        result.append(found or part)
        curdir = os.path.join(curdir, part)

    return ''.join(result)
1279
1279
def checknlink(testfile):
    '''check whether hardlink count reporting works properly

    Hardlinks a scratch file next to testfile and checks that the
    filesystem reports more than one link for it.  Returns False on any
    failure (leftover scratch files, link creation failure, wrong
    count); scratch files are always cleaned up.
    '''

    # testfile may be open, so we need a separate file for checking to
    # work around issue2543 (or testfile may get lost on Samba shares)
    f1 = testfile + ".hgtmp1"
    if os.path.lexists(f1):
        return False
    try:
        posixfile(f1, 'w').close()
    except IOError:
        return False

    f2 = testfile + ".hgtmp2"
    fd = None
    try:
        oslink(f1, f2)
        # nlinks() may behave differently for files on Windows shares if
        # the file is open.
        fd = posixfile(f2)
        return nlinks(f2) > 1
    except OSError:
        return False
    finally:
        # close before unlinking, then remove both scratch files;
        # best-effort: a failed unlink is ignored
        if fd is not None:
            fd.close()
        for f in (f1, f2):
            try:
                os.unlink(f)
            except OSError:
                pass
1311
1311
def endswithsep(path):
    '''Check path ends with os.sep or os.altsep.'''
    if path.endswith(os.sep):
        return True
    # on platforms without an altsep this evaluates to the falsy altsep
    # itself, matching the original short-circuit expression
    return os.altsep and path.endswith(os.altsep)
1315
1315
def splitpath(path):
    """Split path on os.sep (and os.sep only).

    os.altsep is deliberately ignored: this is an explicit spelling of
    ``path.split(os.sep)``.  Run os.path.normpath() on the input first
    if it may contain alternate separators or redundant components.
    """
    return path.split(os.sep)
1323
1323
def gui():
    '''Are we running in a GUI?'''
    if sys.platform != 'darwin':
        # elsewhere: Windows always has a GUI; X11 needs $DISPLAY
        return os.name == "nt" or os.environ.get("DISPLAY")
    if 'SSH_CONNECTION' in os.environ:
        # handle SSH access to a box where the user is logged in
        return False
    isgui = getattr(osutil, 'isgui', None)
    if isgui:
        # check if a CoreGraphics session is available
        return isgui()
    # pure build; use a safe default
    return True
1338
1338
def mktempcopy(name, emptyok=False, createmode=None):
    """Create a temporary file with the same contents from name

    The permission bits are copied from the original file.

    If the temporary file is going to be truncated immediately, you
    can use emptyok=True as an optimization.

    Returns the name of the temporary file.
    """
    # keep the temp file in the same directory as the target so that a
    # later rename over the target cannot cross filesystems
    d, fn = os.path.split(name)
    fd, temp = tempfile.mkstemp(prefix='.%s-' % fn, dir=d)
    os.close(fd)
    # Temporary files are created with mode 0600, which is usually not
    # what we want. If the original file already exists, just copy
    # its mode. Otherwise, manually obey umask.
    copymode(name, temp, createmode)
    if emptyok:
        return temp
    try:
        try:
            ifp = posixfile(name, "rb")
        except IOError as inst:
            if inst.errno == errno.ENOENT:
                # nothing to copy; the empty temp file stands in
                return temp
            if not getattr(inst, 'filename', None):
                inst.filename = name
            raise
        ofp = posixfile(temp, "wb")
        for chunk in filechunkiter(ifp):
            ofp.write(chunk)
        ifp.close()
        ofp.close()
    except: # re-raises
        # best-effort: don't leave a stale temp file behind on failure
        try: os.unlink(temp)
        except OSError: pass
        raise
    return temp
1377
1377
class atomictempfile(object):
    '''writable file object that atomically updates a file

    All writes will go to a temporary copy of the original file. Call
    close() when you are done writing, and atomictempfile will rename
    the temporary copy to the original name, making the changes
    visible. If the object is destroyed without being closed, all your
    writes are discarded.
    '''
    def __init__(self, name, mode='w+b', createmode=None):
        self.__name = name # permanent name
        # the temp copy lives next to the target (see mktempcopy) so the
        # final rename stays on one filesystem
        self._tempname = mktempcopy(name, emptyok=('w' in mode),
                                    createmode=createmode)
        self._fp = posixfile(self._tempname, mode)

        # delegated methods
        self.write = self._fp.write
        self.seek = self._fp.seek
        self.tell = self._fp.tell
        self.fileno = self._fp.fileno

    def close(self):
        # rename over the original only on an explicit, completed close
        if not self._fp.closed:
            self._fp.close()
            rename(self._tempname, localpath(self.__name))

    def discard(self):
        # drop the temporary copy without touching the original file
        if not self._fp.closed:
            try:
                os.unlink(self._tempname)
            except OSError:
                pass
            self._fp.close()

    def __del__(self):
        if safehasattr(self, '_fp'): # constructor actually did something
            self.discard()
1415
1415
def makedirs(name, mode=None, notindexed=False):
    """recursive directory creation with parent mode inheritance

    Like os.makedirs, but creation goes through makedir so the
    notindexed flag is honored, and mode (when given) is chmod-ed onto
    each directory this call actually creates.
    """
    try:
        makedir(name, notindexed)
    except OSError as err:
        if err.errno == errno.EEXIST:
            # already there; leave its mode alone
            return
        if err.errno != errno.ENOENT or not name:
            raise
        # a parent is missing: build the chain above us, then retry
        parent = os.path.dirname(os.path.abspath(name))
        if parent == name:
            # reached the filesystem root without success
            raise
        makedirs(parent, mode, notindexed)
        makedir(name, notindexed)
    if mode is not None:
        os.chmod(name, mode)
1432
1432
def ensuredirs(name, mode=None, notindexed=False):
    """race-safe recursive directory creation

    Newly created directories are marked as "not to be indexed by
    the content indexing service", if ``notindexed`` is specified
    for "write" mode access.
    """
    if os.path.isdir(name):
        return
    parent = os.path.dirname(os.path.abspath(name))
    if parent != name:
        ensuredirs(parent, mode, notindexed)
    try:
        makedir(name, notindexed)
    except OSError as err:
        if err.errno != errno.EEXIST or not os.path.isdir(name):
            raise
        # someone else won the creation race; their directory will do
        return
    if mode is not None:
        os.chmod(name, mode)
1454
1454
def readfile(path):
    """Return the entire contents of path, read in binary mode."""
    fp = open(path, 'rb')
    try:
        return fp.read()
    finally:
        fp.close()
1458
1458
def writefile(path, text):
    """Replace the contents of path with text, written in binary mode."""
    fp = open(path, 'wb')
    try:
        fp.write(text)
    finally:
        fp.close()
1462
1462
def appendfile(path, text):
    """Append text to path in binary mode, creating it if necessary."""
    fp = open(path, 'ab')
    try:
        fp.write(text)
    finally:
        fp.close()
1466
1466
class chunkbuffer(object):
    """Allow arbitrary sized chunks of data to be efficiently read from an
    iterator over chunks of arbitrary size."""

    def __init__(self, in_iter):
        """in_iter is the iterator that's iterating over the input chunks.
        targetsize is how big a buffer to try to maintain."""
        def splitbig(chunks):
            # reslice anything over 1M into 256k pieces so a single huge
            # incoming chunk cannot dominate memory use
            for chunk in chunks:
                if len(chunk) > 2**20:
                    pos = 0
                    while pos < len(chunk):
                        end = pos + 2 ** 18
                        yield chunk[pos:end]
                        pos = end
                else:
                    yield chunk
        self.iter = splitbig(in_iter)
        # _queue holds buffered, not-yet-consumed chunks; _chunkoffset is
        # how far into _queue[0] earlier reads have already consumed
        self._queue = collections.deque()
        self._chunkoffset = 0

    def read(self, l=None):
        """Read L bytes of data from the iterator of chunks of data.
        Returns less than L bytes if the iterator runs dry.

        If size parameter is omitted, read everything"""
        if l is None:
            return ''.join(self.iter)

        left = l
        buf = []
        queue = self._queue
        while left > 0:
            # refill the queue (buffer roughly 256k ahead)
            if not queue:
                target = 2**18
                for chunk in self.iter:
                    queue.append(chunk)
                    target -= len(chunk)
                    if target <= 0:
                        break
                if not queue:
                    break

            # The easy way to do this would be to queue.popleft(), modify the
            # chunk (if necessary), then queue.appendleft(). However, for cases
            # where we read partial chunk content, this incurs 2 dequeue
            # mutations and creates a new str for the remaining chunk in the
            # queue. Our code below avoids this overhead.

            chunk = queue[0]
            chunkl = len(chunk)
            offset = self._chunkoffset

            # Use full chunk.
            if offset == 0 and left >= chunkl:
                left -= chunkl
                queue.popleft()
                buf.append(chunk)
                # self._chunkoffset remains at 0.
                continue

            chunkremaining = chunkl - offset

            # Use all of unconsumed part of chunk.
            if left >= chunkremaining:
                left -= chunkremaining
                queue.popleft()
                # offset == 0 is enabled by block above, so this won't merely
                # copy via ``chunk[0:]``.
                buf.append(chunk[offset:])
                self._chunkoffset = 0

            # Partial chunk needed.
            else:
                buf.append(chunk[offset:offset + left])
                self._chunkoffset += left
                left -= chunkremaining

        return ''.join(buf)
1547
1547
def filechunkiter(f, size=65536, limit=None):
    """Create a generator that produces the data in the file size
    (default 65536) bytes at a time, up to optional limit (default is
    to read all data). Chunks may be less than size bytes if the
    chunk is the last chunk in the file, or the file is a socket or
    some other type of file that sometimes reads less data than is
    requested."""
    assert size >= 0
    assert limit is None or limit >= 0
    while True:
        nbytes = size if limit is None else min(limit, size)
        # nbytes == 0 short-circuits without touching the file at all
        data = nbytes and f.read(nbytes)
        if not data:
            return
        if limit:
            limit -= len(data)
        yield data
1568
1568
def makedate(timestamp=None):
    """Return (unixtime, offset) for *timestamp* (default: current time).

    offset is the local timezone's distance from UTC in seconds at that
    instant, derived by differencing the naive UTC and local renderings
    of the same moment.  Negative timestamps abort.
    """
    if timestamp is None:
        timestamp = time.time()
    if timestamp < 0:
        hint = _("check your clock")
        raise Abort(_("negative timestamp: %d") % timestamp, hint=hint)
    asutc = datetime.datetime.utcfromtimestamp(timestamp)
    aslocal = datetime.datetime.fromtimestamp(timestamp)
    skew = asutc - aslocal
    return timestamp, skew.days * 86400 + skew.seconds
1581
1581
def datestr(date=None, format='%a %b %d %H:%M:%S %Y %1%2'):
    """represent a (unixtime, offset) tuple as a localized time.
    unixtime is seconds since the epoch, and offset is the time zone's
    number of seconds away from UTC."""
    t, tz = date or makedate()
    if "%1" in format or "%2" in format or "%z" in format:
        # expand the extension specifiers %z/%1/%2 into a literal +HHMM;
        # a positive offset means the zone is *behind* UTC, hence '-'
        sign = "-" if tz > 0 else "+"
        q, r = divmod(abs(tz) // 60, 60)
        format = format.replace("%z", "%1%2")
        format = format.replace("%1", "%c%02d" % (sign, q))
        format = format.replace("%2", "%02d" % r)
    d = t - tz
    # clamp to the 32-bit time_t range
    if d > 0x7fffffff:
        d = 0x7fffffff
    elif d < -0x7fffffff:
        d = -0x7fffffff
    # Never use time.gmtime() and datetime.datetime.fromtimestamp()
    # because they use the gmtime() system call which is buggy on Windows
    # for negative values.
    stamp = datetime.datetime(1970, 1, 1) + datetime.timedelta(seconds=d)
    return stamp.strftime(format)
1605
1605
def shortdate(date=None):
    """turn (timestamp, tzoff) tuple into an ISO 8601 date (YYYY-MM-DD)."""
    return datestr(date, format='%Y-%m-%d')
1609
1609
def parsetimezone(tz):
    """Parse a timezone string into a UTC offset in seconds.

    Accepts numeric offsets of the form "+HHMM"/"-HHMM" and the literal
    names "GMT" and "UTC".  Returns None for anything unrecognized --
    including the empty string, which previously raised IndexError on
    the tz[0] subscript -- so callers can safely probe the last token
    of a date string.
    """
    # check the length before subscripting so '' cannot raise
    if len(tz) == 5 and tz[0] in "+-" and tz[1:].isdigit():
        sign = 1 if tz[0] == "+" else -1
        hours = int(tz[1:3])
        minutes = int(tz[3:5])
        # the stored offset is seconds the zone is *behind* UTC, hence
        # the negation (strdate: unixtime = localunixtime + offset)
        return -sign * (hours * 60 + minutes) * 60
    if tz == "GMT" or tz == "UTC":
        return 0
    return None
1620
1620
def strdate(string, format, defaults=[]):
    """parse a localized time string and return a (unixtime, offset) tuple.
    if the string cannot be parsed, ValueError is raised.

    defaults is expected to map format-element groups ("S", "M", "HI",
    "d", "mb", "yY") to a (biased, now) pair of fallback strings for
    elements missing from format -- callers in this file pass a dict;
    the [] default is never mutated here, so sharing it is harmless.
    """
    # NOTE: unixtime = localunixtime + offset
    offset, date = parsetimezone(string.split()[-1]), string
    if offset is not None:
        # the last token was a recognized timezone; strip it before
        # handing the rest to strptime
        date = " ".join(string.split()[:-1])

    # add missing elements from defaults
    usenow = False # default to using biased defaults
    for part in ("S", "M", "HI", "d", "mb", "yY"): # decreasing specificity
        found = [True for p in part if ("%"+p) in format]
        if not found:
            # append the fallback value (and a matching specifier) so
            # strptime still sees a complete date
            date += "@" + defaults[part][usenow]
            format += "@%" + part[0]
        else:
            # We've found a specific time element, less specific time
            # elements are relative to today
            usenow = True

    timetuple = time.strptime(date, format)
    localunixtime = int(calendar.timegm(timetuple))
    if offset is None:
        # local timezone
        unixtime = int(time.mktime(timetuple))
        offset = unixtime - localunixtime
    else:
        unixtime = localunixtime + offset
    return unixtime, offset
1650
1650
def parsedate(date, formats=None, bias=None):
    """parse a localized date/time and return a (unixtime, offset) tuple.

    The date may be a "unixtime offset" string or in one of the specified
    formats. If the date already is a (unixtime, offset) tuple, it is returned.

    >>> parsedate(' today ') == parsedate(\
                                  datetime.date.today().strftime('%b %d'))
    True
    >>> parsedate( 'yesterday ') == parsedate((datetime.date.today() -\
                                               datetime.timedelta(days=1)\
                                              ).strftime('%b %d'))
    True
    >>> now, tz = makedate()
    >>> strnow, strtz = parsedate('now')
    >>> (strnow - now) < 1
    True
    >>> tz == strtz
    True
    """
    if bias is None:
        bias = {}
    # empty input means the epoch, UTC
    if not date:
        return 0, 0
    # already-parsed (unixtime, offset) tuples pass straight through
    if isinstance(date, tuple) and len(date) == 2:
        return date
    if not formats:
        formats = defaultdateformats
    date = date.strip()

    # symbolic names are accepted both in English and localized
    if date == 'now' or date == _('now'):
        return makedate()
    if date == 'today' or date == _('today'):
        date = datetime.date.today().strftime('%b %d')
    elif date == 'yesterday' or date == _('yesterday'):
        date = (datetime.date.today() -
                datetime.timedelta(days=1)).strftime('%b %d')

    try:
        # fast path: a "unixtime offset" pair as produced by makedate()
        when, offset = map(int, date.split(' '))
    except ValueError:
        # fill out defaults
        now = makedate()
        defaults = {}
        for part in ("d", "mb", "yY", "HI", "M", "S"):
            # this piece is for rounding the specific end of unknowns
            b = bias.get(part)
            if b is None:
                if part[0] in "HMS":
                    b = "00"
                else:
                    b = "0"

            # this piece is for matching the generic end to today's date
            n = datestr(now, "%" + part[0])

            defaults[part] = (b, n)

        # try each candidate format; the for/else fires when none matched
        for format in formats:
            try:
                when, offset = strdate(date, format, defaults)
            except (ValueError, OverflowError):
                pass
            else:
                break
        else:
            raise Abort(_('invalid date: %r') % date)
    # validate explicit (probably user-specified) date and
    # time zone offset. values must fit in signed 32 bits for
    # current 32-bit linux runtimes. timezones go from UTC-12
    # to UTC+14
    if abs(when) > 0x7fffffff:
        raise Abort(_('date exceeds 32 bits: %d') % when)
    if offset < -50400 or offset > 43200:
        raise Abort(_('impossible time zone offset: %d') % offset)
    return when, offset
1727
1727
def matchdate(date):
    """Return a predicate that matches a date specifier.

    Supported forms:

    - '{date}'          match to the accuracy provided
    - '<{date}'         on or before the given date
    - '>{date}'         on or after the given date
    - '-{days}'         within the last N days
    - '{date} to {date}' inclusive range
    """

    def earliest(spec):
        # round every unspecified field down (Jan, 1st, 00:00:00)
        hints = {'mb': "1", 'd': "1"}
        return parsedate(spec, extendeddateformats, hints)[0]

    def latest(spec):
        # round every unspecified field up; probe shrinking month
        # lengths until parsedate accepts the day number
        hints = {'mb': "12", 'HI': "23", 'M': "59", 'S': "59"}
        for monthend in ("31", "30", "29"):
            hints["d"] = monthend
            try:
                return parsedate(spec, extendeddateformats, hints)[0]
            except Abort:
                pass
        hints["d"] = "28"
        return parsedate(spec, extendeddateformats, hints)[0]

    date = date.strip()

    if not date:
        raise Abort(_("dates cannot consist entirely of whitespace"))
    if date[0] == "<":
        if not date[1:]:
            raise Abort(_("invalid day spec, use '<DATE'"))
        cutoff = latest(date[1:])
        return lambda x: x <= cutoff
    if date[0] == ">":
        if not date[1:]:
            raise Abort(_("invalid day spec, use '>DATE'"))
        cutoff = earliest(date[1:])
        return lambda x: x >= cutoff
    if date[0] == "-":
        try:
            days = int(date[1:])
        except ValueError:
            raise Abort(_("invalid day spec: %s") % date[1:])
        if days < 0:
            raise Abort(_('%s must be nonnegative (see "hg help dates")')
                        % date[1:])
        cutoff = makedate()[0] - days * 3600 * 24
        return lambda x: x >= cutoff
    if " to " in date:
        first, second = date.split(" to ")
        start, stop = earliest(first), latest(second)
        return lambda x: x >= start and x <= stop
    start, stop = earliest(date), latest(date)
    return lambda x: x >= start and x <= stop
1803
1803
def stringmatcher(pattern):
    """Build a matcher from a pattern string.

    The pattern may carry a 're:' prefix (regular-expression search) or a
    'literal:' prefix (forced exact match).  A missing or unrecognized
    prefix means a literal match.  Returns a (kind, pattern, matcher)
    triple where kind is 're' or 'literal' and matcher is a callable
    taking the candidate string.
    """
    if pattern.startswith('re:'):
        rawregex = pattern[3:]
        try:
            compiled = remod.compile(rawregex)
        except remod.error as inst:
            raise error.ParseError(_('invalid regular expression: %s')
                                   % inst)
        return 're', rawregex, compiled.search
    if pattern.startswith('literal:'):
        pattern = pattern[8:]
    # unknown prefixes fall through and behave as literals
    return 'literal', pattern, pattern.__eq__
1842
1842
def shortuser(user):
    """Return a short representation of a user name or email address."""
    # drop everything from the '@' on, then keep only what follows '<'
    at = user.find('@')
    if at >= 0:
        user = user[:at]
    lt = user.find('<')
    if lt >= 0:
        user = user[lt + 1:]
    # finally truncate at the first space or dot, in that order
    for stop in (' ', '.'):
        pos = user.find(stop)
        if pos >= 0:
            user = user[:pos]
    return user
1858
1858
def emailuser(user):
    """Return the user portion of an email address."""
    at = user.find('@')
    if at >= 0:
        user = user[:at]
    # find('<') is -1 when absent, so '+ 1' yields 0 and the slice is a no-op
    return user[user.find('<') + 1:]
1868
1868
def email(author):
    '''get email of author.'''
    close = author.find('>')
    # slice end is None (rest of string) when no closing bracket exists
    end = close if close != -1 else None
    return author[author.find('<') + 1:end]
1875
1875
def ellipsis(text, maxlength=400):
    """Trim string to at most maxlength (default: 400) columns in display."""
    # encoding.trim measures display columns (wide chars count double),
    # appending '...' when truncation happens
    return encoding.trim(text, maxlength, ellipsis='...')
1879
1879
def unitcountfn(*unittable):
    '''return a function that renders a readable count of some quantity'''

    def render(quantity):
        # scan units largest-first; the last table row is the fallback
        # (it also catches values below every threshold, e.g. negatives)
        for scale, unit, fmt in unittable:
            if quantity >= unit * scale:
                return fmt % (quantity / float(unit))
        return unittable[-1][2] % quantity

    return render
1890
1890
# bytecount(n) renders an integer byte count as a short human-readable
# string.  Three rows per unit give decreasing precision as the leading
# digits grow: '1.23 GB', '12.3 GB', '123 GB'.
bytecount = unitcountfn(
    (100, 1 << 30, _('%.0f GB')),
    (10, 1 << 30, _('%.1f GB')),
    (1, 1 << 30, _('%.2f GB')),
    (100, 1 << 20, _('%.0f MB')),
    (10, 1 << 20, _('%.1f MB')),
    (1, 1 << 20, _('%.2f MB')),
    (100, 1 << 10, _('%.0f KB')),
    (10, 1 << 10, _('%.1f KB')),
    (1, 1 << 10, _('%.2f KB')),
    (1, 1, _('%.0f bytes')),
    )
1903
1903
def uirepr(s):
    """Return repr(s) with doubled backslashes collapsed.

    repr() escapes each backslash, which makes Windows paths unreadable
    in user-facing output; undo that doubling.
    """
    rendered = repr(s)
    return rendered.replace('\\\\', '\\')
1907
1907
1908 # delay import of textwrap
1908 # delay import of textwrap
def MBTextWrapper(**kwargs):
    """Return a multi-byte-aware TextWrapper instance.

    The class is built lazily on first call (textwrap import is delayed);
    afterwards this name is rebound to the class itself so later calls
    construct instances directly.
    """
    class tw(textwrap.TextWrapper):
        """
        Extend TextWrapper for width-awareness.

        Neither number of 'bytes' in any encoding nor 'characters' is
        appropriate to calculate terminal columns for specified string.

        Original TextWrapper implementation uses built-in 'len()' directly,
        so overriding is needed to use width information of each characters.

        In addition, characters classified into 'ambiguous' width are
        treated as wide in East Asian area, but as narrow in other.

        This requires use decision to determine width of such characters.
        """
        def _cutdown(self, ucstr, space_left):
            # split ucstr so the first part occupies at most space_left
            # display columns (per encoding.ucolwidth)
            l = 0
            colwidth = encoding.ucolwidth
            for i in xrange(len(ucstr)):
                l += colwidth(ucstr[i])
                if space_left < l:
                    return (ucstr[:i], ucstr[i:])
            return ucstr, ''

        # overriding of base class
        def _handle_long_word(self, reversed_chunks, cur_line, cur_len, width):
            space_left = max(width - cur_len, 1)

            if self.break_long_words:
                cut, res = self._cutdown(reversed_chunks[-1], space_left)
                cur_line.append(cut)
                reversed_chunks[-1] = res
            elif not cur_line:
                cur_line.append(reversed_chunks.pop())

        # this overriding code is imported from TextWrapper of Python 2.6
        # to calculate columns of string by 'encoding.ucolwidth()'
        def _wrap_chunks(self, chunks):
            colwidth = encoding.ucolwidth

            lines = []
            if self.width <= 0:
                raise ValueError("invalid width %r (must be > 0)" % self.width)

            # Arrange in reverse order so items can be efficiently popped
            # from a stack of chucks.
            chunks.reverse()

            while chunks:

                # Start the list of chunks that will make up the current line.
                # cur_len is just the length of all the chunks in cur_line.
                cur_line = []
                cur_len = 0

                # Figure out which static string will prefix this line.
                if lines:
                    indent = self.subsequent_indent
                else:
                    indent = self.initial_indent

                # Maximum width for this line.
                width = self.width - len(indent)

                # First chunk on line is whitespace -- drop it, unless this
                # is the very beginning of the text (i.e. no lines started yet).
                if self.drop_whitespace and chunks[-1].strip() == '' and lines:
                    del chunks[-1]

                while chunks:
                    l = colwidth(chunks[-1])

                    # Can at least squeeze this chunk onto the current line.
                    if cur_len + l <= width:
                        cur_line.append(chunks.pop())
                        cur_len += l

                    # Nope, this line is full.
                    else:
                        break

                # The current line is full, and the next chunk is too big to
                # fit on *any* line (not just this one).
                if chunks and colwidth(chunks[-1]) > width:
                    self._handle_long_word(chunks, cur_line, cur_len, width)

                # If the last chunk on this line is all whitespace, drop it.
                if (self.drop_whitespace and
                    cur_line and cur_line[-1].strip() == ''):
                    del cur_line[-1]

                # Convert current line back to a string and store it in list
                # of all lines (return value).
                if cur_line:
                    lines.append(indent + ''.join(cur_line))

            return lines

    # memoize: replace this factory function with the class itself so the
    # class body is only built once
    global MBTextWrapper
    MBTextWrapper = tw
    return tw(**kwargs)
2011
2011
def wrap(line, width, initindent='', hangindent=''):
    """Word-wrap ``line`` to ``width`` display columns.

    ``initindent`` prefixes the first output line, ``hangindent`` every
    subsequent one.  Input/output are byte strings in the local encoding;
    wrapping is done on the decoded text so column counts are width-aware.
    """
    widest = max(len(hangindent), len(initindent))
    if width <= widest:
        # adjust for weird terminal size
        width = max(78, widest + 1)
    decode = lambda s: s.decode(encoding.encoding, encoding.encodingmode)
    line = decode(line)
    initindent = decode(initindent)
    hangindent = decode(hangindent)
    wrapper = MBTextWrapper(width=width,
                            initial_indent=initindent,
                            subsequent_indent=hangindent)
    return wrapper.fill(line).encode(encoding.encoding)
2024
2024
def iterlines(iterator):
    """Yield each line of every chunk produced by ``iterator``."""
    for piece in iterator:
        for single in piece.splitlines():
            yield single
2029
2029
def expandpath(path):
    """Expand environment variables and '~' constructs in ``path``."""
    withvars = os.path.expandvars(path)
    return os.path.expanduser(withvars)
2032
2032
def hgcmd():
    """Return the command used to execute current hg

    This is different from hgexecutable() because on Windows we want
    to avoid things opening new shell windows like batch files, so we
    get either the python call or current executable.
    """
    if not mainfrozen():
        return gethgcmd()
    if getattr(sys, 'frozen', None) == 'macosx_app':
        # Env variable set by py2app
        return [os.environ['EXECUTABLEPATH']]
    return [sys.executable]
2047
2047
def rundetached(args, condfn):
    """Execute the argument list in a detached process.

    condfn is a callable which is called repeatedly and should return
    True once the child process is known to have started successfully.
    At this point, the child process PID is returned. If the child
    process fails to start or finishes before condfn() evaluates to
    True, return -1.
    """
    # Windows case is easier because the child process is either
    # successfully starting and validating the condition or exiting
    # on failure. We just poll on its PID. On Unix, if the child
    # process fails to start, it will be left in a zombie state until
    # the parent wait on it, which we cannot do since we expect a long
    # running process on success. Instead we listen for SIGCHLD telling
    # us our child process terminated.
    terminated = set()
    def handler(signum, frame):
        # record the (pid, status) of any reaped child so the poll loop
        # below can notice a failed start
        terminated.add(os.wait())
    prevhandler = None
    # SIGCHLD does not exist on Windows; skip the handler dance there
    SIGCHLD = getattr(signal, 'SIGCHLD', None)
    if SIGCHLD is not None:
        prevhandler = signal.signal(SIGCHLD, handler)
    try:
        pid = spawndetached(args)
        while not condfn():
            # the child may have finished its startup work and exited
            # before we polled, so re-check condfn() after seeing it die
            if ((pid in terminated or not testpid(pid))
                and not condfn()):
                return -1
            time.sleep(0.1)
        return pid
    finally:
        # always restore the previous SIGCHLD disposition
        if prevhandler is not None:
            signal.signal(signal.SIGCHLD, prevhandler)
2082
2082
def interpolate(prefix, mapping, s, fn=None, escape_prefix=False):
    """Return the result of interpolating items in the mapping into string s.

    prefix is a single character string, or a two character string with
    a backslash as the first character if the prefix needs to be escaped in
    a regular expression.

    fn is an optional function that will be applied to the replacement text
    just before replacement.

    escape_prefix is an optional flag that allows using doubled prefix for
    its escaping.
    """
    fn = fn or (lambda text: text)
    keys = '|'.join(mapping.keys())
    if escape_prefix:
        keys += '|' + prefix
        # register the (unescaped) prefix char as mapping to itself so a
        # doubled prefix collapses to a single one
        escchar = prefix[1:] if len(prefix) > 1 else prefix
        mapping[escchar] = escchar
    matcher = remod.compile(r'%s(%s)' % (prefix, keys))
    return matcher.sub(lambda m: fn(mapping[m.group()[1:]]), s)
2107
2107
def getport(port):
    """Return the port for a given network service.

    If port is an integer, it's returned as is. If it's a string, it's
    looked up using socket.getservbyname(). If there's no matching
    service, error.Abort is raised.
    """
    try:
        return int(port)
    except ValueError:
        pass

    # not numeric: resolve it as a well-known service name
    try:
        return socket.getservbyname(port)
    except socket.error:
        raise Abort(_("no port number associated with service '%s'") % port)
2124
2124
# recognized spellings of configuration booleans; keys are matched against
# the lowercased input in parsebool() below
_booleans = {'1': True, 'yes': True, 'true': True, 'on': True, 'always': True,
             '0': False, 'no': False, 'false': False, 'off': False,
             'never': False}
2128
2128
def parsebool(s):
    """Parse s into a boolean.

    If s is not a valid boolean, returns None.
    """
    key = s.lower()
    return _booleans.get(key, None)
2135
2135
# every hex digit in both cases, used to enumerate two-character escapes
_hexdig = '0123456789ABCDEFabcdef'
# precomputed map from any two-hex-digit string (mixed case included) to
# its character, so %-unquoting is a single dict lookup per escape
_hextochr = dict((a + b, chr(int(a + b, 16)))
                 for a in _hexdig for b in _hexdig)
2139
2139
def _urlunquote(s):
    """Decode HTTP/HTML % encoding.

    >>> _urlunquote('abc%20def')
    'abc def'
    """
    pieces = s.split('%')
    # fast path: no percent signs at all
    if len(pieces) == 1:
        return s
    out = pieces[0]
    for piece in pieces[1:]:
        try:
            out += _hextochr[piece[:2]] + piece[2:]
        except KeyError:
            # not a valid two-digit escape: keep the '%' literally
            out += '%' + piece
        except UnicodeDecodeError:
            # mixing decoded bytes with a unicode string: decode explicitly
            out += unichr(int(piece[:2], 16)) + piece[2:]
    return out
2159
2159
class url(object):
    r"""Reliable URL parser.

    This parses URLs and provides attributes for the following
    components:

    <scheme>://<user>:<passwd>@<host>:<port>/<path>?<query>#<fragment>

    Missing components are set to None. The only exception is
    fragment, which is set to '' if present but empty.

    If parsefragment is False, fragment is included in query. If
    parsequery is False, query is included in path. If both are
    False, both fragment and query are included in path.

    See http://www.ietf.org/rfc/rfc2396.txt for more information.

    Note that for backward compatibility reasons, bundle URLs do not
    take host names. That means 'bundle://../' has a path of '../'.

    Examples:

    >>> url('http://www.ietf.org/rfc/rfc2396.txt')
    <url scheme: 'http', host: 'www.ietf.org', path: 'rfc/rfc2396.txt'>
    >>> url('ssh://[::1]:2200//home/joe/repo')
    <url scheme: 'ssh', host: '[::1]', port: '2200', path: '/home/joe/repo'>
    >>> url('file:///home/joe/repo')
    <url scheme: 'file', path: '/home/joe/repo'>
    >>> url('file:///c:/temp/foo/')
    <url scheme: 'file', path: 'c:/temp/foo/'>
    >>> url('bundle:foo')
    <url scheme: 'bundle', path: 'foo'>
    >>> url('bundle://../foo')
    <url scheme: 'bundle', path: '../foo'>
    >>> url(r'c:\foo\bar')
    <url path: 'c:\\foo\\bar'>
    >>> url(r'\\blah\blah\blah')
    <url path: '\\\\blah\\blah\\blah'>
    >>> url(r'\\blah\blah\blah#baz')
    <url path: '\\\\blah\\blah\\blah', fragment: 'baz'>
    >>> url(r'file:///C:\users\me')
    <url scheme: 'file', path: 'C:\\users\\me'>

    Authentication credentials:

    >>> url('ssh://joe:xyz@x/repo')
    <url scheme: 'ssh', user: 'joe', passwd: 'xyz', host: 'x', path: 'repo'>
    >>> url('ssh://joe@x/repo')
    <url scheme: 'ssh', user: 'joe', host: 'x', path: 'repo'>

    Query strings and fragments:

    >>> url('http://host/a?b#c')
    <url scheme: 'http', host: 'host', path: 'a', query: 'b', fragment: 'c'>
    >>> url('http://host/a?b#c', parsequery=False, parsefragment=False)
    <url scheme: 'http', host: 'host', path: 'a?b#c'>
    """

    # characters left unescaped when re-quoting user/passwd components
    _safechars = "!~*'()+"
    # characters left unescaped when re-quoting path/fragment components
    _safepchars = "/!~*'()+:\\"
    # matches a leading '<scheme>:' prefix (RFC 2396 scheme syntax)
    _matchscheme = remod.compile(r'^[a-zA-Z0-9+.\-]+:').match

    def __init__(self, path, parsequery=True, parsefragment=True):
        # We slowly chomp away at path until we have only the path left
        self.scheme = self.user = self.passwd = self.host = None
        self.port = self.path = self.query = self.fragment = None
        # _localpath stays True for plain filesystem paths (no scheme)
        self._localpath = True
        self._hostport = ''
        self._origpath = path

        if parsefragment and '#' in path:
            path, self.fragment = path.split('#', 1)
            if not path:
                path = None

        # special case for Windows drive letters and UNC paths
        if hasdriveletter(path) or path.startswith(r'\\'):
            self.path = path
            return

        # For compatibility reasons, we can't handle bundle paths as
        # normal URLS
        if path.startswith('bundle:'):
            self.scheme = 'bundle'
            path = path[7:]
            if path.startswith('//'):
                path = path[2:]
            self.path = path
            return

        if self._matchscheme(path):
            parts = path.split(':', 1)
            if parts[0]:
                self.scheme, path = parts
                self._localpath = False

        if not path:
            path = None
            if self._localpath:
                self.path = ''
                return
        else:
            if self._localpath:
                self.path = path
                return

        if parsequery and '?' in path:
            path, self.query = path.split('?', 1)
            if not path:
                path = None
            if not self.query:
                self.query = None

        # // is required to specify a host/authority
        if path and path.startswith('//'):
            parts = path[2:].split('/', 1)
            if len(parts) > 1:
                self.host, path = parts
            else:
                self.host = parts[0]
                path = None
            if not self.host:
                self.host = None
                # path of file:///d is /d
                # path of file:///d:/ is d:/, not /d:/
                if path and not hasdriveletter(path):
                    path = '/' + path

        if self.host and '@' in self.host:
            self.user, self.host = self.host.rsplit('@', 1)
            if ':' in self.user:
                self.user, self.passwd = self.user.split(':', 1)
            if not self.host:
                self.host = None

        # Don't split on colons in IPv6 addresses without ports
        if (self.host and ':' in self.host and
            not (self.host.startswith('[') and self.host.endswith(']'))):
            self._hostport = self.host
            self.host, self.port = self.host.rsplit(':', 1)
            if not self.host:
                self.host = None

        if (self.host and self.scheme == 'file' and
            self.host not in ('localhost', '127.0.0.1', '[::1]')):
            raise Abort(_('file:// URLs can only refer to localhost'))

        self.path = path

        # leave the query string escaped
        # (each stored component is unescaped so attribute access sees
        # the decoded value; __str__ re-quotes them on the way out)
        for a in ('user', 'passwd', 'host', 'port',
                  'path', 'fragment'):
            v = getattr(self, a)
            if v is not None:
                setattr(self, a, _urlunquote(v))

    def __repr__(self):
        attrs = []
        for a in ('scheme', 'user', 'passwd', 'host', 'port', 'path',
                  'query', 'fragment'):
            v = getattr(self, a)
            if v is not None:
                attrs.append('%s: %r' % (a, v))
        return '<url %s>' % ', '.join(attrs)

    def __str__(self):
        r"""Join the URL's components back into a URL string.

        Examples:

        >>> str(url('http://user:pw@host:80/c:/bob?fo:oo#ba:ar'))
        'http://user:pw@host:80/c:/bob?fo:oo#ba:ar'
        >>> str(url('http://user:pw@host:80/?foo=bar&baz=42'))
        'http://user:pw@host:80/?foo=bar&baz=42'
        >>> str(url('http://user:pw@host:80/?foo=bar%3dbaz'))
        'http://user:pw@host:80/?foo=bar%3dbaz'
        >>> str(url('ssh://user:pw@[::1]:2200//home/joe#'))
        'ssh://user:pw@[::1]:2200//home/joe#'
        >>> str(url('http://localhost:80//'))
        'http://localhost:80//'
        >>> str(url('http://localhost:80/'))
        'http://localhost:80/'
        >>> str(url('http://localhost:80'))
        'http://localhost:80/'
        >>> str(url('bundle:foo'))
        'bundle:foo'
        >>> str(url('bundle://../foo'))
        'bundle:../foo'
        >>> str(url('path'))
        'path'
        >>> str(url('file:///tmp/foo/bar'))
        'file:///tmp/foo/bar'
        >>> str(url('file:///c:/tmp/foo/bar'))
        'file:///c:/tmp/foo/bar'
        >>> print url(r'bundle:foo\bar')
        bundle:foo\bar
        >>> print url(r'file:///D:\data\hg')
        file:///D:\data\hg
        """
        # local paths (and bundles) round-trip without quoting
        if self._localpath:
            s = self.path
            if self.scheme == 'bundle':
                s = 'bundle:' + s
            if self.fragment:
                s += '#' + self.fragment
            return s

        s = self.scheme + ':'
        if self.user or self.passwd or self.host:
            s += '//'
        elif self.scheme and (not self.path or self.path.startswith('/')
                              or hasdriveletter(self.path)):
            s += '//'
            if hasdriveletter(self.path):
                s += '/'
        if self.user:
            s += urllib.quote(self.user, safe=self._safechars)
        if self.passwd:
            s += ':' + urllib.quote(self.passwd, safe=self._safechars)
        if self.user or self.passwd:
            s += '@'
        if self.host:
            # bracketed IPv6 literals must not be percent-quoted
            if not (self.host.startswith('[') and self.host.endswith(']')):
                s += urllib.quote(self.host)
            else:
                s += self.host
        if self.port:
            s += ':' + urllib.quote(self.port)
        if self.host:
            s += '/'
        if self.path:
            # TODO: similar to the query string, we should not unescape the
            # path when we store it, the path might contain '%2f' = '/',
            # which we should *not* escape.
            s += urllib.quote(self.path, safe=self._safepchars)
        if self.query:
            # we store the query in escaped form.
            s += '?' + self.query
        if self.fragment is not None:
            s += '#' + urllib.quote(self.fragment, safe=self._safepchars)
        return s

    def authinfo(self):
        # Render the URL without credentials by temporarily clearing
        # them, then restore; returns (url-without-auth, authtuple).
        user, passwd = self.user, self.passwd
        try:
            self.user, self.passwd = None, None
            s = str(self)
        finally:
            self.user, self.passwd = user, passwd
        if not self.user:
            return (s, None)
        # authinfo[1] is passed to urllib2 password manager, and its
        # URIs must not contain credentials. The host is passed in the
        # URIs list because Python < 2.4.3 uses only that to search for
        # a password.
        return (s, (None, (s, self.host),
                    self.user, self.passwd or ''))

    def isabs(self):
        # "absolute" here means: cannot be joined onto a base path
        if self.scheme and self.scheme != 'file':
            return True # remote URL
        if hasdriveletter(self.path):
            return True # absolute for our purposes - can't be joined()
        if self.path.startswith(r'\\'):
            return True # Windows UNC path
        if self.path.startswith('/'):
            return True # POSIX-style
        return False

    def localpath(self):
        if self.scheme == 'file' or self.scheme == 'bundle':
            path = self.path or '/'
            # For Windows, we need to promote hosts containing drive
            # letters to paths with drive letters.
            if hasdriveletter(self._hostport):
                path = self._hostport + '/' + self.path
            elif (self.host is not None and self.path
                  and not hasdriveletter(path)):
                path = '/' + path
            return path
        return self._origpath

    def islocal(self):
        '''whether localpath will return something that posixfile can open'''
        return (not self.scheme or self.scheme == 'file'
                or self.scheme == 'bundle')
2446
2446
def hasscheme(path):
    """Report whether path carries a URL scheme (e.g. 'http://...')."""
    u = url(path)
    return bool(u.scheme)
2449
2449
def hasdriveletter(path):
    """Return a truthy value when path starts with a Windows drive
    letter like 'c:'; falsy inputs are passed through unchanged."""
    if not path:
        return path
    return path[0:1].isalpha() and path[1:2] == ':'
2452
2452
def urllocalpath(path):
    """Return the local filesystem path for path, parsed as a URL with
    query and fragment parsing disabled."""
    u = url(path, parsequery=False, parsefragment=False)
    return u.localpath()
2455
2455
def hidepassword(u):
    '''hide user credential in a url string'''
    parsed = url(u)
    if parsed.passwd:
        # keep the URL shape but mask the secret
        parsed.passwd = '***'
    return str(parsed)
2462
2462
def removeauth(u):
    '''remove all authentication information from a url string'''
    parsed = url(u)
    parsed.user = None
    parsed.passwd = None
    return str(parsed)
2468
2468
def isatty(fp):
    """Report whether fp is attached to a tty; objects without a
    working isatty() are treated as not-a-tty."""
    try:
        result = fp.isatty()
    except AttributeError:
        result = False
    return result
2474
2474
# Pretty-printer for durations built via unitcountfn: each entry is
# (threshold multiplier, unit divisor, format), tried from seconds
# down to nanoseconds so the rendered value keeps a readable width.
timecount = unitcountfn(
    (1, 1e3, _('%.0f s')),
    (100, 1, _('%.1f s')),
    (10, 1, _('%.2f s')),
    (1, 1, _('%.3f s')),
    (100, 0.001, _('%.1f ms')),
    (10, 0.001, _('%.2f ms')),
    (1, 0.001, _('%.3f ms')),
    (100, 0.000001, _('%.1f us')),
    (10, 0.000001, _('%.2f us')),
    (1, 0.000001, _('%.3f us')),
    (100, 0.000000001, _('%.1f ns')),
    (10, 0.000000001, _('%.2f ns')),
    (1, 0.000000001, _('%.3f ns')),
    )

# Current indentation depth for nested @timed reports; a one-element
# list so the decorator's closures can mutate the shared value.
_timenesting = [0]
2492
2492
def timed(func):
    '''Report the execution time of a function call to stderr.

    During development, use as a decorator when you need to measure
    the cost of a function, e.g. as follows:

    @util.timed
    def foo(a, b, c):
        pass
    '''

    def wrapper(*args, **kwargs):
        begin = time.time()
        step = 2
        # deepen the shared indent so nested timed calls line up
        _timenesting[0] += step
        try:
            return func(*args, **kwargs)
        finally:
            duration = time.time() - begin
            _timenesting[0] -= step
            sys.stderr.write('%s%s: %s\n' %
                             (' ' * _timenesting[0], func.__name__,
                              timecount(duration)))
    return wrapper
2517
2517
# Size suffixes and their byte multipliers; the single-letter forms
# must come before 'b' so that e.g. 'kb' is not matched as plain 'b'
# first (endswith is tried in tuple order).
_sizeunits = (('m', 2**20), ('k', 2**10), ('g', 2**30),
              ('kb', 2**10), ('mb', 2**20), ('gb', 2**30), ('b', 1))

def sizetoint(s):
    '''Convert a space specifier to a byte count.

    >>> sizetoint('30')
    30
    >>> sizetoint('2.2kb')
    2252
    >>> sizetoint('6M')
    6291456
    '''
    spec = s.strip().lower()
    try:
        for suffix, multiplier in _sizeunits:
            if spec.endswith(suffix):
                return int(float(spec[:-len(suffix)]) * multiplier)
        # no recognized suffix: a bare integer
        return int(spec)
    except ValueError:
        raise error.ParseError(_("couldn't parse size: %s") % s)
2539
2539
class hooks(object):
    '''A collection of hook functions that can be used to extend a
    function's behavior. Hooks are called in lexicographic order,
    based on the names of their sources.'''

    def __init__(self):
        # list of (source, callable) pairs; kept unsorted until called
        self._hooks = []

    def add(self, source, hook):
        self._hooks.append((source, hook))

    def __call__(self, *args):
        # sort in place by source name so call order is deterministic
        self._hooks.sort(key=lambda pair: pair[0])
        return [fn(*args) for _source, fn in self._hooks]
2557
2557
def getstackframes(skip=0, line=' %-*s in %s\n', fileline='%s:%s'):
    '''Yields lines for a nicely formatted stacktrace.
    Skips the 'skip' last entries.
    Each file+linenumber is formatted according to fileline.
    Each line is formatted according to line.
    If line is None, it yields:
    length of longest filepath+line number,
    filepath+linenumber,
    function

    Not be used in production code but very convenient while developing.
    '''
    frames = traceback.extract_stack()[:-skip - 1]
    entries = [(fileline % (fname, lineno), func)
               for fname, lineno, func, _text in frames]
    if not entries:
        return
    # widest location string, used to align the function names
    width = max(len(loc) for loc, _func in entries)
    for loc, func in entries:
        if line is None:
            yield (width, loc, func)
        else:
            yield line % (width, loc, func)
2579
2579
def debugstacktrace(msg='stacktrace', skip=0, f=sys.stderr, otherf=sys.stdout):
    '''Writes a message to f (stderr) with a nicely formatted stacktrace.
    Skips the 'skip' last entries. By default it will flush stdout first.
    It can be used everywhere and intentionally does not require an ui object.
    Not be used in production code but very convenient while developing.
    '''
    if otherf:
        # flush the other stream first so interleaved output stays ordered
        otherf.flush()
    f.write('%s at:\n' % msg)
    # skip + 1 also hides this function's own frame
    f.writelines(getstackframes(skip + 1))
    f.flush()
2592
2592
class dirs(object):
    '''a multiset of directory names from a dirstate or manifest'''

    def __init__(self, map, skip=None):
        self._dirs = {}
        incpath = self.addpath
        if safehasattr(map, 'iteritems') and skip is not None:
            # dirstate-like mapping: only count entries whose state
            # differs from 'skip'
            for fname, entry in map.iteritems():
                if entry[0] != skip:
                    incpath(fname)
        else:
            for fname in map:
                incpath(fname)

    def addpath(self, path):
        # bump each ancestor's refcount; once one ancestor is already
        # known, all of its parents are too, so stop there
        counters = self._dirs
        for base in finddirs(path):
            existing = counters.get(base)
            if existing is not None:
                counters[base] = existing + 1
                return
            counters[base] = 1

    def delpath(self, path):
        # mirror of addpath: drop each ancestor's refcount, stopping
        # at the first ancestor that remains referenced
        counters = self._dirs
        for base in finddirs(path):
            remaining = counters[base]
            if remaining > 1:
                counters[base] = remaining - 1
                return
            del counters[base]

    def __iter__(self):
        return self._dirs.iterkeys()

    def __contains__(self, d):
        return d in self._dirs
2628
2628
# Prefer the C implementation of 'dirs' from the parsers extension
# when it is available.
if safehasattr(parsers, 'dirs'):
    dirs = parsers.dirs
2631
2631
def finddirs(path):
    """Yield each ancestor directory of path, deepest first."""
    cut = path.rfind('/')
    while cut >= 0:
        yield path[:cut]
        cut = path.rfind('/', 0, cut)
2637
2637
2638 # compression utility
2638 # compression utility
2639
2639
class nocompress(object):
    """Pass-through 'compressor' used when no compression is wanted."""
    def compress(self, x):
        # identity: hand the data back untouched
        return x
    def flush(self):
        # nothing is buffered, so there is nothing left to emit
        return ""
2645
2645
# Map of bundle compression type to a zero-argument factory producing
# a fresh compressor object; the lambdas defer touching bz2/zlib until
# a compressor is actually requested.
compressors = {
    None: nocompress,
    # lambda to prevent early import
    'BZ': lambda: bz2.BZ2Compressor(),
    'GZ': lambda: zlib.compressobj(),
    }
# also support the old form by courtesies
compressors['UN'] = compressors[None]
2654
2654
def _makedecompressor(decompcls):
    """Return a function that wraps a file-like object in a chunkbuffer
    yielding data decompressed by a fresh decompcls() instance."""
    def stream(fh):
        engine = decompcls()
        for block in filechunkiter(fh):
            yield engine.decompress(block)
    def wrapper(fh):
        return chunkbuffer(stream(fh))
    return wrapper
2663
2663
class ctxmanager(object):
    '''A context manager for use in 'with' blocks to allow multiple
    contexts to be entered at once. This is both safer and more
    flexible than contextlib.nested.

    Once Mercurial supports Python 2.7+, this will become mostly
    unnecessary.
    '''

    def __init__(self, *args):
        '''Accepts a list of no-argument functions that return context
        managers. These will be invoked at __call__ time.'''
        self._pending = args
        self._atexit = []

    def __enter__(self):
        return self

    def enter(self):
        '''Create and enter context managers in the order in which they were
        passed to the constructor.

        Returns the list of values produced by each manager's __enter__.
        '''
        values = []
        for func in self._pending:
            obj = func()
            values.append(obj.__enter__())
            self._atexit.append(obj.__exit__)
        # entering twice would double-register exit functions
        del self._pending
        return values

    def atexit(self, func, *args, **kwargs):
        '''Add a function to call when this context manager exits. The
        ordering of multiple atexit calls is unspecified, save that
        they will happen before any __exit__ functions.'''
        def wrapper(exc_type, exc_val, exc_tb):
            func(*args, **kwargs)
        self._atexit.append(wrapper)
        return func

    def __exit__(self, exc_type, exc_val, exc_tb):
        '''Context managers are exited in the reverse order from which
        they were created.'''
        received = exc_type is not None
        suppressed = False
        pending = None
        self._atexit.reverse()
        for exitfunc in self._atexit:
            try:
                if exitfunc(exc_type, exc_val, exc_tb):
                    # this manager suppressed the exception; later
                    # (outer) managers see a clean exit
                    suppressed = True
                    exc_type = None
                    exc_val = None
                    exc_tb = None
            except BaseException:
                # remember the most recent failure raised by an exit
                # function, keep unwinding the remaining managers, and
                # re-raise it afterwards
                # (fix: dropped a redundant duplicate assignment of
                # 'pending' that was immediately overwritten here)
                exc_type, exc_val, exc_tb = pending = sys.exc_info()
        del self._atexit
        if pending:
            raise exc_val
        return received and suppressed
2723
2723
def _bz2():
    '''Return a BZ2 decompressor primed for a header-stripped stream.'''
    decomp = bz2.BZ2Decompressor()
    # The stream's leading 'BZ' magic was stripped by the caller; replay
    # it so the decompressor sees a well-formed bzip2 header.
    decomp.decompress('BZ')
    return decomp
2730
2730
# Map a compression-type marker (as stored in bundle headers) to a
# factory wrapping a file-like object in a decompressing reader.
decompressors = {None: lambda fh: fh,
                 # bzip2 stream whose 'BZ' magic was already consumed
                 '_truncatedBZ': _makedecompressor(_bz2),
                 'BZ': _makedecompressor(lambda: bz2.BZ2Decompressor()),
                 'GZ': _makedecompressor(lambda: zlib.decompressobj()),
                 }
# also support the old form by courtesies
decompressors['UN'] = decompressors[None]

# convenient shortcut
dst = debugstacktrace
General Comments 0
You need to be logged in to leave comments. Login now