##// END OF EJS Templates
pycompat: add empty and queue to handle py3 divergence...
timeless -
r28818:6041fb8f default
parent child Browse files
Show More
# pycompat.py - portability shim for python 3
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.

"""Mercurial portability shim for python 3.

This contains aliases to hide python version-specific details from the core.
"""

from __future__ import absolute_import

try:
    # Python 2 spells the module 'Queue'
    import Queue as _queuemod
except ImportError:
    # Python 3 renamed it to 'queue'
    import queue as _queuemod
# re-export under version-neutral names
empty = _queuemod.Empty
queue = _queuemod.Queue
@@ -1,2735 +1,2742 b''
1 # util.py - Mercurial utility functions and platform specific implementations
1 # util.py - Mercurial utility functions and platform specific implementations
2 #
2 #
3 # Copyright 2005 K. Thananchayan <thananck@yahoo.com>
3 # Copyright 2005 K. Thananchayan <thananck@yahoo.com>
4 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
5 # Copyright 2006 Vadim Gelfer <vadim.gelfer@gmail.com>
5 # Copyright 2006 Vadim Gelfer <vadim.gelfer@gmail.com>
6 #
6 #
7 # This software may be used and distributed according to the terms of the
7 # This software may be used and distributed according to the terms of the
8 # GNU General Public License version 2 or any later version.
8 # GNU General Public License version 2 or any later version.
9
9
10 """Mercurial utility functions and platform specific implementations.
10 """Mercurial utility functions and platform specific implementations.
11
11
12 This contains helper routines that are independent of the SCM core and
12 This contains helper routines that are independent of the SCM core and
13 hide platform-specific details from the core.
13 hide platform-specific details from the core.
14 """
14 """
15
15
16 from __future__ import absolute_import
16 from __future__ import absolute_import
17
17
18 import bz2
18 import bz2
19 import calendar
19 import calendar
20 import collections
20 import collections
21 import datetime
21 import datetime
22 import errno
22 import errno
23 import gc
23 import gc
24 import hashlib
24 import hashlib
25 import imp
25 import imp
26 import os
26 import os
27 import re as remod
27 import re as remod
28 import shutil
28 import shutil
29 import signal
29 import signal
30 import socket
30 import socket
31 import subprocess
31 import subprocess
32 import sys
32 import sys
33 import tempfile
33 import tempfile
34 import textwrap
34 import textwrap
35 import time
35 import time
36 import traceback
36 import traceback
37 import urllib
37 import urllib
38 import zlib
38 import zlib
39
39
40 from . import (
40 from . import (
41 encoding,
41 encoding,
42 error,
42 error,
43 i18n,
43 i18n,
44 osutil,
44 osutil,
45 parsers,
45 parsers,
46 pycompat,
46 )
47 )
47
48
# Re-export the py2/py3 compatibility aliases defined in pycompat so that
# existing "util.empty" / "util.queue" callers keep working.
empty = pycompat.empty
queue = pycompat.queue

# Select the platform specific implementation module.
if os.name == 'nt':
    from . import windows as platform
else:
    from . import posix as platform
52
59
md5 = hashlib.md5
sha1 = hashlib.sha1
sha512 = hashlib.sha512
_ = i18n._

# Re-export the platform specific implementations (the posix or windows
# module selected above) under stable names so the rest of the code base
# can stay platform agnostic.
cachestat = platform.cachestat
checkexec = platform.checkexec
checklink = platform.checklink
copymode = platform.copymode
executablepath = platform.executablepath
expandglobs = platform.expandglobs
explainexit = platform.explainexit
findexe = platform.findexe
gethgcmd = platform.gethgcmd
getuser = platform.getuser
getpid = os.getpid
groupmembers = platform.groupmembers
groupname = platform.groupname
hidewindow = platform.hidewindow
isexec = platform.isexec
isowner = platform.isowner
localpath = platform.localpath
lookupreg = platform.lookupreg
makedir = platform.makedir
nlinks = platform.nlinks
normpath = platform.normpath
normcase = platform.normcase
normcasespec = platform.normcasespec
normcasefallback = platform.normcasefallback
openhardlinks = platform.openhardlinks
oslink = platform.oslink
parsepatchoutput = platform.parsepatchoutput
pconvert = platform.pconvert
poll = platform.poll
popen = platform.popen
posixfile = platform.posixfile
quotecommand = platform.quotecommand
readpipe = platform.readpipe
rename = platform.rename
removedirs = platform.removedirs
samedevice = platform.samedevice
samefile = platform.samefile
samestat = platform.samestat
setbinary = platform.setbinary
setflags = platform.setflags
setsignalhandler = platform.setsignalhandler
shellquote = platform.shellquote
spawndetached = platform.spawndetached
split = platform.split
sshargs = platform.sshargs
# osutil may provide a C implementation of statfiles; fall back to the
# pure-python platform version when it does not.
statfiles = getattr(osutil, 'statfiles', platform.statfiles)
statisexec = platform.statisexec
statislink = platform.statislink
termwidth = platform.termwidth
testpid = platform.testpid
umask = platform.umask
unlink = platform.unlink
unlinkpath = platform.unlinkpath
username = platform.username
112
119
# Python compatibility

# Unique sentinel: lets safehasattr() below distinguish "attribute
# missing" from "attribute is None".
_notset = object()

# disable Python's problematic floating point timestamps (issue4836)
# (Python hypocritically says you shouldn't change this behavior in
# libraries, and sure enough Mercurial is not a library.)
os.stat_float_times(False)
121
128
def safehasattr(thing, attr):
    """Return True if `thing` has an attribute named `attr`.

    Uses getattr() with a unique sentinel default, so an attribute whose
    value is None is still correctly reported as present.
    """
    found = getattr(thing, attr, _notset)
    return found is not _notset
124
131
# Supported digest algorithms, keyed by their wire/protocol name.
DIGESTS = {
    'md5': md5,
    'sha1': sha1,
    'sha512': sha512,
}
# List of digest types from strongest to weakest
DIGESTS_BY_STRENGTH = ['sha512', 'sha1', 'md5']

# Sanity check: every entry in the strength ranking must be a supported
# digest, otherwise digester.preferred() could return an unusable name.
for k in DIGESTS_BY_STRENGTH:
    assert k in DIGESTS
class digester(object):
    """helper to compute digests.

    This helper can be used to compute one or more digests given their name.

    >>> d = digester(['md5', 'sha1'])
    >>> d.update('foo')
    >>> [k for k in sorted(d)]
    ['md5', 'sha1']
    >>> d['md5']
    'acbd18db4cc2f85cedef654fccc4a4d8'
    >>> d['sha1']
    '0beec7b5ea3f0fdbc95d0dd47f3c5bc275da8a33'
    >>> digester.preferred(['md5', 'sha1'])
    'sha1'
    """

    def __init__(self, digests, s=''):
        self._hashes = {}
        for k in digests:
            if k not in DIGESTS:
                raise Abort(_('unknown digest type: %s') % k)
            self._hashes[k] = DIGESTS[k]()
        if s:
            self.update(s)

    def update(self, data):
        """Feed `data` to every configured hash object."""
        for h in self._hashes.values():
            h.update(data)

    def __getitem__(self, key):
        """Return the hex digest for `key` computed so far."""
        if key not in DIGESTS:
            # fix: report the requested key; previously this formatted the
            # stale module-level loop variable 'k', so the error message
            # never named the digest that was actually asked for.
            raise Abort(_('unknown digest type: %s') % key)
        return self._hashes[key].hexdigest()

    def __iter__(self):
        return iter(self._hashes)

    @staticmethod
    def preferred(supported):
        """returns the strongest digest type in both supported and DIGESTS."""

        for k in DIGESTS_BY_STRENGTH:
            if k in supported:
                return k
        return None
182
189
class digestchecker(object):
    """file handle wrapper that additionally checks content against a given
    size and digests.

    d = digestchecker(fh, size, {'md5': '...'})

    When multiple digests are given, all of them are validated.
    """

    def __init__(self, fh, size, digests):
        self._fh = fh
        self._size = size
        self._got = 0
        self._digests = dict(digests)
        self._digester = digester(self._digests.keys())

    def read(self, length=-1):
        # feed everything we hand out through the digester and keep a
        # running byte count for validate()
        data = self._fh.read(length)
        self._digester.update(data)
        self._got += len(data)
        return data

    def validate(self):
        """Abort unless the bytes read match the expected size and digests."""
        if self._size != self._got:
            raise Abort(_('size mismatch: expected %d, got %d') %
                        (self._size, self._got))
        for name, wanted in self._digests.items():
            computed = self._digester[name]
            if wanted != computed:
                # i18n: first parameter is a digest name
                raise Abort(_('%s mismatch: expected %s, got %s') %
                            (name, wanted, computed))
214
221
try:
    # Python 2: reuse the builtin directly.
    buffer = buffer
except NameError:
    # No 'buffer' builtin (Python 3, or stripped-down interpreters):
    # provide an equivalent.
    if sys.version_info[0] >= 3:
        def buffer(sliceable, offset=0):
            """Return a zero-copy memoryview of sliceable from offset on."""
            return memoryview(sliceable)[offset:]
    else:
        def buffer(sliceable, offset=0):
            """Return a copy of sliceable from offset on."""
            return sliceable[offset:]
224
231
# On POSIX it is safe (and desirable) to close inherited fds in children
# spawned by subprocess; see the popen* helpers below.
closefds = os.name == 'posix'

# Read size, in bytes, used by bufferedinputpipe._fillbuffer.
_chunksize = 4096
228
235
class bufferedinputpipe(object):
    """a manually buffered input pipe

    Python will not let us use buffered IO and lazy reading with 'polling' at
    the same time. We cannot probe the buffer state and select will not detect
    that data are ready to read if they are already buffered.

    This class let us work around that by implementing its own buffering
    (allowing efficient readline) while offering a way to know if the buffer is
    empty from the output (allowing collaboration of the buffer with polling).

    This class lives in the 'util' module because it makes use of the 'os'
    module from the python stdlib.
    """

    def __init__(self, input):
        self._input = input
        # pending chunks, oldest first; collapsed into one chunk lazily
        self._buffer = []
        self._eof = False
        # total number of buffered bytes, across all chunks
        self._lenbuf = 0

    @property
    def hasbuffer(self):
        """True is any data is currently buffered

        This will be used externally a pre-step for polling IO. If there is
        already data then no polling should be set in place."""
        return bool(self._buffer)

    @property
    def closed(self):
        return self._input.closed

    def fileno(self):
        return self._input.fileno()

    def close(self):
        return self._input.close()

    def read(self, size):
        # fill until we have 'size' bytes buffered or reach EOF
        while (not self._eof) and (self._lenbuf < size):
            self._fillbuffer()
        return self._frombuffer(size)

    def readline(self, *args, **kwargs):
        if 1 < len(self._buffer):
            # this should not happen because both read and readline end with a
            # _frombuffer call that collapse it.
            self._buffer = [''.join(self._buffer)]
            self._lenbuf = len(self._buffer[0])
        # lfi: index of the first newline in the newest chunk, or -1
        lfi = -1
        if self._buffer:
            lfi = self._buffer[-1].find('\n')
        while (not self._eof) and lfi < 0:
            self._fillbuffer()
            if self._buffer:
                lfi = self._buffer[-1].find('\n')
        size = lfi + 1
        if lfi < 0: # end of file
            size = self._lenbuf
        elif 1 < len(self._buffer):
            # we need to take previous chunks into account
            size += self._lenbuf - len(self._buffer[-1])
        return self._frombuffer(size)

    def _frombuffer(self, size):
        """return at most 'size' data from the buffer

        The data are removed from the buffer."""
        if size == 0 or not self._buffer:
            return ''
        buf = self._buffer[0]
        if 1 < len(self._buffer):
            # collapse all pending chunks into one before slicing
            buf = ''.join(self._buffer)

        data = buf[:size]
        buf = buf[len(data):]
        if buf:
            self._buffer = [buf]
            self._lenbuf = len(buf)
        else:
            self._buffer = []
            self._lenbuf = 0
        return data

    def _fillbuffer(self):
        """read data to the buffer"""
        # raw os.read so select/poll semantics stay predictable
        data = os.read(self._input.fileno(), _chunksize)
        if not data:
            self._eof = True
        else:
            self._lenbuf += len(data)
            self._buffer.append(data)
322
329
def popen2(cmd, env=None, newlines=False):
    """Spawn `cmd` through the shell; return its (stdin, stdout) pipes."""
    # Setting bufsize to -1 lets the system decide the buffer size.
    # The default for bufsize is 0, meaning unbuffered. This leads to
    # poor performance on Mac OS X: http://bugs.python.org/issue4194
    proc = subprocess.Popen(cmd, shell=True, bufsize=-1,
                            close_fds=closefds,
                            stdin=subprocess.PIPE, stdout=subprocess.PIPE,
                            universal_newlines=newlines,
                            env=env)
    return proc.stdin, proc.stdout
333
340
def popen3(cmd, env=None, newlines=False):
    """Like popen4(), but drop the Popen object from the return value."""
    stdin, stdout, stderr, _proc = popen4(cmd, env, newlines)
    return stdin, stdout, stderr
337
344
def popen4(cmd, env=None, newlines=False, bufsize=-1):
    """Spawn `cmd` through the shell.

    Returns (stdin, stdout, stderr, proc) where proc is the Popen object.
    """
    proc = subprocess.Popen(cmd, shell=True, bufsize=bufsize,
                            close_fds=closefds,
                            stdin=subprocess.PIPE, stdout=subprocess.PIPE,
                            stderr=subprocess.PIPE,
                            universal_newlines=newlines,
                            env=env)
    return proc.stdin, proc.stdout, proc.stderr, proc
346
353
def version():
    """Return version information if available."""
    # __version__ is generated at build time; a source checkout that has
    # not been built will not have it.
    try:
        from . import __version__
    except ImportError:
        return 'unknown'
    return __version__.version
354
361
def versiontuple(v=None, n=4):
    """Parses a Mercurial version string into an N-tuple.

    The version string to be parsed is specified with the ``v`` argument.
    If it isn't defined, the current Mercurial version string will be parsed.

    ``n`` can be 2, 3, or 4. Here is how some version strings map to
    returned values:

    >>> v = '3.6.1+190-df9b73d2d444'
    >>> versiontuple(v, 2)
    (3, 6)
    >>> versiontuple(v, 3)
    (3, 6, 1)
    >>> versiontuple(v, 4)
    (3, 6, 1, '190-df9b73d2d444')

    >>> versiontuple('3.6.1+190-df9b73d2d444+20151118')
    (3, 6, 1, '190-df9b73d2d444+20151118')

    >>> v = '3.6'
    >>> versiontuple(v, 2)
    (3, 6)
    >>> versiontuple(v, 3)
    (3, 6, None)
    >>> versiontuple(v, 4)
    (3, 6, None, None)
    """
    if not v:
        v = version()
    # split off the "+local" build suffix, if any
    if '+' in v:
        vparts, extra = v.split('+', 1)
    else:
        vparts, extra = v, None

    # collect leading numeric components; stop at the first non-integer
    vints = []
    for piece in vparts.split('.'):
        try:
            vints.append(int(piece))
        except ValueError:
            break
    # pad to three numeric components: (3, 6) -> (3, 6, None)
    while len(vints) < 3:
        vints.append(None)

    if n == 2:
        return tuple(vints[:2])
    if n == 3:
        return tuple(vints[:3])
    if n == 4:
        return tuple(vints[:3]) + (extra,)
407
414
# used by parsedate
# Formats are tried in order, so more specific forms come first.
defaultdateformats = (
    '%Y-%m-%d %H:%M:%S',
    '%Y-%m-%d %I:%M:%S%p',
    '%Y-%m-%d %H:%M',
    '%Y-%m-%d %I:%M%p',
    '%Y-%m-%d',
    '%m-%d',
    '%m/%d',
    '%m/%d/%y',
    '%m/%d/%Y',
    '%a %b %d %H:%M:%S %Y',
    '%a %b %d %I:%M:%S%p %Y',
    '%a, %d %b %Y %H:%M:%S', #  GNU coreutils "/bin/date --rfc-2822"
    '%b %d %H:%M:%S %Y',
    '%b %d %I:%M:%S%p %Y',
    '%b %d %H:%M:%S',
    '%b %d %I:%M:%S%p',
    '%b %d %H:%M',
    '%b %d %I:%M%p',
    '%b %d %Y',
    '%b %d',
    '%H:%M:%S',
    '%I:%M:%S%p',
    '%H:%M',
    '%I:%M%p',
)

# Additional, more ambiguous formats accepted when the user asks for them.
extendeddateformats = defaultdateformats + (
    "%Y",
    "%Y-%m",
    "%b",
    "%b %Y",
    )
442
449
def cachefunc(func):
    '''cache the result of function calls'''
    # XXX doesn't handle keywords args
    # Use func.__code__ rather than the Python 2-only func.func_code
    # alias: __code__ exists on Python 2.6+ and is the only spelling on
    # Python 3.
    if func.__code__.co_argcount == 0:
        # nullary function: a one-element list is the cheapest cell
        cache = []
        def f():
            if len(cache) == 0:
                cache.append(func())
            return cache[0]
        return f
    cache = {}
    if func.__code__.co_argcount == 1:
        # we gain a small amount of time because
        # we don't need to pack/unpack the list
        def f(arg):
            if arg not in cache:
                cache[arg] = func(arg)
            return cache[arg]
    else:
        def f(*args):
            if args not in cache:
                cache[args] = func(*args)
            return cache[args]

    return f
468
475
class sortdict(dict):
    '''a simple sorted dictionary

    Iteration follows insertion order; re-assigning an existing key moves
    it to the end.
    '''
    def __init__(self, data=None):
        self._list = []
        if data:
            self.update(data)
    def copy(self):
        return sortdict(self)
    def __setitem__(self, key, val):
        if key in self:
            self._list.remove(key)
        self._list.append(key)
        dict.__setitem__(self, key, val)
    def __iter__(self):
        return self._list.__iter__()
    def update(self, src):
        if isinstance(src, dict):
            # fix: 'iteritems' is Python 2 only; fall back to items() so
            # this also works on Python 3
            src = getattr(src, 'iteritems', src.items)()
        for k, v in src:
            self[k] = v
    def clear(self):
        dict.clear(self)
        self._list = []
    def items(self):
        return [(k, self[k]) for k in self._list]
    def __delitem__(self, key):
        dict.__delitem__(self, key)
        self._list.remove(key)
    def pop(self, key, *args, **kwargs):
        # fix: return the popped value (it was previously discarded,
        # making pop() always return None, unlike dict.pop)
        val = dict.pop(self, key, *args, **kwargs)
        try:
            self._list.remove(key)
        except ValueError:
            # key was absent and a default swallowed the KeyError
            pass
        return val
    def keys(self):
        return self._list
    def iterkeys(self):
        return self._list.__iter__()
    def iteritems(self):
        for k in self._list:
            yield k, self[k]
    def insert(self, index, key, val):
        self._list.insert(index, key)
        dict.__setitem__(self, key, val)
513
520
class _lrucachenode(object):
    """A node in a doubly linked list.

    Holds a reference to nodes on either side as well as a key-value
    pair for the dictionary entry.
    """
    __slots__ = ('next', 'prev', 'key', 'value')

    def __init__(self):
        self.next = None
        self.prev = None

        # key stays _notset while the node holds no cache entry
        self.key = _notset
        self.value = None

    def markempty(self):
        """Mark the node as emptied."""
        self.key = _notset
class lrucachedict(object):
    """Dict that caches most recent accesses and sets.

    The dict consists of an actual backing dict - indexed by original
    key - and a doubly linked circular list defining the order of entries in
    the cache.

    The head node is the newest entry in the cache. If the cache is full,
    we recycle head.prev and make it the new head. Cache accesses result in
    the node being moved to before the existing head and being marked as the
    new head node.
    """
    def __init__(self, max):
        self._cache = {}

        # single-node circular list; it grows lazily up to capacity
        self._head = head = _lrucachenode()
        head.prev = head
        head.next = head
        # number of nodes in the linked list (not number of cached entries)
        self._size = 1
        self._capacity = max

    def __len__(self):
        return len(self._cache)

    def __contains__(self, k):
        return k in self._cache

    def __iter__(self):
        # We don't have to iterate in cache order, but why not.
        n = self._head
        for i in range(len(self._cache)):
            yield n.key
            n = n.next

    def __getitem__(self, k):
        # a lookup counts as an access: promote the node to head
        node = self._cache[k]
        self._movetohead(node)
        return node.value

    def __setitem__(self, k, v):
        node = self._cache.get(k)
        # Replace existing value and mark as newest.
        if node is not None:
            node.value = v
            self._movetohead(node)
            return

        if self._size < self._capacity:
            node = self._addcapacity()
        else:
            # Grab the last/oldest item.
            node = self._head.prev

        # At capacity. Kill the old entry.
        if node.key is not _notset:
            del self._cache[node.key]

        node.key = k
        node.value = v
        self._cache[k] = node
        # And mark it as newest entry. No need to adjust order since it
        # is already self._head.prev.
        self._head = node

    def __delitem__(self, k):
        node = self._cache.pop(k)
        node.markempty()

        # Temporarily mark as newest item before re-adjusting head to make
        # this node the oldest item.
        self._movetohead(node)
        self._head = node.next

    # Additional dict methods.

608 def get(self, k, default=None):
615 def get(self, k, default=None):
609 try:
616 try:
610 return self._cache[k]
617 return self._cache[k]
611 except KeyError:
618 except KeyError:
612 return default
619 return default
613
620
614 def clear(self):
621 def clear(self):
615 n = self._head
622 n = self._head
616 while n.key is not _notset:
623 while n.key is not _notset:
617 n.markempty()
624 n.markempty()
618 n = n.next
625 n = n.next
619
626
620 self._cache.clear()
627 self._cache.clear()
621
628
622 def copy(self):
629 def copy(self):
623 result = lrucachedict(self._capacity)
630 result = lrucachedict(self._capacity)
624 n = self._head.prev
631 n = self._head.prev
625 # Iterate in oldest-to-newest order, so the copy has the right ordering
632 # Iterate in oldest-to-newest order, so the copy has the right ordering
626 for i in range(len(self._cache)):
633 for i in range(len(self._cache)):
627 result[n.key] = n.value
634 result[n.key] = n.value
628 n = n.prev
635 n = n.prev
629 return result
636 return result
630
637
631 def _movetohead(self, node):
638 def _movetohead(self, node):
632 """Mark a node as the newest, making it the new head.
639 """Mark a node as the newest, making it the new head.
633
640
634 When a node is accessed, it becomes the freshest entry in the LRU
641 When a node is accessed, it becomes the freshest entry in the LRU
635 list, which is denoted by self._head.
642 list, which is denoted by self._head.
636
643
637 Visually, let's make ``N`` the new head node (* denotes head):
644 Visually, let's make ``N`` the new head node (* denotes head):
638
645
639 previous/oldest <-> head <-> next/next newest
646 previous/oldest <-> head <-> next/next newest
640
647
641 ----<->--- A* ---<->-----
648 ----<->--- A* ---<->-----
642 | |
649 | |
643 E <-> D <-> N <-> C <-> B
650 E <-> D <-> N <-> C <-> B
644
651
645 To:
652 To:
646
653
647 ----<->--- N* ---<->-----
654 ----<->--- N* ---<->-----
648 | |
655 | |
649 E <-> D <-> C <-> B <-> A
656 E <-> D <-> C <-> B <-> A
650
657
651 This requires the following moves:
658 This requires the following moves:
652
659
653 C.next = D (node.prev.next = node.next)
660 C.next = D (node.prev.next = node.next)
654 D.prev = C (node.next.prev = node.prev)
661 D.prev = C (node.next.prev = node.prev)
655 E.next = N (head.prev.next = node)
662 E.next = N (head.prev.next = node)
656 N.prev = E (node.prev = head.prev)
663 N.prev = E (node.prev = head.prev)
657 N.next = A (node.next = head)
664 N.next = A (node.next = head)
658 A.prev = N (head.prev = node)
665 A.prev = N (head.prev = node)
659 """
666 """
660 head = self._head
667 head = self._head
661 # C.next = D
668 # C.next = D
662 node.prev.next = node.next
669 node.prev.next = node.next
663 # D.prev = C
670 # D.prev = C
664 node.next.prev = node.prev
671 node.next.prev = node.prev
665 # N.prev = E
672 # N.prev = E
666 node.prev = head.prev
673 node.prev = head.prev
667 # N.next = A
674 # N.next = A
668 # It is tempting to do just "head" here, however if node is
675 # It is tempting to do just "head" here, however if node is
669 # adjacent to head, this will do bad things.
676 # adjacent to head, this will do bad things.
670 node.next = head.prev.next
677 node.next = head.prev.next
671 # E.next = N
678 # E.next = N
672 node.next.prev = node
679 node.next.prev = node
673 # A.prev = N
680 # A.prev = N
674 node.prev.next = node
681 node.prev.next = node
675
682
676 self._head = node
683 self._head = node
677
684
678 def _addcapacity(self):
685 def _addcapacity(self):
679 """Add a node to the circular linked list.
686 """Add a node to the circular linked list.
680
687
681 The new node is inserted before the head node.
688 The new node is inserted before the head node.
682 """
689 """
683 head = self._head
690 head = self._head
684 node = _lrucachenode()
691 node = _lrucachenode()
685 head.prev.next = node
692 head.prev.next = node
686 node.prev = head.prev
693 node.prev = head.prev
687 node.next = head
694 node.next = head
688 head.prev = node
695 head.prev = node
689 self._size += 1
696 self._size += 1
690 return node
697 return node
691
698
def lrucachefunc(func):
    '''cache most recent results of function calls

    Keeps up to ~20 distinct argument tuples; the least recently used
    entry is evicted first.  Single-argument functions get a cheaper
    wrapper that avoids tuple packing.
    '''
    cache = {}
    order = collections.deque()
    # func.__code__ replaces the py2-only func.func_code spelling; it is
    # available on python 2.6+ and python 3, matching the codebase's
    # ongoing py3 porting (see pycompat).
    if func.__code__.co_argcount == 1:
        def f(arg):
            if arg not in cache:
                if len(cache) > 20:
                    del cache[order.popleft()]
                cache[arg] = func(arg)
            else:
                order.remove(arg)
            order.append(arg)
            return cache[arg]
    else:
        def f(*args):
            if args not in cache:
                if len(cache) > 20:
                    del cache[order.popleft()]
                cache[args] = func(*args)
            else:
                order.remove(args)
            order.append(args)
            return cache[args]

    return f
718
725
class propertycache(object):
    """Descriptor caching a computed attribute on the instance.

    The first access runs the wrapped function and stores the result in
    the instance ``__dict__`` under the function's name, so subsequent
    accesses bypass the descriptor entirely.
    """
    def __init__(self, func):
        self.func = func
        self.name = func.__name__

    def __get__(self, obj, type=None):
        value = self.func(obj)
        self.cachevalue(obj, value)
        return value

    def cachevalue(self, obj, value):
        # __dict__ assignment required to bypass __setattr__ (eg: repoview)
        obj.__dict__[self.name] = value
731
738
def pipefilter(s, cmd):
    '''filter string S through command CMD, returning its output'''
    proc = subprocess.Popen(cmd, shell=True, close_fds=closefds,
                            stdin=subprocess.PIPE, stdout=subprocess.PIPE)
    stdout, _stderr = proc.communicate(s)
    return stdout
738
745
def tempfilter(s, cmd):
    '''filter string S through a pair of temporary files with CMD.
    CMD is used as a template to create the real command to be run,
    with the strings INFILE and OUTFILE replaced by the real names of
    the temporary files generated.'''
    inname, outname = None, None
    try:
        infd, inname = tempfile.mkstemp(prefix='hg-filter-in-')
        fp = os.fdopen(infd, 'wb')
        fp.write(s)
        fp.close()
        outfd, outname = tempfile.mkstemp(prefix='hg-filter-out-')
        os.close(outfd)
        cmd = cmd.replace('INFILE', inname)
        cmd = cmd.replace('OUTFILE', outname)
        code = os.system(cmd)
        if sys.platform == 'OpenVMS' and code & 1:
            code = 0
        if code:
            # explainexit() returns a (message, code) pair; use [0] for
            # the human-readable part, as system() does — previously the
            # whole tuple was interpolated into the message.
            raise Abort(_("command '%s' failed: %s") %
                        (cmd, explainexit(code)[0]))
        return readfile(outname)
    finally:
        # Best-effort cleanup of both temporary files.
        try:
            if inname:
                os.unlink(inname)
        except OSError:
            pass
        try:
            if outname:
                os.unlink(outname)
        except OSError:
            pass
772
779
# Dispatch table mapping filter-spec prefixes to implementations; specs
# without a recognized prefix are piped through a shell command instead.
filtertable = {
    'tempfile:': tempfilter,
    'pipe:': pipefilter,
}
777
784
def filter(s, cmd):
    "filter a string through a command that transforms its input to its output"
    for prefix, impl in filtertable.iteritems():
        if cmd.startswith(prefix):
            return impl(s, cmd[len(prefix):].lstrip())
    return pipefilter(s, cmd)
784
791
def binary(s):
    """return true if a string is binary data"""
    if not s:
        return False
    return '\0' in s
788
795
def increasingchunks(source, min=1024, max=65536):
    '''return no less than min bytes per chunk while data remains,
    doubling min after each chunk until it reaches max'''
    def log2(x):
        # Floor of log2(x); 0 for x == 0.
        if not x:
            return 0
        bits = 0
        while x:
            x >>= 1
            bits += 1
        return bits - 1

    pending = []
    size = 0
    for piece in source:
        pending.append(piece)
        size += len(piece)
        if size >= min:
            if min < max:
                # Double the threshold, or jump straight to the largest
                # power of two not exceeding what we just emitted.
                min = min << 1
                nmin = 1 << log2(size)
                if nmin > min:
                    min = nmin
                if min > max:
                    min = max
            yield ''.join(pending)
            size = 0
            pending = []
    if pending:
        yield ''.join(pending)
819
826
# Convenience re-export; the canonical definition lives in the error module.
Abort = error.Abort
821
828
def always(fn):
    """Matcher predicate that accepts every input."""
    return True
824
831
def never(fn):
    """Matcher predicate that rejects every input."""
    return False
827
834
def nogc(func):
    """disable garbage collector

    Python's garbage collector triggers a GC each time a certain number of
    container objects (the number being defined by gc.get_threshold()) are
    allocated even when marked not to be tracked by the collector. Tracking has
    no effect on when GCs are triggered, only on what objects the GC looks
    into. As a workaround, disable GC while building complex (huge)
    containers.

    This garbage collector issue have been fixed in 2.7.
    """
    def wrapper(*args, **kwargs):
        wasenabled = gc.isenabled()
        gc.disable()
        try:
            return func(*args, **kwargs)
        finally:
            # Restore the collector only if it was on before the call.
            if wasenabled:
                gc.enable()
    return wrapper
849
856
def pathto(root, n1, n2):
    '''return the relative path from one place to another.
    root should use os.sep to separate directories
    n1 should use os.sep to separate directories
    n2 should use "/" to separate directories
    returns an os.sep-separated path.

    If n1 is a relative path, it's assumed it's
    relative to root.
    n2 should always be relative to root.
    '''
    if not n1:
        return localpath(n2)
    if os.path.isabs(n1):
        # On different drives there is no relative path; anchor at root.
        if os.path.splitdrive(root)[0] != os.path.splitdrive(n1)[0]:
            return os.path.join(root, localpath(n2))
        n2 = '/'.join((pconvert(root), n2))
    fromparts, toparts = splitpath(n1), n2.split('/')
    fromparts.reverse()
    toparts.reverse()
    # Strip the common prefix of both paths.
    while fromparts and toparts and fromparts[-1] == toparts[-1]:
        fromparts.pop()
        toparts.pop()
    toparts.reverse()
    # Climb out of what remains of n1, then descend into n2.
    return os.sep.join((['..'] * len(fromparts)) + toparts) or '.'
875
882
def mainfrozen():
    """return True if we are a frozen executable.

    The code supports py2exe (most common, Windows only) and tools/freeze
    (portable, not much used).
    """
    if safehasattr(sys, "frozen"): # new py2exe
        return True
    if safehasattr(sys, "importers"): # old py2exe
        return True
    return imp.is_frozen("__main__") # tools/freeze
885
892
# Locate the data files shipped alongside the source code.
if mainfrozen() and getattr(sys, 'frozen', None) != 'macosx_app':
    # Frozen executables (py2exe) have no usable __file__; data files
    # live next to the executable instead.
    datapath = os.path.dirname(sys.executable)
else:
    datapath = os.path.dirname(__file__)

i18n.setdatapath(datapath)
894
901
# Cached path of the 'hg' executable; resolved lazily by hgexecutable().
_hgexecutable = None

def hgexecutable():
    """return location of the 'hg' executable.

    Defaults to $HG or 'hg' in the search path.
    """
    if _hgexecutable is None:
        hg = os.environ.get('HG')
        mainmod = sys.modules['__main__']
        if hg:
            _sethgexecutable(hg)
        elif mainfrozen():
            if getattr(sys, 'frozen', None) == 'macosx_app':
                # Env variable set by py2app
                _sethgexecutable(os.environ['EXECUTABLEPATH'])
            else:
                _sethgexecutable(sys.executable)
        elif os.path.basename(getattr(mainmod, '__file__', '')) == 'hg':
            # Running directly from an 'hg' script.
            _sethgexecutable(mainmod.__file__)
        else:
            # Last resort: search PATH, falling back to argv[0].
            exe = findexe('hg') or os.path.basename(sys.argv[0])
            _sethgexecutable(exe)
    return _hgexecutable
919
926
def _sethgexecutable(path):
    """set location of the 'hg' executable"""
    global _hgexecutable
    _hgexecutable = path
924
931
925 def _isstdout(f):
932 def _isstdout(f):
926 fileno = getattr(f, 'fileno', None)
933 fileno = getattr(f, 'fileno', None)
927 return fileno and fileno() == sys.__stdout__.fileno()
934 return fileno and fileno() == sys.__stdout__.fileno()
928
935
def system(cmd, environ=None, cwd=None, onerr=None, errprefix=None, out=None):
    '''enhanced shell command execution.
    run with environment maybe modified, maybe in different dir.

    if command fails and onerr is None, return status, else raise onerr
    object as exception.

    if out is specified, it is assumed to be a file-like object that has a
    write() method. stdout and stderr will be redirected to out.'''
    if environ is None:
        environ = {}
    try:
        sys.stdout.flush()
    except Exception:
        pass
    def py2shell(val):
        'convert python object into string that is useful to shell'
        if val is None or val is False:
            return '0'
        if val is True:
            return '1'
        return str(val)
    origcmd = cmd
    cmd = quotecommand(cmd)
    if sys.platform == 'plan9' and (sys.version_info[0] == 2
                                    and sys.version_info[1] < 7):
        # subprocess kludge to work around issues in half-baked Python
        # ports, notably bichued/python:
        if cwd is not None:
            os.chdir(cwd)
        rc = os.system(cmd)
    else:
        env = dict(os.environ)
        env.update((k, py2shell(v)) for k, v in environ.iteritems())
        env['HG'] = hgexecutable()
        if out is None or _isstdout(out):
            # Child writes straight to our stdout; no capture needed.
            rc = subprocess.call(cmd, shell=True, close_fds=closefds,
                                 env=env, cwd=cwd)
        else:
            # Forward the child's combined stdout/stderr line by line
            # into the supplied file-like object.
            proc = subprocess.Popen(cmd, shell=True, close_fds=closefds,
                                    env=env, cwd=cwd, stdout=subprocess.PIPE,
                                    stderr=subprocess.STDOUT)
            while True:
                line = proc.stdout.readline()
                if not line:
                    break
                out.write(line)
            proc.wait()
            rc = proc.returncode
        if sys.platform == 'OpenVMS' and rc & 1:
            rc = 0
    if rc and onerr:
        errmsg = '%s %s' % (os.path.basename(origcmd.split(None, 1)[0]),
                            explainexit(rc)[0])
        if errprefix:
            errmsg = '%s: %s' % (errprefix, errmsg)
        raise onerr(errmsg)
    return rc
987
994
def checksignature(func):
    '''wrap a function with code to check for calling errors'''
    def check(*args, **kwargs):
        try:
            return func(*args, **kwargs)
        except TypeError:
            # A traceback exactly one frame deep means the call itself
            # failed (bad arguments); deeper TypeErrors come from inside
            # func and are genuine bugs, so re-raise them untouched.
            if len(traceback.extract_tb(sys.exc_info()[2])) == 1:
                raise error.SignatureError
            raise

    return check
999
1006
def copyfile(src, dest, hardlink=False, copystat=False):
    '''copy a file, preserving mode and optionally other stat info like
    atime/mtime'''
    if os.path.lexists(dest):
        unlink(dest)
    # hardlinks are problematic on CIFS, quietly ignore this flag
    # until we find a way to work around it cleanly (issue4546)
    if False and hardlink:
        try:
            oslink(src, dest)
            return
        except (IOError, OSError):
            pass # fall back to normal copy
    if os.path.islink(src):
        # Recreate the symlink rather than copying its target.
        # copytime is ignored for symlinks, but in general copytime isn't
        # needed for them anyway.
        os.symlink(os.readlink(src), dest)
    else:
        try:
            shutil.copyfile(src, dest)
            if copystat:
                # copystat also copies mode
                shutil.copystat(src, dest)
            else:
                shutil.copymode(src, dest)
        except shutil.Error as err:
            raise Abort(str(err))
1027
1034
def copyfiles(src, dst, hardlink=None, progress=lambda t, pos: None):
    """Copy a directory tree using hardlinks if possible."""
    num = 0

    if hardlink is None:
        # Hardlinking only makes sense within a single device.
        hardlink = (os.stat(src).st_dev ==
                    os.stat(os.path.dirname(dst)).st_dev)
    topic = _('linking') if hardlink else _('copying')

    if os.path.isdir(src):
        os.mkdir(dst)
        for name, kind in osutil.listdir(src):
            srcname = os.path.join(src, name)
            dstname = os.path.join(dst, name)
            def nprog(t, pos):
                # Offset child progress by the files handled so far.
                if pos is not None:
                    return progress(t, pos + num)
            hardlink, n = copyfiles(srcname, dstname, hardlink, progress=nprog)
            num += n
    else:
        if hardlink:
            try:
                oslink(src, dst)
            except (IOError, OSError):
                # Fall back to copying for this and all remaining files.
                hardlink = False
                shutil.copy(src, dst)
        else:
            shutil.copy(src, dst)
        num += 1
        progress(topic, num)
    progress(topic, None)

    return hardlink, num
1064
1071
1065 _winreservednames = '''con prn aux nul
1072 _winreservednames = '''con prn aux nul
1066 com1 com2 com3 com4 com5 com6 com7 com8 com9
1073 com1 com2 com3 com4 com5 com6 com7 com8 com9
1067 lpt1 lpt2 lpt3 lpt4 lpt5 lpt6 lpt7 lpt8 lpt9'''.split()
1074 lpt1 lpt2 lpt3 lpt4 lpt5 lpt6 lpt7 lpt8 lpt9'''.split()
1068 _winreservedchars = ':*?"<>|'
1075 _winreservedchars = ':*?"<>|'
def checkwinfilename(path):
    r'''Check that the base-relative path is a valid filename on Windows.
    Returns None if the path is ok, or a UI string describing the problem.

    >>> checkwinfilename("just/a/normal/path")
    >>> checkwinfilename("foo/bar/con.xml")
    "filename contains 'con', which is reserved on Windows"
    >>> checkwinfilename("foo/con.xml/bar")
    "filename contains 'con', which is reserved on Windows"
    >>> checkwinfilename("foo/bar/xml.con")
    >>> checkwinfilename("foo/bar/AUX/bla.txt")
    "filename contains 'AUX', which is reserved on Windows"
    >>> checkwinfilename("foo/bar/bla:.txt")
    "filename contains ':', which is reserved on Windows"
    >>> checkwinfilename("foo/bar/b\07la.txt")
    "filename contains '\\x07', which is invalid on Windows"
    >>> checkwinfilename("foo/bar/bla ")
    "filename ends with ' ', which is not allowed on Windows"
    >>> checkwinfilename("../bar")
    >>> checkwinfilename("foo\\")
    "filename ends with '\\', which is invalid on Windows"
    >>> checkwinfilename("foo\\/bar")
    "directory name ends with '\\', which is invalid on Windows"
    '''
    if path.endswith('\\'):
        return _("filename ends with '\\', which is invalid on Windows")
    if '\\/' in path:
        return _("directory name ends with '\\', which is invalid on Windows")
    for part in path.replace('\\', '/').split('/'):
        if not part:
            continue
        for c in part:
            if c in _winreservedchars:
                return _("filename contains '%s', which is reserved "
                         "on Windows") % c
            if ord(c) <= 31:
                return _("filename contains %r, which is invalid "
                         "on Windows") % c
        # Only the part before the first dot counts for reserved names
        # (e.g. "con.xml" is rejected).
        base = part.split('.')[0]
        if base and base.lower() in _winreservednames:
            return _("filename contains '%s', which is reserved "
                     "on Windows") % base
        last = part[-1]
        if last in '. ' and part not in '..':
            return _("filename ends with '%s', which is not allowed "
                     "on Windows") % last
1122
# On Windows the generic Windows-name check applies to the local
# filesystem; elsewhere defer to the platform-specific implementation.
if os.name == 'nt':
    checkosfilename = checkwinfilename
else:
    checkosfilename = platform.checkosfilename
1120
1127
def makelock(info, pathname):
    """Create a lock at pathname whose content is info.

    A symlink is preferred (created atomically and readable without opening
    a file); when symlinks are unavailable, fall back to exclusively
    creating a regular file containing info.
    """
    try:
        return os.symlink(info, pathname)
    except OSError as why:
        # a pre-existing lock must be reported to the caller; any other
        # symlink failure falls through to the regular-file strategy
        if why.errno == errno.EEXIST:
            raise
    except AttributeError:  # this os module provides no symlink()
        pass

    fd = os.open(pathname, os.O_CREAT | os.O_WRONLY | os.O_EXCL)
    os.write(fd, info)
    os.close(fd)
1133
1140
def readlock(pathname):
    """Return the info string stored in the lock at pathname.

    Mirror of makelock(): try reading it as a symlink first, then fall
    back to reading a regular file for platforms without symlinks.
    """
    try:
        return os.readlink(pathname)
    except OSError as why:
        # EINVAL: not a symlink; ENOSYS: symlinks unsupported here.
        # Anything else (missing file, permissions) propagates.
        if why.errno not in (errno.EINVAL, errno.ENOSYS):
            raise
    except AttributeError:  # this os module provides no readlink()
        pass
    fp = posixfile(pathname)
    contents = fp.read()
    fp.close()
    return contents
1146
1153
def fstat(fp):
    """Return stat information for a file object.

    Works even for file-like objects without a fileno() method by
    falling back to stat-by-name via the object's .name attribute.
    """
    try:
        fd = fp.fileno()
    except AttributeError:
        return os.stat(fp.name)
    return os.fstat(fd)
1153
1160
1154 # File system features
1161 # File system features
1155
1162
def checkcase(path):
    """Return True if the given path is on a case-sensitive filesystem.

    Requires a path (like /foo/.hg) ending with a foldable final
    directory component.
    """
    stat1 = os.lstat(path)
    dirname, base = os.path.split(path)
    folded = base.upper()
    if folded == base:
        folded = base.lower()
    if folded == base:
        # the name has no letters to fold, so there is no evidence
        # against case sensitivity
        return True
    try:
        stat2 = os.lstat(os.path.join(dirname, folded))
    except OSError:
        # the folded spelling does not resolve: case matters here
        return True
    # identical stat results mean both spellings hit the same entry,
    # i.e. the filesystem folds case
    return stat1 != stat2
1178
1185
1179 try:
1186 try:
1180 import re2
1187 import re2
1181 _re2 = None
1188 _re2 = None
1182 except ImportError:
1189 except ImportError:
1183 _re2 = False
1190 _re2 = False
1184
1191
class _re(object):
    """Regexp dispatcher that uses the optional re2 engine when possible.

    Falls back to the stdlib engine (imported as remod) whenever re2 is
    absent, rejects the pattern, or unsupported flags are requested.
    """

    def _checkre2(self):
        # Probe re2 once and cache the verdict in the module-global _re2.
        global _re2
        try:
            # check if match works, see issue3964
            _re2 = bool(re2.match(r'\[([^\[]+)\]', '[ui]'))
        except ImportError:
            _re2 = False

    def compile(self, pat, flags=0):
        """Compile a regular expression, using re2 if possible.

        For best performance, use only re2-compatible regexp features.
        The only flags from the re module that are re2-compatible are
        IGNORECASE and MULTILINE.
        """
        if _re2 is None:
            self._checkre2()
        if _re2 and not (flags & ~(remod.IGNORECASE | remod.MULTILINE)):
            # re2 takes no flags argument; encode the flags inline instead
            prefix = ''
            if flags & remod.IGNORECASE:
                prefix = '(?i)' + prefix
            if flags & remod.MULTILINE:
                prefix = '(?m)' + prefix
            try:
                return re2.compile(prefix + pat)
            except re2.error:
                pass
        return remod.compile(pat, flags)

    @propertycache
    def escape(self):
        """Return the escape function matching what compile() will use.

        Imperfect: which engine actually handles a given pattern also
        depends on the flags, but this is the best approximation we have.
        """
        global _re2
        if _re2 is None:
            self._checkre2()
        return re2.escape if _re2 else remod.escape

re = _re()
1229
1236
# cache of directory -> {normcased name: on-disk name} listings
_fspathcache = {}
def fspath(name, root):
    '''Get name in the case stored in the filesystem

    The name should be relative to root, and be normcase-ed for efficiency.

    Note that this function is unnecessary, and should not be
    called, for case-sensitive filesystems (simply because it's expensive).

    The root should be normcase-ed, too.
    '''
    def _makefspathcacheentry(dir):
        # map normcased entry -> entry as actually stored on disk
        return dict((normcase(n), n) for n in os.listdir(dir))

    seps = os.sep
    if os.altsep:
        seps = seps + os.altsep
    # Protect backslashes for the character class below. This gets silly
    # very quickly. (Fix: the replace() result was previously discarded —
    # strings are immutable — making the call a no-op; it only worked by
    # accident because '\\' inside [...] is itself a valid regex escape.)
    seps = seps.replace('\\', '\\\\')
    pattern = remod.compile(r'([^%s]+)|([%s]+)' % (seps, seps))
    dir = os.path.normpath(root)
    result = []
    for part, sep in pattern.findall(name):
        if sep:
            result.append(sep)
            continue

        if dir not in _fspathcache:
            _fspathcache[dir] = _makefspathcacheentry(dir)
        contents = _fspathcache[dir]

        found = contents.get(part)
        if not found:
            # retry "once per directory" per "dirstate.walk" which
            # may take place for each patches of "hg qpush", for example
            _fspathcache[dir] = contents = _makefspathcacheentry(dir)
            found = contents.get(part)

        # fall back to the caller's spelling if the entry vanished
        result.append(found or part)
        dir = os.path.join(dir, part)

    return ''.join(result)
1272
1279
def checknlink(testfile):
    '''check whether hardlink count reporting works properly'''

    # testfile may be open, so probe with separate scratch files to
    # work around issue2543 (or testfile may get lost on Samba shares)
    probe = testfile + ".hgtmp1"
    if os.path.lexists(probe):
        return False
    try:
        posixfile(probe, 'w').close()
    except IOError:
        return False

    link = testfile + ".hgtmp2"
    fh = None
    try:
        oslink(probe, link)
        # nlinks() may behave differently for files on Windows shares if
        # the file is open.
        fh = posixfile(link)
        return nlinks(link) > 1
    except OSError:
        return False
    finally:
        if fh is not None:
            fh.close()
        # best-effort cleanup of both scratch files
        for scratch in (probe, link):
            try:
                os.unlink(scratch)
            except OSError:
                pass
1304
1311
def endswithsep(path):
    """Check path ends with os.sep or os.altsep."""
    if path.endswith(os.sep):
        return True
    # note: deliberately preserves the original truthy-but-not-True result
    # shape when os.altsep is unset
    return os.altsep and path.endswith(os.altsep)
1308
1315
def splitpath(path):
    '''Split path by os.sep.

    Note that this function does not use os.altsep because this is
    an alternative to a simple "xxx.split(os.sep)".
    It is recommended to apply os.path.normpath() before using this
    function if needed.'''
    return path.split(os.sep)
1316
1323
def gui():
    '''Are we running in a GUI?'''
    if sys.platform != 'darwin':
        return os.name == "nt" or os.environ.get("DISPLAY")
    # macOS: a terminal login over SSH has no GUI even though the
    # window server may be running for a console user
    if 'SSH_CONNECTION' in os.environ:
        return False
    isgui = getattr(osutil, 'isgui', None)
    if isgui:
        # check if a CoreGraphics session is available
        return isgui()
    # pure build; use a safe default
    return True
1331
1338
def mktempcopy(name, emptyok=False, createmode=None):
    """Create a temporary file holding a copy of name's contents.

    The permission bits are copied from the original file. If the
    temporary file is going to be truncated immediately, you can use
    emptyok=True as an optimization.

    Returns the name of the temporary file.
    """
    d, fn = os.path.split(name)
    fd, temp = tempfile.mkstemp(prefix='.%s-' % fn, dir=d)
    os.close(fd)
    # mkstemp creates files as 0600, which is usually not what we want;
    # mirror the original file's mode, or obey the umask if it is absent
    copymode(name, temp, createmode)
    if emptyok:
        return temp
    try:
        try:
            ifp = posixfile(name, "rb")
        except IOError as inst:
            if inst.errno == errno.ENOENT:
                # nothing to copy: the fresh empty temp file stands in
                return temp
            if not getattr(inst, 'filename', None):
                inst.filename = name
            raise
        ofp = posixfile(temp, "wb")
        for chunk in filechunkiter(ifp):
            ofp.write(chunk)
        ifp.close()
        ofp.close()
    except: # re-raises
        # don't leave a half-written temp file behind
        try:
            os.unlink(temp)
        except OSError:
            pass
        raise
    return temp
1370
1377
class atomictempfile(object):
    '''writable file object that atomically updates a file

    Every write goes to a temporary copy of the target file. close()
    renames that copy over the original name, publishing all writes in
    one atomic step. Destroying the object without closing it discards
    everything that was written.
    '''
    def __init__(self, name, mode='w+b', createmode=None):
        self.__name = name # permanent name
        self._tempname = mktempcopy(name, emptyok=('w' in mode),
                                    createmode=createmode)
        self._fp = posixfile(self._tempname, mode)

        # delegated methods
        self.write = self._fp.write
        self.seek = self._fp.seek
        self.tell = self._fp.tell
        self.fileno = self._fp.fileno

    def close(self):
        # publish: close the temp file, then rename it into place
        if self._fp.closed:
            return
        self._fp.close()
        rename(self._tempname, localpath(self.__name))

    def discard(self):
        # abandon: remove the temp file (best effort) and close it
        if self._fp.closed:
            return
        try:
            os.unlink(self._tempname)
        except OSError:
            pass
        self._fp.close()

    def __del__(self):
        # only clean up if __init__ got far enough to create state
        if safehasattr(self, '_fp'):
            self.discard()
1408
1415
def makedirs(name, mode=None, notindexed=False):
    """recursive directory creation with parent mode inheritance"""
    try:
        makedir(name, notindexed)
    except OSError as inst:
        if inst.errno == errno.EEXIST:
            return
        # only recurse into parent creation for a missing-path failure
        if inst.errno != errno.ENOENT or not name:
            raise
        parent = os.path.dirname(os.path.abspath(name))
        if parent == name:
            # reached the filesystem root without success
            raise
        makedirs(parent, mode, notindexed)
        makedir(name, notindexed)
    if mode is not None:
        os.chmod(name, mode)
1425
1432
def ensuredirs(name, mode=None, notindexed=False):
    """race-safe recursive directory creation

    Newly created directories are marked as "not to be indexed by
    the content indexing service", if ``notindexed`` is specified
    for "write" mode access.
    """
    if os.path.isdir(name):
        return
    parent = os.path.dirname(os.path.abspath(name))
    if parent != name:
        ensuredirs(parent, mode, notindexed)
    try:
        makedir(name, notindexed)
    except OSError as inst:
        if inst.errno == errno.EEXIST and os.path.isdir(name):
            # someone else won the directory creation race; that's fine
            return
        raise
    if mode is not None:
        os.chmod(name, mode)
1447
1454
def readfile(path):
    """Return the entire binary content of the file at path."""
    fp = open(path, 'rb')
    try:
        return fp.read()
    finally:
        fp.close()
1451
1458
def writefile(path, text):
    """Replace the file at path with the given binary content."""
    fp = open(path, 'wb')
    try:
        fp.write(text)
    finally:
        fp.close()
1455
1462
def appendfile(path, text):
    """Append the given binary content to the file at path."""
    fp = open(path, 'ab')
    try:
        fp.write(text)
    finally:
        fp.close()
1459
1466
class chunkbuffer(object):
    """Allow arbitrary sized chunks of data to be efficiently read from an
    iterator over chunks of arbitrary size."""

    def __init__(self, in_iter):
        """in_iter is the iterator that's iterating over the input chunks.
        targetsize is how big a buffer to try to maintain."""
        def splitbig(chunks):
            # re-chunk anything over 1MB into 256kB pieces so no single
            # huge string has to be held and sliced repeatedly
            for chunk in chunks:
                if len(chunk) <= 2**20:
                    yield chunk
                    continue
                start = 0
                while start < len(chunk):
                    yield chunk[start:start + 2**18]
                    start += 2**18
        self.iter = splitbig(in_iter)
        self._queue = collections.deque()
        # how many bytes of queue[0] have already been handed out
        self._chunkoffset = 0

    def read(self, l=None):
        """Read L bytes of data from the iterator of chunks of data.
        Returns less than L bytes if the iterator runs dry.

        If size parameter is omitted, read everything"""
        if l is None:
            return ''.join(self.iter)

        remaining = l
        pieces = []
        queue = self._queue
        while remaining > 0:
            if not queue:
                # pull roughly 256kB ahead into the queue
                budget = 2**18
                for chunk in self.iter:
                    queue.append(chunk)
                    budget -= len(chunk)
                    if budget <= 0:
                        break
                if not queue:
                    break

            # Peek at queue[0] instead of popleft/appendleft: for partial
            # reads that avoids two deque mutations and building a new
            # string for the leftover piece on every call.
            chunk = queue[0]
            offset = self._chunkoffset
            chunklen = len(chunk)

            # consume a whole untouched chunk
            if offset == 0 and remaining >= chunklen:
                queue.popleft()
                pieces.append(chunk)
                remaining -= chunklen
                # self._chunkoffset stays 0
                continue

            unconsumed = chunklen - offset
            if remaining >= unconsumed:
                # finish off a partially consumed chunk
                queue.popleft()
                # offset != 0 here, so this slice always copies a strict
                # suffix rather than duplicating the whole chunk
                pieces.append(chunk[offset:])
                self._chunkoffset = 0
                remaining -= unconsumed
            else:
                # take part of the chunk and remember where we stopped
                pieces.append(chunk[offset:offset + remaining])
                self._chunkoffset = offset + remaining
                remaining -= unconsumed

        return ''.join(pieces)
1540
1547
def filechunkiter(f, size=65536, limit=None):
    """Yield successive reads of up to ``size`` bytes from file object f.

    Stops after ``limit`` total bytes when a limit is given (default is
    to read until EOF). A yielded chunk may be shorter than ``size`` for
    the final chunk, or when f is a socket or some other type of file
    that sometimes returns short reads.
    """
    assert size >= 0
    assert limit is None or limit >= 0
    while True:
        nbytes = size if limit is None else min(limit, size)
        # 'nbytes and' short-circuits the read once the limit is exhausted
        s = nbytes and f.read(nbytes)
        if not s:
            break
        if limit:
            limit -= len(s)
        yield s
1561
1568
def makedate(timestamp=None):
    '''Return a unix timestamp (or the current time) as a (unixtime,
    offset) tuple based off the local timezone.'''
    if timestamp is None:
        timestamp = time.time()
    if timestamp < 0:
        # negative timestamps usually mean a badly set system clock
        raise Abort(_("negative timestamp: %d") % timestamp,
                    hint=_("check your clock"))
    # the UTC/local rendering difference of the same instant is the
    # timezone offset in effect at that instant
    localtime = datetime.datetime.fromtimestamp(timestamp)
    utctime = datetime.datetime.utcfromtimestamp(timestamp)
    offset = utctime - localtime
    return timestamp, offset.days * 86400 + offset.seconds
1574
1581
def datestr(date=None, format='%a %b %d %H:%M:%S %Y %1%2'):
    """represent a (unixtime, offset) tuple as a localized time.
    unixtime is seconds since the epoch, and offset is the time zone's
    number of seconds away from UTC. if timezone is false, do not
    append time zone to string."""
    t, tz = date or makedate()
    if t < 0:
        t = 0 # time.gmtime(lt) fails on Windows for lt < -43200
        tz = 0
    if "%1" in format or "%2" in format or "%z" in format:
        # expand %z / %1%2 into a +HHMM / -HHMM timezone suffix; the sign
        # is inverted because tz is seconds *behind* UTC
        sign = "-" if tz > 0 else "+"
        q, r = divmod(abs(tz) // 60, 60)
        format = format.replace("%z", "%1%2")
        format = format.replace("%1", "%c%02d" % (sign, q))
        format = format.replace("%2", "%02d" % r)
    try:
        t = time.gmtime(float(t) - tz)
    except ValueError:
        # time was out of range
        t = time.gmtime(sys.maxint)
    return time.strftime(format, t)
1598
1605
def shortdate(date=None):
    """turn (timestamp, tzoff) tuple into ISO 8601 date (YYYY-MM-DD)."""
    return datestr(date, format='%Y-%m-%d')
1602
1609
def parsetimezone(tz):
    """parse a timezone string and return an offset integer

    Accepts "+HHMM"/"-HHMM" and the literal names "GMT" and "UTC".
    Returns the offset in seconds behind UTC (Mercurial's convention:
    positive offsets are west of UTC), or None if tz is unrecognized.
    """
    # robustness: an empty string used to raise IndexError on tz[0]
    if not tz:
        return None
    if tz[0] in "+-" and len(tz) == 5 and tz[1:].isdigit():
        sign = 1 if tz[0] == "+" else -1
        hours = int(tz[1:3])
        minutes = int(tz[3:5])
        # negate: "+0100" means one hour *ahead* of UTC, i.e. -3600 here
        return -sign * (hours * 60 + minutes) * 60
    if tz == "GMT" or tz == "UTC":
        return 0
    return None
1613
1620
def strdate(string, format, defaults=None):
    """parse a localized time string and return a (unixtime, offset) tuple.
    if the string cannot be parsed, ValueError is raised.

    defaults maps specificity classes ("S", "M", "HI", "d", "mb", "yY")
    to (biased, now) fallback strings used for elements missing from the
    input string.
    """
    if defaults is None:
        # fix: the old default was a mutable [] — an anti-pattern, and the
        # wrong type anyway, since defaults is indexed with string keys below
        defaults = {}
    # NOTE: unixtime = localunixtime + offset
    offset, date = parsetimezone(string.split()[-1]), string
    if offset is not None:
        date = " ".join(string.split()[:-1])

    # add missing elements from defaults
    usenow = False # default to using biased defaults
    for part in ("S", "M", "HI", "d", "mb", "yY"): # decreasing specificity
        found = [True for p in part if ("%"+p) in format]
        if not found:
            date += "@" + defaults[part][usenow]
            format += "@%" + part[0]
        else:
            # We've found a specific time element, less specific time
            # elements are relative to today
            usenow = True

    timetuple = time.strptime(date, format)
    localunixtime = int(calendar.timegm(timetuple))
    if offset is None:
        # local timezone
        unixtime = int(time.mktime(timetuple))
        offset = unixtime - localunixtime
    else:
        unixtime = localunixtime + offset
    return unixtime, offset
1643
1650
def parsedate(date, formats=None, bias=None):
    """parse a localized date/time and return a (unixtime, offset) tuple.

    The date may be a "unixtime offset" string or in one of the specified
    formats. If the date already is a (unixtime, offset) tuple, it is returned.

    bias, if given, maps strptime field groups (e.g. "d", "mb", "HI") to
    string defaults used when the field is absent from the input.

    Raises Abort when no format matches or the result is out of range.

    >>> parsedate(' today ') == parsedate(\
                                  datetime.date.today().strftime('%b %d'))
    True
    >>> parsedate( 'yesterday ') == parsedate((datetime.date.today() -\
                                               datetime.timedelta(days=1)\
                                              ).strftime('%b %d'))
    True
    >>> now, tz = makedate()
    >>> strnow, strtz = parsedate('now')
    >>> (strnow - now) < 1
    True
    >>> tz == strtz
    True
    """
    if bias is None:
        bias = {}
    if not date:
        return 0, 0
    if isinstance(date, tuple) and len(date) == 2:
        # already parsed
        return date
    if not formats:
        formats = defaultdateformats
    date = date.strip()

    # symbolic dates (both the English keyword and its translation)
    if date == 'now' or date == _('now'):
        return makedate()
    if date == 'today' or date == _('today'):
        date = datetime.date.today().strftime('%b %d')
    elif date == 'yesterday' or date == _('yesterday'):
        date = (datetime.date.today() -
                datetime.timedelta(days=1)).strftime('%b %d')

    try:
        # fastpath: internal "unixtime offset" representation
        when, offset = map(int, date.split(' '))
    except ValueError:
        # fill out defaults
        now = makedate()
        defaults = {}
        for part in ("d", "mb", "yY", "HI", "M", "S"):
            # this piece is for rounding the specific end of unknowns
            b = bias.get(part)
            if b is None:
                if part[0] in "HMS":
                    b = "00"
                else:
                    b = "0"

            # this piece is for matching the generic end to today's date
            n = datestr(now, "%" + part[0])

            defaults[part] = (b, n)

        # try each candidate format until one parses; the for/else
        # fires only when every format failed
        for format in formats:
            try:
                when, offset = strdate(date, format, defaults)
            except (ValueError, OverflowError):
                pass
            else:
                break
        else:
            raise Abort(_('invalid date: %r') % date)
    # validate explicit (probably user-specified) date and
    # time zone offset. values must fit in signed 32 bits for
    # current 32-bit linux runtimes. timezones go from UTC-12
    # to UTC+14
    if abs(when) > 0x7fffffff:
        raise Abort(_('date exceeds 32 bits: %d') % when)
    if when < 0:
        raise Abort(_('negative date value: %d') % when)
    if offset < -50400 or offset > 43200:
        raise Abort(_('impossible time zone offset: %d') % offset)
    return when, offset
1722
1729
def matchdate(date):
    """Return a function that matches a given date match specifier

    Formats include:

    '{date}' match a given date to the accuracy provided

    '<{date}' on or before a given date

    '>{date}' on or after a given date

    >>> p1 = parsedate("10:29:59")
    >>> p2 = parsedate("10:30:00")
    >>> p3 = parsedate("10:30:59")
    >>> p4 = parsedate("10:31:00")
    >>> p5 = parsedate("Sep 15 10:30:00 1999")
    >>> f = matchdate("10:30")
    >>> f(p1[0])
    False
    >>> f(p2[0])
    True
    >>> f(p3[0])
    True
    >>> f(p4[0])
    False
    >>> f(p5[0])
    False
    """

    def lower(date):
        # round every unspecified field down (Jan 1st, 00:00:00)
        d = {'mb': "1", 'd': "1"}
        return parsedate(date, extendeddateformats, d)[0]

    def upper(date):
        # round every unspecified field up (Dec 31st, 23:59:59); try the
        # longest month lengths first and fall back for shorter months,
        # since e.g. "Feb 31" makes parsedate raise Abort
        d = {'mb': "12", 'HI': "23", 'M': "59", 'S': "59"}
        for days in ("31", "30", "29"):
            try:
                d["d"] = days
                return parsedate(date, extendeddateformats, d)[0]
            except Abort:
                pass
        d["d"] = "28"
        return parsedate(date, extendeddateformats, d)[0]

    date = date.strip()

    if not date:
        raise Abort(_("dates cannot consist entirely of whitespace"))
    elif date[0] == "<":
        if not date[1:]:
            raise Abort(_("invalid day spec, use '<DATE'"))
        when = upper(date[1:])
        return lambda x: x <= when
    elif date[0] == ">":
        if not date[1:]:
            raise Abort(_("invalid day spec, use '>DATE'"))
        when = lower(date[1:])
        return lambda x: x >= when
    elif date[0] == "-":
        # '-N': anything within the last N days
        try:
            days = int(date[1:])
        except ValueError:
            raise Abort(_("invalid day spec: %s") % date[1:])
        if days < 0:
            raise Abort(_('%s must be nonnegative (see "hg help dates")')
                        % date[1:])
        when = makedate()[0] - days * 3600 * 24
        return lambda x: x >= when
    elif " to " in date:
        # explicit 'DATE to DATE' range
        a, b = date.split(" to ")
        start, stop = lower(a), upper(b)
        return lambda x: x >= start and x <= stop
    else:
        # a single date matches the whole period it describes
        start, stop = lower(date), upper(date)
        return lambda x: x >= start and x <= stop
1798
1805
def stringmatcher(pattern):
    """
    accepts a string, possibly starting with 're:' or 'literal:' prefix.
    returns the matcher name, pattern, and matcher function.
    missing or unknown prefixes are treated as literal matches.

    helper for tests:
    >>> def test(pattern, *tests):
    ...     kind, pattern, matcher = stringmatcher(pattern)
    ...     return (kind, pattern, [bool(matcher(t)) for t in tests])

    exact matching (no prefix):
    >>> test('abcdefg', 'abc', 'def', 'abcdefg')
    ('literal', 'abcdefg', [False, False, True])

    regex matching ('re:' prefix)
    >>> test('re:a.+b', 'nomatch', 'fooadef', 'fooadefbar')
    ('re', 'a.+b', [False, False, True])

    force exact matches ('literal:' prefix)
    >>> test('literal:re:foobar', 'foobar', 're:foobar')
    ('literal', 're:foobar', [False, True])

    unknown prefixes are ignored and treated as literals
    >>> test('foo:bar', 'foo', 'bar', 'foo:bar')
    ('literal', 'foo:bar', [False, False, True])
    """
    reprefix = 're:'
    litprefix = 'literal:'
    if pattern.startswith(reprefix):
        body = pattern[len(reprefix):]
        try:
            compiled = remod.compile(body)
        except remod.error as e:
            raise error.ParseError(_('invalid regular expression: %s')
                                   % e)
        return 're', body, compiled.search
    # a 'literal:' prefix is stripped; any other prefix is left in place
    # and matched literally
    if pattern.startswith(litprefix):
        pattern = pattern[len(litprefix):]
    return 'literal', pattern, pattern.__eq__
1837
1844
def shortuser(user):
    """Return a short representation of a user name or email address."""
    # drop the mail domain, if any
    idx = user.find('@')
    if idx != -1:
        user = user[:idx]
    # drop a leading "Real Name <" part, if any
    idx = user.find('<')
    if idx != -1:
        user = user[idx + 1:]
    # keep only the first word, then only the part before the first dot
    for sep in (' ', '.'):
        idx = user.find(sep)
        if idx != -1:
            user = user[:idx]
    return user
1853
1860
def emailuser(user):
    """Return the user portion of an email address."""
    # cut at the '@' first, then drop any "Real Name <" prefix
    at = user.find('@')
    if at != -1:
        user = user[:at]
    lt = user.find('<')
    if lt != -1:
        user = user[lt + 1:]
    return user
1863
1870
def email(author):
    '''get email of author.'''
    # slice between '<' and '>'; find('<') + 1 is 0 when '<' is absent,
    # and a missing '>' means "take everything to the end"
    end = author.find('>')
    if end < 0:
        end = None
    start = author.find('<') + 1
    return author[start:end]
1870
1877
def ellipsis(text, maxlength=400):
    """Trim string to at most maxlength (default: 400) columns in display."""
    # encoding.trim counts display columns (east-asian wide characters
    # count as two), not bytes or characters
    return encoding.trim(text, maxlength, ellipsis='...')
1874
1881
def unitcountfn(*unittable):
    '''return a function that renders a readable count of some quantity'''

    def go(count):
        # scan from the most specific unit down; the first threshold met
        # wins, so callers must order rows from large to small
        for multiplier, divisor, fmt in unittable:
            threshold = divisor * multiplier
            if count >= threshold:
                return fmt % (count / float(divisor))
        # nothing matched: render with the least specific entry
        fallback = unittable[-1][2]
        return fallback % count

    return go
1885
1892
# render a byte count with a reasonable unit and precision; rows are
# ordered from most specific (large, low-precision) down to bytes, so
# the first threshold met wins
bytecount = unitcountfn(
    (100, 1 << 30, _('%.0f GB')),
    (10, 1 << 30, _('%.1f GB')),
    (1, 1 << 30, _('%.2f GB')),
    (100, 1 << 20, _('%.0f MB')),
    (10, 1 << 20, _('%.1f MB')),
    (1, 1 << 20, _('%.2f MB')),
    (100, 1 << 10, _('%.0f KB')),
    (10, 1 << 10, _('%.1f KB')),
    (1, 1 << 10, _('%.2f KB')),
    (1, 1, _('%.0f bytes')),
    )
1898
1905
def uirepr(s):
    """Return repr(s) with doubled backslashes collapsed.

    Avoids double backslash in Windows path repr().
    """
    escaped = repr(s)
    return escaped.replace('\\\\', '\\')
1902
1909
# delay import of textwrap
def MBTextWrapper(**kwargs):
    class tw(textwrap.TextWrapper):
        """
        Extend TextWrapper for width-awareness.

        Neither number of 'bytes' in any encoding nor 'characters' is
        appropriate to calculate terminal columns for specified string.

        Original TextWrapper implementation uses built-in 'len()' directly,
        so overriding is needed to use width information of each characters.

        In addition, characters classified into 'ambiguous' width are
        treated as wide in East Asian area, but as narrow in other.

        This requires use decision to determine width of such characters.
        """
        def _cutdown(self, ucstr, space_left):
            # split ucstr at the display-column boundary space_left,
            # counting columns (not characters) with encoding.ucolwidth
            l = 0
            colwidth = encoding.ucolwidth
            for i in xrange(len(ucstr)):
                l += colwidth(ucstr[i])
                if space_left < l:
                    return (ucstr[:i], ucstr[i:])
            return ucstr, ''

        # overriding of base class
        def _handle_long_word(self, reversed_chunks, cur_line, cur_len, width):
            space_left = max(width - cur_len, 1)

            if self.break_long_words:
                cut, res = self._cutdown(reversed_chunks[-1], space_left)
                cur_line.append(cut)
                reversed_chunks[-1] = res
            elif not cur_line:
                cur_line.append(reversed_chunks.pop())

        # this overriding code is imported from TextWrapper of Python 2.6
        # to calculate columns of string by 'encoding.ucolwidth()'
        def _wrap_chunks(self, chunks):
            colwidth = encoding.ucolwidth

            lines = []
            if self.width <= 0:
                raise ValueError("invalid width %r (must be > 0)" % self.width)

            # Arrange in reverse order so items can be efficiently popped
            # from a stack of chucks.
            chunks.reverse()

            while chunks:

                # Start the list of chunks that will make up the current line.
                # cur_len is just the length of all the chunks in cur_line.
                cur_line = []
                cur_len = 0

                # Figure out which static string will prefix this line.
                if lines:
                    indent = self.subsequent_indent
                else:
                    indent = self.initial_indent

                # Maximum width for this line.
                width = self.width - len(indent)

                # First chunk on line is whitespace -- drop it, unless this
                # is the very beginning of the text (i.e. no lines started yet).
                if self.drop_whitespace and chunks[-1].strip() == '' and lines:
                    del chunks[-1]

                while chunks:
                    l = colwidth(chunks[-1])

                    # Can at least squeeze this chunk onto the current line.
                    if cur_len + l <= width:
                        cur_line.append(chunks.pop())
                        cur_len += l

                    # Nope, this line is full.
                    else:
                        break

                # The current line is full, and the next chunk is too big to
                # fit on *any* line (not just this one).
                if chunks and colwidth(chunks[-1]) > width:
                    self._handle_long_word(chunks, cur_line, cur_len, width)

                # If the last chunk on this line is all whitespace, drop it.
                if (self.drop_whitespace and
                    cur_line and cur_line[-1].strip() == ''):
                    del cur_line[-1]

                # Convert current line back to a string and store it in list
                # of all lines (return value).
                if cur_line:
                    lines.append(indent + ''.join(cur_line))

            return lines

    # cache the class at module level on first use, so subsequent calls
    # skip the class definition entirely
    global MBTextWrapper
    MBTextWrapper = tw
    return tw(**kwargs)
2006
2013
def wrap(line, width, initindent='', hangindent=''):
    """Wrap line (a local-encoding byte string) to width display columns.

    initindent prefixes the first output line, hangindent all subsequent
    ones. Returns a byte string in the local encoding.
    """
    maxindent = max(len(hangindent), len(initindent))
    if width <= maxindent:
        # adjust for weird terminal size
        width = max(78, maxindent + 1)
    # do the wrapping in unicode so that column accounting is per
    # character, then re-encode on the way out (Python 2 str/unicode)
    line = line.decode(encoding.encoding, encoding.encodingmode)
    initindent = initindent.decode(encoding.encoding, encoding.encodingmode)
    hangindent = hangindent.decode(encoding.encoding, encoding.encodingmode)
    wrapper = MBTextWrapper(width=width,
                            initial_indent=initindent,
                            subsequent_indent=hangindent)
    return wrapper.fill(line).encode(encoding.encoding)
2019
2026
def iterlines(iterator):
    """Yield each text line of every chunk produced by iterator."""
    for piece in iterator:
        pieces = piece.splitlines()
        for ln in pieces:
            yield ln
2024
2031
def expandpath(path):
    """Expand environment variables, then '~' constructs, in path."""
    withvars = os.path.expandvars(path)
    return os.path.expanduser(withvars)
2027
2034
def hgcmd():
    """Return the command used to execute current hg

    This is different from hgexecutable() because on Windows we want
    to avoid things opening new shell windows like batch files, so we
    get either the python call or current executable.

    Returns a list suitable as an argv prefix.
    """
    if mainfrozen():
        if getattr(sys, 'frozen', None) == 'macosx_app':
            # Env variable set by py2app
            return [os.environ['EXECUTABLEPATH']]
        else:
            # other frozen builds (py2exe etc.): the binary itself is hg
            return [sys.executable]
    # not frozen: delegate to the platform-specific helper
    return gethgcmd()
2042
2049
def rundetached(args, condfn):
    """Execute the argument list in a detached process.

    condfn is a callable which is called repeatedly and should return
    True once the child process is known to have started successfully.
    At this point, the child process PID is returned. If the child
    process fails to start or finishes before condfn() evaluates to
    True, return -1.
    """
    # Windows case is easier because the child process is either
    # successfully starting and validating the condition or exiting
    # on failure. We just poll on its PID. On Unix, if the child
    # process fails to start, it will be left in a zombie state until
    # the parent wait on it, which we cannot do since we expect a long
    # running process on success. Instead we listen for SIGCHLD telling
    # us our child process terminated.
    terminated = set()
    def handler(signum, frame):
        # reap the child and remember its (pid, status)
        terminated.add(os.wait())
    prevhandler = None
    SIGCHLD = getattr(signal, 'SIGCHLD', None)
    if SIGCHLD is not None:
        prevhandler = signal.signal(SIGCHLD, handler)
    try:
        pid = spawndetached(args)
        while not condfn():
            # re-check condfn() after learning the child is gone, to
            # close the race where it succeeds just before exiting
            if ((pid in terminated or not testpid(pid))
                and not condfn()):
                return -1
            time.sleep(0.1)
        return pid
    finally:
        # restore the previous SIGCHLD handler; prevhandler is only set
        # on platforms that actually have SIGCHLD
        if prevhandler is not None:
            signal.signal(signal.SIGCHLD, prevhandler)
2077
2084
def interpolate(prefix, mapping, s, fn=None, escape_prefix=False):
    """Return the result of interpolating items in the mapping into string s.

    prefix is a single character string, or a two character string with
    a backslash as the first character if the prefix needs to be escaped in
    a regular expression.

    fn is an optional function that will be applied to the replacement text
    just before replacement.

    escape_prefix is an optional flag that allows using doubled prefix for
    its escaping.
    """
    fn = fn or (lambda s: s)
    # alternation order follows dict iteration order of mapping.keys()
    patterns = '|'.join(mapping.keys())
    if escape_prefix:
        patterns += '|' + prefix
        if len(prefix) > 1:
            # strip the leading backslash used for regex escaping
            prefix_char = prefix[1:]
        else:
            prefix_char = prefix
        # NOTE(review): this writes into the caller's mapping so the
        # doubled prefix resolves to a literal prefix character; callers
        # should not pass a mapping they need unmodified
        mapping[prefix_char] = prefix_char
    r = remod.compile(r'%s(%s)' % (prefix, patterns))
    # x.group()[1:] drops the prefix character, leaving the matched key
    return r.sub(lambda x: fn(mapping[x.group()[1:]]), s)
2102
2109
def getport(port):
    """Return the port for a given network service.

    If port is an integer, it's returned as is. If it's a string, it's
    looked up using socket.getservbyname(). If there's no matching
    service, error.Abort is raised.
    """
    try:
        return int(port)
    except ValueError:
        # not numeric: fall through to a service-name lookup
        try:
            return socket.getservbyname(port)
        except socket.error:
            raise Abort(_("no port number associated with service '%s'")
                        % port)
2119
2126
# canonical boolean spellings accepted by parsebool(), mapped to their
# value; lookups are done on the lowercased input
_booleans = {'1': True, 'yes': True, 'true': True, 'on': True, 'always': True,
             '0': False, 'no': False, 'false': False, 'off': False,
             'never': False}
2123
2130
def parsebool(s):
    """Parse s into a boolean.

    If s is not a valid boolean, returns None.
    """
    normalized = s.lower()
    if normalized in _booleans:
        return _booleans[normalized]
    return None
2130
2137
_hexdig = '0123456789ABCDEFabcdef'
# map every two-hex-digit string (any case mix) to its character, used
# by _urlunquote to decode %XX escapes without int() calls per item
_hextochr = dict((a + b, chr(int(a + b, 16)))
                 for a in _hexdig for b in _hexdig)
2134
2141
def _urlunquote(s):
    """Decode HTTP/HTML % encoding.

    >>> _urlunquote('abc%20def')
    'abc def'
    """
    res = s.split('%')
    # fastpath
    if len(res) == 1:
        return s
    s = res[0]
    for item in res[1:]:
        try:
            s += _hextochr[item[:2]] + item[2:]
        except KeyError:
            # not a valid two-digit hex escape; keep the '%' literally
            s += '%' + item
        except UnicodeDecodeError:
            # s is unicode and the decoded byte didn't coerce; build the
            # code point directly instead (unichr is Python 2 only)
            s += unichr(int(item[:2], 16)) + item[2:]
    return s
2154
2161
class url(object):
    r"""Reliable URL parser.

    This parses URLs and provides attributes for the following
    components:

    <scheme>://<user>:<passwd>@<host>:<port>/<path>?<query>#<fragment>

    Missing components are set to None. The only exception is
    fragment, which is set to '' if present but empty.

    If parsefragment is False, fragment is included in query. If
    parsequery is False, query is included in path. If both are
    False, both fragment and query are included in path.

    See http://www.ietf.org/rfc/rfc2396.txt for more information.

    Note that for backward compatibility reasons, bundle URLs do not
    take host names. That means 'bundle://../' has a path of '../'.

    Examples:

    >>> url('http://www.ietf.org/rfc/rfc2396.txt')
    <url scheme: 'http', host: 'www.ietf.org', path: 'rfc/rfc2396.txt'>
    >>> url('ssh://[::1]:2200//home/joe/repo')
    <url scheme: 'ssh', host: '[::1]', port: '2200', path: '/home/joe/repo'>
    >>> url('file:///home/joe/repo')
    <url scheme: 'file', path: '/home/joe/repo'>
    >>> url('file:///c:/temp/foo/')
    <url scheme: 'file', path: 'c:/temp/foo/'>
    >>> url('bundle:foo')
    <url scheme: 'bundle', path: 'foo'>
    >>> url('bundle://../foo')
    <url scheme: 'bundle', path: '../foo'>
    >>> url(r'c:\foo\bar')
    <url path: 'c:\\foo\\bar'>
    >>> url(r'\\blah\blah\blah')
    <url path: '\\\\blah\\blah\\blah'>
    >>> url(r'\\blah\blah\blah#baz')
    <url path: '\\\\blah\\blah\\blah', fragment: 'baz'>
    >>> url(r'file:///C:\users\me')
    <url scheme: 'file', path: 'C:\\users\\me'>

    Authentication credentials:

    >>> url('ssh://joe:xyz@x/repo')
    <url scheme: 'ssh', user: 'joe', passwd: 'xyz', host: 'x', path: 'repo'>
    >>> url('ssh://joe@x/repo')
    <url scheme: 'ssh', user: 'joe', host: 'x', path: 'repo'>

    Query strings and fragments:

    >>> url('http://host/a?b#c')
    <url scheme: 'http', host: 'host', path: 'a', query: 'b', fragment: 'c'>
    >>> url('http://host/a?b#c', parsequery=False, parsefragment=False)
    <url scheme: 'http', host: 'host', path: 'a?b#c'>
    """

    _safechars = "!~*'()+"
    _safepchars = "/!~*'()+:\\"
    _matchscheme = remod.compile(r'^[a-zA-Z0-9+.\-]+:').match

    def __init__(self, path, parsequery=True, parsefragment=True):
        # We slowly chomp away at path until we have only the path left
        self.scheme = self.user = self.passwd = self.host = None
        self.port = self.path = self.query = self.fragment = None
        # _localpath stays True for plain filesystem paths (no scheme)
        self._localpath = True
        self._hostport = ''
        self._origpath = path

        if parsefragment and '#' in path:
            path, self.fragment = path.split('#', 1)
            if not path:
                path = None

        # special case for Windows drive letters and UNC paths
        if hasdriveletter(path) or path.startswith(r'\\'):
            self.path = path
            return

        # For compatibility reasons, we can't handle bundle paths as
        # normal URLS
        if path.startswith('bundle:'):
            self.scheme = 'bundle'
            path = path[7:]
            if path.startswith('//'):
                path = path[2:]
            self.path = path
            return

        # split off the scheme prefix, if any
        if self._matchscheme(path):
            parts = path.split(':', 1)
            if parts[0]:
                self.scheme, path = parts
                self._localpath = False

        if not path:
            path = None
            if self._localpath:
                self.path = ''
                return
        else:
            if self._localpath:
                self.path = path
                return

            if parsequery and '?' in path:
                path, self.query = path.split('?', 1)
                if not path:
                    path = None
                if not self.query:
                    self.query = None

            # // is required to specify a host/authority
            if path and path.startswith('//'):
                parts = path[2:].split('/', 1)
                if len(parts) > 1:
                    self.host, path = parts
                else:
                    self.host = parts[0]
                    path = None
                if not self.host:
                    self.host = None
                    # path of file:///d is /d
                    # path of file:///d:/ is d:/, not /d:/
                    if path and not hasdriveletter(path):
                        path = '/' + path

            if self.host and '@' in self.host:
                self.user, self.host = self.host.rsplit('@', 1)
                if ':' in self.user:
                    self.user, self.passwd = self.user.split(':', 1)
                if not self.host:
                    self.host = None

            # Don't split on colons in IPv6 addresses without ports
            if (self.host and ':' in self.host and
                not (self.host.startswith('[') and self.host.endswith(']'))):
                self._hostport = self.host
                self.host, self.port = self.host.rsplit(':', 1)
                if not self.host:
                    self.host = None

            if (self.host and self.scheme == 'file' and
                self.host not in ('localhost', '127.0.0.1', '[::1]')):
                raise Abort(_('file:// URLs can only refer to localhost'))

        self.path = path

        # leave the query string escaped
        for a in ('user', 'passwd', 'host', 'port',
                  'path', 'fragment'):
            v = getattr(self, a)
            if v is not None:
                setattr(self, a, _urlunquote(v))

    def __repr__(self):
        """Return a debug representation listing only the set components."""
        attrs = []
        for a in ('scheme', 'user', 'passwd', 'host', 'port', 'path',
                  'query', 'fragment'):
            v = getattr(self, a)
            if v is not None:
                attrs.append('%s: %r' % (a, v))
        return '<url %s>' % ', '.join(attrs)

    def __str__(self):
        r"""Join the URL's components back into a URL string.

        Examples:

        >>> str(url('http://user:pw@host:80/c:/bob?fo:oo#ba:ar'))
        'http://user:pw@host:80/c:/bob?fo:oo#ba:ar'
        >>> str(url('http://user:pw@host:80/?foo=bar&baz=42'))
        'http://user:pw@host:80/?foo=bar&baz=42'
        >>> str(url('http://user:pw@host:80/?foo=bar%3dbaz'))
        'http://user:pw@host:80/?foo=bar%3dbaz'
        >>> str(url('ssh://user:pw@[::1]:2200//home/joe#'))
        'ssh://user:pw@[::1]:2200//home/joe#'
        >>> str(url('http://localhost:80//'))
        'http://localhost:80//'
        >>> str(url('http://localhost:80/'))
        'http://localhost:80/'
        >>> str(url('http://localhost:80'))
        'http://localhost:80/'
        >>> str(url('bundle:foo'))
        'bundle:foo'
        >>> str(url('bundle://../foo'))
        'bundle:../foo'
        >>> str(url('path'))
        'path'
        >>> str(url('file:///tmp/foo/bar'))
        'file:///tmp/foo/bar'
        >>> str(url('file:///c:/tmp/foo/bar'))
        'file:///c:/tmp/foo/bar'
        >>> print url(r'bundle:foo\bar')
        bundle:foo\bar
        >>> print url(r'file:///D:\data\hg')
        file:///D:\data\hg
        """
        if self._localpath:
            s = self.path
            if self.scheme == 'bundle':
                s = 'bundle:' + s
            if self.fragment:
                s += '#' + self.fragment
            return s

        s = self.scheme + ':'
        if self.user or self.passwd or self.host:
            s += '//'
        elif self.scheme and (not self.path or self.path.startswith('/')
                              or hasdriveletter(self.path)):
            s += '//'
            if hasdriveletter(self.path):
                s += '/'
        if self.user:
            s += urllib.quote(self.user, safe=self._safechars)
        if self.passwd:
            s += ':' + urllib.quote(self.passwd, safe=self._safechars)
        if self.user or self.passwd:
            s += '@'
        if self.host:
            # bracketed IPv6 literals must not be percent-quoted
            if not (self.host.startswith('[') and self.host.endswith(']')):
                s += urllib.quote(self.host)
            else:
                s += self.host
        if self.port:
            s += ':' + urllib.quote(self.port)
        if self.host:
            s += '/'
        if self.path:
            # TODO: similar to the query string, we should not unescape the
            # path when we store it, the path might contain '%2f' = '/',
            # which we should *not* escape.
            s += urllib.quote(self.path, safe=self._safepchars)
        if self.query:
            # we store the query in escaped form.
            s += '?' + self.query
        if self.fragment is not None:
            s += '#' + urllib.quote(self.fragment, safe=self._safepchars)
        return s

    def authinfo(self):
        """Return (url-string, authinfo) with credentials stripped from
        the URL; authinfo is None when no user is set, otherwise a
        (realm, uris, user, passwd) tuple for a urllib2 password manager."""
        user, passwd = self.user, self.passwd
        try:
            self.user, self.passwd = None, None
            s = str(self)
        finally:
            self.user, self.passwd = user, passwd
        if not self.user:
            return (s, None)
        # authinfo[1] is passed to urllib2 password manager, and its
        # URIs must not contain credentials. The host is passed in the
        # URIs list because Python < 2.4.3 uses only that to search for
        # a password.
        return (s, (None, (s, self.host),
                    self.user, self.passwd or ''))

    def isabs(self):
        """True if this URL cannot meaningfully be joined onto a base path."""
        if self.scheme and self.scheme != 'file':
            return True # remote URL
        if hasdriveletter(self.path):
            return True # absolute for our purposes - can't be joined()
        if self.path.startswith(r'\\'):
            return True # Windows UNC path
        if self.path.startswith('/'):
            return True # POSIX-style
        return False

    def localpath(self):
        if self.scheme == 'file' or self.scheme == 'bundle':
            path = self.path or '/'
            # For Windows, we need to promote hosts containing drive
            # letters to paths with drive letters.
            if hasdriveletter(self._hostport):
                path = self._hostport + '/' + self.path
            elif (self.host is not None and self.path
                  and not hasdriveletter(path)):
                path = '/' + path
            return path
        return self._origpath

    def islocal(self):
        '''whether localpath will return something that posixfile can open'''
        return (not self.scheme or self.scheme == 'file'
                or self.scheme == 'bundle')
2441
2448
def hasscheme(path):
    '''Report whether path carries an explicit URL scheme.'''
    parsed = url(path)
    return bool(parsed.scheme)
2444
2451
def hasdriveletter(path):
    '''Report whether path begins with a Windows drive letter ("c:...").

    A falsy path is returned unchanged, matching the short-circuit
    behavior of the ``path and ...`` expression this replaces.
    '''
    if not path:
        return path
    return path[1:2] == ':' and path[0:1].isalpha()
2447
2454
def urllocalpath(path):
    '''Return the local filesystem path for a possibly URL-style path.'''
    parsed = url(path, parsequery=False, parsefragment=False)
    return parsed.localpath()
2450
2457
def hidepassword(u):
    '''hide user credential in a url string'''
    parsed = url(u)
    if parsed.passwd:
        # Mask rather than drop, so the URL shape stays recognizable.
        parsed.passwd = '***'
    return str(parsed)
2457
2464
def removeauth(u):
    '''remove all authentication information from a url string'''
    parsed = url(u)
    parsed.user = None
    parsed.passwd = None
    return str(parsed)
2463
2470
def isatty(fp):
    '''Best-effort check whether fp is attached to a terminal.'''
    try:
        result = fp.isatty()
    except AttributeError:
        # Objects without a working isatty() are never terminals.
        result = False
    return result
2469
2476
# Human-readable formatter for elapsed times, built from
# (factor, divisor, format) rows spanning s/ms/us/ns so values render with
# about three significant digits. Row selection is done by unitcountfn,
# defined earlier in this file -- see it for the exact matching rule.
timecount = unitcountfn(
    (1, 1e3, _('%.0f s')),
    (100, 1, _('%.1f s')),
    (10, 1, _('%.2f s')),
    (1, 1, _('%.3f s')),
    (100, 0.001, _('%.1f ms')),
    (10, 0.001, _('%.2f ms')),
    (1, 0.001, _('%.3f ms')),
    (100, 0.000001, _('%.1f us')),
    (10, 0.000001, _('%.2f us')),
    (1, 0.000001, _('%.3f us')),
    (100, 0.000000001, _('%.1f ns')),
    (10, 0.000000001, _('%.2f ns')),
    (1, 0.000000001, _('%.3f ns')),
    )
2485
2492
# Shared nesting depth for @timed wrappers; a one-element list so nested
# calls can mutate it in place.
_timenesting = [0]

def timed(func):
    '''Report the execution time of a function call to stderr.

    During development, use as a decorator when you need to measure
    the cost of a function, e.g. as follows:

    @util.timed
    def foo(a, b, c):
        pass
    '''

    def wrapper(*args, **kwargs):
        indent = 2
        begin = time.time()
        _timenesting[0] += indent
        try:
            return func(*args, **kwargs)
        finally:
            # Report even when func raises, indented by nesting depth.
            duration = time.time() - begin
            _timenesting[0] -= indent
            sys.stderr.write('%s%s: %s\n' %
                             (' ' * _timenesting[0], func.__name__,
                              timecount(duration)))
    return wrapper
2512
2519
# Suffix -> multiplier table; longer suffixes ('kb') are listed after the
# short forms but still match first because endswith('k') fails for 'kb'.
_sizeunits = (('m', 2**20), ('k', 2**10), ('g', 2**30),
              ('kb', 2**10), ('mb', 2**20), ('gb', 2**30), ('b', 1))

def sizetoint(s):
    '''Convert a space specifier to a byte count.

    >>> sizetoint('30')
    30
    >>> sizetoint('2.2kb')
    2252
    >>> sizetoint('6M')
    6291456
    '''
    text = s.strip().lower()
    try:
        for suffix, multiplier in _sizeunits:
            if text.endswith(suffix):
                return int(float(text[:-len(suffix)]) * multiplier)
        return int(text)
    except ValueError:
        raise error.ParseError(_("couldn't parse size: %s") % s)
2534
2541
class hooks(object):
    '''A collection of hook functions that can be used to extend a
    function's behavior. Hooks are called in lexicographic order,
    based on the names of their sources.'''

    def __init__(self):
        # (source, hook) pairs; left unsorted until invocation.
        self._hooks = []

    def add(self, source, hook):
        # source only determines call ordering, not identity.
        self._hooks.append((source, hook))

    def __call__(self, *args):
        # Sort in place by source name, then run every hook with args.
        self._hooks.sort(key=lambda pair: pair[0])
        return [hook(*args) for _source, hook in self._hooks]
2552
2559
def getstackframes(skip=0, line=' %-*s in %s\n', fileline='%s:%s'):
    '''Yields lines for a nicely formatted stacktrace.
    Skips the 'skip' last entries.
    Each file+linenumber is formatted according to fileline.
    Each line is formatted according to line.
    If line is None, it yields:
      length of longest filepath+line number,
      filepath+linenumber,
      function

    Not be used in production code but very convenient while developing.
    '''
    # Drop this frame plus the 'skip' most recent caller frames.
    frames = traceback.extract_stack()[:-skip - 1]
    entries = []
    for fn, ln, func, _text in frames:
        entries.append((fileline % (fn, ln), func))
    if not entries:
        return
    # Width of the widest "file:line" column, used for alignment.
    fnmax = max(len(loc) for loc, _func in entries)
    for loc, func in entries:
        if line is None:
            yield (fnmax, loc, func)
        else:
            yield line % (fnmax, loc, func)
2574
2581
def debugstacktrace(msg='stacktrace', skip=0, f=sys.stderr, otherf=sys.stdout):
    '''Writes a message to f (stderr) with a nicely formatted stacktrace.
    Skips the 'skip' last entries. By default it will flush stdout first.
    It can be used everywhere and intentionally does not require an ui object.
    Not be used in production code but very convenient while developing.
    '''
    if otherf:
        # Flush the other stream first so interleaved output stays ordered.
        otherf.flush()
    f.write('%s at:\n' % msg)
    # skip + 1 also hides this helper's own stack frame.
    for frameline in getstackframes(skip + 1):
        f.write(frameline)
    f.flush()
2587
2594
class dirs(object):
    '''a multiset of directory names from a dirstate or manifest'''

    def __init__(self, map, skip=None):
        self._dirs = {}
        addpath = self.addpath
        if safehasattr(map, 'iteritems') and skip is not None:
            # dirstate-style mapping: honor the skip state marker.
            for f, s in map.iteritems():
                if s[0] != skip:
                    addpath(f)
        else:
            for f in map:
                addpath(f)

    def addpath(self, path):
        counts = self._dirs
        for prefix in finddirs(path):
            if prefix in counts:
                # Shorter ancestors were already counted when this prefix
                # first appeared, so bump it and stop.
                counts[prefix] += 1
                return
            counts[prefix] = 1

    def delpath(self, path):
        counts = self._dirs
        for prefix in finddirs(path):
            if counts[prefix] > 1:
                # Ancestors are still referenced by other paths; stop here.
                counts[prefix] -= 1
                return
            del counts[prefix]

    def __iter__(self):
        return self._dirs.iterkeys()

    def __contains__(self, d):
        return d in self._dirs

# Prefer the C implementation when the parsers extension provides one.
if safehasattr(parsers, 'dirs'):
    dirs = parsers.dirs
2626
2633
def finddirs(path):
    '''Yield each ancestor directory of path, longest first.'''
    sep = path.rfind('/')
    while sep != -1:
        yield path[:sep]
        sep = path.rfind('/', 0, sep)
2632
2639
2633 # compression utility
2640 # compression utility
2634
2641
class nocompress(object):
    """Pass-through 'compressor' used when no compression is requested."""

    def compress(self, x):
        # Hand each chunk back untouched.
        return x

    def flush(self):
        # Nothing is buffered, so nothing to emit at the end.
        return ""

# Factories keyed by bundle compression type.
compressors = {
    None: nocompress,
    # lambda to prevent early import
    'BZ': lambda: bz2.BZ2Compressor(),
    'GZ': lambda: zlib.compressobj(),
    }
# also support the old form by courtesies
compressors['UN'] = compressors[None]
2649
2656
def _makedecompressor(decompcls):
    """Build a function turning a file handle into a decompressed chunkbuffer.

    decompcls is a zero-argument factory returning an object with a
    decompress(chunk) method (e.g. a zlib or bz2 decompressor class).
    """
    def _chunks(f):
        # One fresh decompressor per stream, fed lazily chunk by chunk.
        decomp = decompcls()
        for block in filechunkiter(f):
            yield decomp.decompress(block)

    def decompressorfn(fh):
        return chunkbuffer(_chunks(fh))
    return decompressorfn
2658
2665
class ctxmanager(object):
    '''A context manager for use in 'with' blocks to allow multiple
    contexts to be entered at once. This is both safer and more
    flexible than contextlib.nested.

    Once Mercurial supports Python 2.7+, this will become mostly
    unnecessary.
    '''

    def __init__(self, *args):
        '''Accepts a list of no-argument functions that return context
        managers. These will be invoked at __call__ time.'''
        self._pending = args
        # Exit callbacks, appended in entry order, run reversed on exit.
        self._atexit = []

    def __enter__(self):
        return self

    def enter(self):
        '''Create and enter context managers in the order in which they were
        passed to the constructor.'''
        values = []
        for func in self._pending:
            obj = func()
            values.append(obj.__enter__())
            self._atexit.append(obj.__exit__)
        # Entering twice is a programming error; make it fail loudly.
        del self._pending
        return values

    def atexit(self, func, *args, **kwargs):
        '''Add a function to call when this context manager exits. The
        ordering of multiple atexit calls is unspecified, save that
        they will happen before any __exit__ functions.'''
        def wrapper(exc_type, exc_val, exc_tb):
            func(*args, **kwargs)
        self._atexit.append(wrapper)
        return func

    def __exit__(self, exc_type, exc_val, exc_tb):
        '''Context managers are exited in the reverse order from which
        they were created.'''
        received = exc_type is not None
        suppressed = False
        pending = None
        self._atexit.reverse()
        for exitfunc in self._atexit:
            try:
                if exitfunc(exc_type, exc_val, exc_tb):
                    # A truthy return suppresses the current exception for
                    # the remaining exit functions, like nested 'with'.
                    suppressed = True
                    exc_type = None
                    exc_val = None
                    exc_tb = None
            except BaseException:
                # Fix: the original had a redundant
                # 'pending = sys.exc_info()' immediately overwritten by
                # this combined assignment; the dead line is removed.
                # The newest failure becomes the exception seen by the
                # remaining exit functions and is re-raised afterwards.
                exc_type, exc_val, exc_tb = pending = sys.exc_info()
        del self._atexit
        if pending:
            raise exc_val
        return received and suppressed
2718
2725
2719 def _bz2():
2726 def _bz2():
2720 d = bz2.BZ2Decompressor()
2727 d = bz2.BZ2Decompressor()
2721 # Bzip2 stream start with BZ, but we stripped it.
2728 # Bzip2 stream start with BZ, but we stripped it.
2722 # we put it back for good measure.
2729 # we put it back for good measure.
2723 d.decompress('BZ')
2730 d.decompress('BZ')
2724 return d
2731 return d
2725
2732
# Map a bundle compression header to a decompressor factory.  None (and
# the legacy 'UN' spelling below) means the data is uncompressed and the
# file object is passed through untouched.
decompressors = {
    None: lambda fh: fh,
    '_truncatedBZ': _makedecompressor(_bz2),
    'BZ': _makedecompressor(lambda: bz2.BZ2Decompressor()),
    'GZ': _makedecompressor(lambda: zlib.decompressobj()),
}
# also support the old form by courtesies
decompressors['UN'] = decompressors[None]
2733
2740
# convenient shortcut for use while debugging; aliases debugstacktrace,
# which is defined earlier in this file (not visible in this chunk)
dst = debugstacktrace
General Comments 0
You need to be logged in to leave comments. Login now