##// END OF EJS Templates
util: introduce unpacker...
Matt Mackall -
r23789:94951db8 default
parent child Browse files
Show More
@@ -1,2214 +1,2223 b''
1 # util.py - Mercurial utility functions and platform specific implementations
1 # util.py - Mercurial utility functions and platform specific implementations
2 #
2 #
3 # Copyright 2005 K. Thananchayan <thananck@yahoo.com>
3 # Copyright 2005 K. Thananchayan <thananck@yahoo.com>
4 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
5 # Copyright 2006 Vadim Gelfer <vadim.gelfer@gmail.com>
5 # Copyright 2006 Vadim Gelfer <vadim.gelfer@gmail.com>
6 #
6 #
7 # This software may be used and distributed according to the terms of the
7 # This software may be used and distributed according to the terms of the
8 # GNU General Public License version 2 or any later version.
8 # GNU General Public License version 2 or any later version.
9
9
10 """Mercurial utility functions and platform specific implementations.
10 """Mercurial utility functions and platform specific implementations.
11
11
12 This contains helper routines that are independent of the SCM core and
12 This contains helper routines that are independent of the SCM core and
13 hide platform-specific details from the core.
13 hide platform-specific details from the core.
14 """
14 """
15
15
16 import i18n
16 import i18n
17 _ = i18n._
17 _ = i18n._
18 import error, osutil, encoding
18 import error, osutil, encoding
19 import errno, shutil, sys, tempfile, traceback
19 import errno, shutil, sys, tempfile, traceback
20 import re as remod
20 import re as remod
21 import os, time, datetime, calendar, textwrap, signal, collections
21 import os, time, datetime, calendar, textwrap, signal, collections
22 import imp, socket, urllib
22 import imp, socket, urllib, struct
23 import gc
23 import gc
24
24
25 if os.name == 'nt':
25 if os.name == 'nt':
26 import windows as platform
26 import windows as platform
27 else:
27 else:
28 import posix as platform
28 import posix as platform
29
29
30 cachestat = platform.cachestat
30 cachestat = platform.cachestat
31 checkexec = platform.checkexec
31 checkexec = platform.checkexec
32 checklink = platform.checklink
32 checklink = platform.checklink
33 copymode = platform.copymode
33 copymode = platform.copymode
34 executablepath = platform.executablepath
34 executablepath = platform.executablepath
35 expandglobs = platform.expandglobs
35 expandglobs = platform.expandglobs
36 explainexit = platform.explainexit
36 explainexit = platform.explainexit
37 findexe = platform.findexe
37 findexe = platform.findexe
38 gethgcmd = platform.gethgcmd
38 gethgcmd = platform.gethgcmd
39 getuser = platform.getuser
39 getuser = platform.getuser
40 groupmembers = platform.groupmembers
40 groupmembers = platform.groupmembers
41 groupname = platform.groupname
41 groupname = platform.groupname
42 hidewindow = platform.hidewindow
42 hidewindow = platform.hidewindow
43 isexec = platform.isexec
43 isexec = platform.isexec
44 isowner = platform.isowner
44 isowner = platform.isowner
45 localpath = platform.localpath
45 localpath = platform.localpath
46 lookupreg = platform.lookupreg
46 lookupreg = platform.lookupreg
47 makedir = platform.makedir
47 makedir = platform.makedir
48 nlinks = platform.nlinks
48 nlinks = platform.nlinks
49 normpath = platform.normpath
49 normpath = platform.normpath
50 normcase = platform.normcase
50 normcase = platform.normcase
51 openhardlinks = platform.openhardlinks
51 openhardlinks = platform.openhardlinks
52 oslink = platform.oslink
52 oslink = platform.oslink
53 parsepatchoutput = platform.parsepatchoutput
53 parsepatchoutput = platform.parsepatchoutput
54 pconvert = platform.pconvert
54 pconvert = platform.pconvert
55 popen = platform.popen
55 popen = platform.popen
56 posixfile = platform.posixfile
56 posixfile = platform.posixfile
57 quotecommand = platform.quotecommand
57 quotecommand = platform.quotecommand
58 readpipe = platform.readpipe
58 readpipe = platform.readpipe
59 rename = platform.rename
59 rename = platform.rename
60 samedevice = platform.samedevice
60 samedevice = platform.samedevice
61 samefile = platform.samefile
61 samefile = platform.samefile
62 samestat = platform.samestat
62 samestat = platform.samestat
63 setbinary = platform.setbinary
63 setbinary = platform.setbinary
64 setflags = platform.setflags
64 setflags = platform.setflags
65 setsignalhandler = platform.setsignalhandler
65 setsignalhandler = platform.setsignalhandler
66 shellquote = platform.shellquote
66 shellquote = platform.shellquote
67 spawndetached = platform.spawndetached
67 spawndetached = platform.spawndetached
68 split = platform.split
68 split = platform.split
69 sshargs = platform.sshargs
69 sshargs = platform.sshargs
70 statfiles = getattr(osutil, 'statfiles', platform.statfiles)
70 statfiles = getattr(osutil, 'statfiles', platform.statfiles)
71 statisexec = platform.statisexec
71 statisexec = platform.statisexec
72 statislink = platform.statislink
72 statislink = platform.statislink
73 termwidth = platform.termwidth
73 termwidth = platform.termwidth
74 testpid = platform.testpid
74 testpid = platform.testpid
75 umask = platform.umask
75 umask = platform.umask
76 unlink = platform.unlink
76 unlink = platform.unlink
77 unlinkpath = platform.unlinkpath
77 unlinkpath = platform.unlinkpath
78 username = platform.username
78 username = platform.username
79
79
80 # Python compatibility
80 # Python compatibility
81
81
82 _notset = object()
82 _notset = object()
83
83
84 def safehasattr(thing, attr):
84 def safehasattr(thing, attr):
85 return getattr(thing, attr, _notset) is not _notset
85 return getattr(thing, attr, _notset) is not _notset
86
86
87 def sha1(s=''):
87 def sha1(s=''):
88 '''
88 '''
89 Low-overhead wrapper around Python's SHA support
89 Low-overhead wrapper around Python's SHA support
90
90
91 >>> f = _fastsha1
91 >>> f = _fastsha1
92 >>> a = sha1()
92 >>> a = sha1()
93 >>> a = f()
93 >>> a = f()
94 >>> a.hexdigest()
94 >>> a.hexdigest()
95 'da39a3ee5e6b4b0d3255bfef95601890afd80709'
95 'da39a3ee5e6b4b0d3255bfef95601890afd80709'
96 '''
96 '''
97
97
98 return _fastsha1(s)
98 return _fastsha1(s)
99
99
100 def _fastsha1(s=''):
100 def _fastsha1(s=''):
101 # This function will import sha1 from hashlib or sha (whichever is
101 # This function will import sha1 from hashlib or sha (whichever is
102 # available) and overwrite itself with it on the first call.
102 # available) and overwrite itself with it on the first call.
103 # Subsequent calls will go directly to the imported function.
103 # Subsequent calls will go directly to the imported function.
104 if sys.version_info >= (2, 5):
104 if sys.version_info >= (2, 5):
105 from hashlib import sha1 as _sha1
105 from hashlib import sha1 as _sha1
106 else:
106 else:
107 from sha import sha as _sha1
107 from sha import sha as _sha1
108 global _fastsha1, sha1
108 global _fastsha1, sha1
109 _fastsha1 = sha1 = _sha1
109 _fastsha1 = sha1 = _sha1
110 return _sha1(s)
110 return _sha1(s)
111
111
112 def md5(s=''):
112 def md5(s=''):
113 try:
113 try:
114 from hashlib import md5 as _md5
114 from hashlib import md5 as _md5
115 except ImportError:
115 except ImportError:
116 from md5 import md5 as _md5
116 from md5 import md5 as _md5
117 global md5
117 global md5
118 md5 = _md5
118 md5 = _md5
119 return _md5(s)
119 return _md5(s)
120
120
121 DIGESTS = {
121 DIGESTS = {
122 'md5': md5,
122 'md5': md5,
123 'sha1': sha1,
123 'sha1': sha1,
124 }
124 }
125 # List of digest types from strongest to weakest
125 # List of digest types from strongest to weakest
126 DIGESTS_BY_STRENGTH = ['sha1', 'md5']
126 DIGESTS_BY_STRENGTH = ['sha1', 'md5']
127
127
128 try:
128 try:
129 import hashlib
129 import hashlib
130 DIGESTS.update({
130 DIGESTS.update({
131 'sha512': hashlib.sha512,
131 'sha512': hashlib.sha512,
132 })
132 })
133 DIGESTS_BY_STRENGTH.insert(0, 'sha512')
133 DIGESTS_BY_STRENGTH.insert(0, 'sha512')
134 except ImportError:
134 except ImportError:
135 pass
135 pass
136
136
137 for k in DIGESTS_BY_STRENGTH:
137 for k in DIGESTS_BY_STRENGTH:
138 assert k in DIGESTS
138 assert k in DIGESTS
139
139
140 class digester(object):
140 class digester(object):
141 """helper to compute digests.
141 """helper to compute digests.
142
142
143 This helper can be used to compute one or more digests given their name.
143 This helper can be used to compute one or more digests given their name.
144
144
145 >>> d = digester(['md5', 'sha1'])
145 >>> d = digester(['md5', 'sha1'])
146 >>> d.update('foo')
146 >>> d.update('foo')
147 >>> [k for k in sorted(d)]
147 >>> [k for k in sorted(d)]
148 ['md5', 'sha1']
148 ['md5', 'sha1']
149 >>> d['md5']
149 >>> d['md5']
150 'acbd18db4cc2f85cedef654fccc4a4d8'
150 'acbd18db4cc2f85cedef654fccc4a4d8'
151 >>> d['sha1']
151 >>> d['sha1']
152 '0beec7b5ea3f0fdbc95d0dd47f3c5bc275da8a33'
152 '0beec7b5ea3f0fdbc95d0dd47f3c5bc275da8a33'
153 >>> digester.preferred(['md5', 'sha1'])
153 >>> digester.preferred(['md5', 'sha1'])
154 'sha1'
154 'sha1'
155 """
155 """
156
156
157 def __init__(self, digests, s=''):
157 def __init__(self, digests, s=''):
158 self._hashes = {}
158 self._hashes = {}
159 for k in digests:
159 for k in digests:
160 if k not in DIGESTS:
160 if k not in DIGESTS:
161 raise Abort(_('unknown digest type: %s') % k)
161 raise Abort(_('unknown digest type: %s') % k)
162 self._hashes[k] = DIGESTS[k]()
162 self._hashes[k] = DIGESTS[k]()
163 if s:
163 if s:
164 self.update(s)
164 self.update(s)
165
165
166 def update(self, data):
166 def update(self, data):
167 for h in self._hashes.values():
167 for h in self._hashes.values():
168 h.update(data)
168 h.update(data)
169
169
170 def __getitem__(self, key):
170 def __getitem__(self, key):
171 if key not in DIGESTS:
171 if key not in DIGESTS:
172 raise Abort(_('unknown digest type: %s') % k)
172 raise Abort(_('unknown digest type: %s') % k)
173 return self._hashes[key].hexdigest()
173 return self._hashes[key].hexdigest()
174
174
175 def __iter__(self):
175 def __iter__(self):
176 return iter(self._hashes)
176 return iter(self._hashes)
177
177
178 @staticmethod
178 @staticmethod
179 def preferred(supported):
179 def preferred(supported):
180 """returns the strongest digest type in both supported and DIGESTS."""
180 """returns the strongest digest type in both supported and DIGESTS."""
181
181
182 for k in DIGESTS_BY_STRENGTH:
182 for k in DIGESTS_BY_STRENGTH:
183 if k in supported:
183 if k in supported:
184 return k
184 return k
185 return None
185 return None
186
186
187 class digestchecker(object):
187 class digestchecker(object):
188 """file handle wrapper that additionally checks content against a given
188 """file handle wrapper that additionally checks content against a given
189 size and digests.
189 size and digests.
190
190
191 d = digestchecker(fh, size, {'md5': '...'})
191 d = digestchecker(fh, size, {'md5': '...'})
192
192
193 When multiple digests are given, all of them are validated.
193 When multiple digests are given, all of them are validated.
194 """
194 """
195
195
196 def __init__(self, fh, size, digests):
196 def __init__(self, fh, size, digests):
197 self._fh = fh
197 self._fh = fh
198 self._size = size
198 self._size = size
199 self._got = 0
199 self._got = 0
200 self._digests = dict(digests)
200 self._digests = dict(digests)
201 self._digester = digester(self._digests.keys())
201 self._digester = digester(self._digests.keys())
202
202
203 def read(self, length=-1):
203 def read(self, length=-1):
204 content = self._fh.read(length)
204 content = self._fh.read(length)
205 self._digester.update(content)
205 self._digester.update(content)
206 self._got += len(content)
206 self._got += len(content)
207 return content
207 return content
208
208
209 def validate(self):
209 def validate(self):
210 if self._size != self._got:
210 if self._size != self._got:
211 raise Abort(_('size mismatch: expected %d, got %d') %
211 raise Abort(_('size mismatch: expected %d, got %d') %
212 (self._size, self._got))
212 (self._size, self._got))
213 for k, v in self._digests.items():
213 for k, v in self._digests.items():
214 if v != self._digester[k]:
214 if v != self._digester[k]:
215 # i18n: first parameter is a digest name
215 # i18n: first parameter is a digest name
216 raise Abort(_('%s mismatch: expected %s, got %s') %
216 raise Abort(_('%s mismatch: expected %s, got %s') %
217 (k, v, self._digester[k]))
217 (k, v, self._digester[k]))
218
218
219 try:
219 try:
220 buffer = buffer
220 buffer = buffer
221 except NameError:
221 except NameError:
222 if sys.version_info[0] < 3:
222 if sys.version_info[0] < 3:
223 def buffer(sliceable, offset=0):
223 def buffer(sliceable, offset=0):
224 return sliceable[offset:]
224 return sliceable[offset:]
225 else:
225 else:
226 def buffer(sliceable, offset=0):
226 def buffer(sliceable, offset=0):
227 return memoryview(sliceable)[offset:]
227 return memoryview(sliceable)[offset:]
228
228
229 import subprocess
229 import subprocess
230 closefds = os.name == 'posix'
230 closefds = os.name == 'posix'
231
231
232 def unpacker(fmt):
233 """create a struct unpacker for the specified format"""
234 try:
235 # 2.5+
236 return struct.Struct(fmt).unpack
237 except NameError:
238 # 2.4
239 return lambda buf: struct.unpack(fmt)
240
232 def popen2(cmd, env=None, newlines=False):
241 def popen2(cmd, env=None, newlines=False):
233 # Setting bufsize to -1 lets the system decide the buffer size.
242 # Setting bufsize to -1 lets the system decide the buffer size.
234 # The default for bufsize is 0, meaning unbuffered. This leads to
243 # The default for bufsize is 0, meaning unbuffered. This leads to
235 # poor performance on Mac OS X: http://bugs.python.org/issue4194
244 # poor performance on Mac OS X: http://bugs.python.org/issue4194
236 p = subprocess.Popen(cmd, shell=True, bufsize=-1,
245 p = subprocess.Popen(cmd, shell=True, bufsize=-1,
237 close_fds=closefds,
246 close_fds=closefds,
238 stdin=subprocess.PIPE, stdout=subprocess.PIPE,
247 stdin=subprocess.PIPE, stdout=subprocess.PIPE,
239 universal_newlines=newlines,
248 universal_newlines=newlines,
240 env=env)
249 env=env)
241 return p.stdin, p.stdout
250 return p.stdin, p.stdout
242
251
243 def popen3(cmd, env=None, newlines=False):
252 def popen3(cmd, env=None, newlines=False):
244 stdin, stdout, stderr, p = popen4(cmd, env, newlines)
253 stdin, stdout, stderr, p = popen4(cmd, env, newlines)
245 return stdin, stdout, stderr
254 return stdin, stdout, stderr
246
255
247 def popen4(cmd, env=None, newlines=False):
256 def popen4(cmd, env=None, newlines=False):
248 p = subprocess.Popen(cmd, shell=True, bufsize=-1,
257 p = subprocess.Popen(cmd, shell=True, bufsize=-1,
249 close_fds=closefds,
258 close_fds=closefds,
250 stdin=subprocess.PIPE, stdout=subprocess.PIPE,
259 stdin=subprocess.PIPE, stdout=subprocess.PIPE,
251 stderr=subprocess.PIPE,
260 stderr=subprocess.PIPE,
252 universal_newlines=newlines,
261 universal_newlines=newlines,
253 env=env)
262 env=env)
254 return p.stdin, p.stdout, p.stderr, p
263 return p.stdin, p.stdout, p.stderr, p
255
264
256 def version():
265 def version():
257 """Return version information if available."""
266 """Return version information if available."""
258 try:
267 try:
259 import __version__
268 import __version__
260 return __version__.version
269 return __version__.version
261 except ImportError:
270 except ImportError:
262 return 'unknown'
271 return 'unknown'
263
272
264 # used by parsedate
273 # used by parsedate
265 defaultdateformats = (
274 defaultdateformats = (
266 '%Y-%m-%d %H:%M:%S',
275 '%Y-%m-%d %H:%M:%S',
267 '%Y-%m-%d %I:%M:%S%p',
276 '%Y-%m-%d %I:%M:%S%p',
268 '%Y-%m-%d %H:%M',
277 '%Y-%m-%d %H:%M',
269 '%Y-%m-%d %I:%M%p',
278 '%Y-%m-%d %I:%M%p',
270 '%Y-%m-%d',
279 '%Y-%m-%d',
271 '%m-%d',
280 '%m-%d',
272 '%m/%d',
281 '%m/%d',
273 '%m/%d/%y',
282 '%m/%d/%y',
274 '%m/%d/%Y',
283 '%m/%d/%Y',
275 '%a %b %d %H:%M:%S %Y',
284 '%a %b %d %H:%M:%S %Y',
276 '%a %b %d %I:%M:%S%p %Y',
285 '%a %b %d %I:%M:%S%p %Y',
277 '%a, %d %b %Y %H:%M:%S', # GNU coreutils "/bin/date --rfc-2822"
286 '%a, %d %b %Y %H:%M:%S', # GNU coreutils "/bin/date --rfc-2822"
278 '%b %d %H:%M:%S %Y',
287 '%b %d %H:%M:%S %Y',
279 '%b %d %I:%M:%S%p %Y',
288 '%b %d %I:%M:%S%p %Y',
280 '%b %d %H:%M:%S',
289 '%b %d %H:%M:%S',
281 '%b %d %I:%M:%S%p',
290 '%b %d %I:%M:%S%p',
282 '%b %d %H:%M',
291 '%b %d %H:%M',
283 '%b %d %I:%M%p',
292 '%b %d %I:%M%p',
284 '%b %d %Y',
293 '%b %d %Y',
285 '%b %d',
294 '%b %d',
286 '%H:%M:%S',
295 '%H:%M:%S',
287 '%I:%M:%S%p',
296 '%I:%M:%S%p',
288 '%H:%M',
297 '%H:%M',
289 '%I:%M%p',
298 '%I:%M%p',
290 )
299 )
291
300
292 extendeddateformats = defaultdateformats + (
301 extendeddateformats = defaultdateformats + (
293 "%Y",
302 "%Y",
294 "%Y-%m",
303 "%Y-%m",
295 "%b",
304 "%b",
296 "%b %Y",
305 "%b %Y",
297 )
306 )
298
307
299 def cachefunc(func):
308 def cachefunc(func):
300 '''cache the result of function calls'''
309 '''cache the result of function calls'''
301 # XXX doesn't handle keywords args
310 # XXX doesn't handle keywords args
302 if func.func_code.co_argcount == 0:
311 if func.func_code.co_argcount == 0:
303 cache = []
312 cache = []
304 def f():
313 def f():
305 if len(cache) == 0:
314 if len(cache) == 0:
306 cache.append(func())
315 cache.append(func())
307 return cache[0]
316 return cache[0]
308 return f
317 return f
309 cache = {}
318 cache = {}
310 if func.func_code.co_argcount == 1:
319 if func.func_code.co_argcount == 1:
311 # we gain a small amount of time because
320 # we gain a small amount of time because
312 # we don't need to pack/unpack the list
321 # we don't need to pack/unpack the list
313 def f(arg):
322 def f(arg):
314 if arg not in cache:
323 if arg not in cache:
315 cache[arg] = func(arg)
324 cache[arg] = func(arg)
316 return cache[arg]
325 return cache[arg]
317 else:
326 else:
318 def f(*args):
327 def f(*args):
319 if args not in cache:
328 if args not in cache:
320 cache[args] = func(*args)
329 cache[args] = func(*args)
321 return cache[args]
330 return cache[args]
322
331
323 return f
332 return f
324
333
325 try:
334 try:
326 collections.deque.remove
335 collections.deque.remove
327 deque = collections.deque
336 deque = collections.deque
328 except AttributeError:
337 except AttributeError:
329 # python 2.4 lacks deque.remove
338 # python 2.4 lacks deque.remove
330 class deque(collections.deque):
339 class deque(collections.deque):
331 def remove(self, val):
340 def remove(self, val):
332 for i, v in enumerate(self):
341 for i, v in enumerate(self):
333 if v == val:
342 if v == val:
334 del self[i]
343 del self[i]
335 break
344 break
336
345
337 class sortdict(dict):
346 class sortdict(dict):
338 '''a simple sorted dictionary'''
347 '''a simple sorted dictionary'''
339 def __init__(self, data=None):
348 def __init__(self, data=None):
340 self._list = []
349 self._list = []
341 if data:
350 if data:
342 self.update(data)
351 self.update(data)
343 def copy(self):
352 def copy(self):
344 return sortdict(self)
353 return sortdict(self)
345 def __setitem__(self, key, val):
354 def __setitem__(self, key, val):
346 if key in self:
355 if key in self:
347 self._list.remove(key)
356 self._list.remove(key)
348 self._list.append(key)
357 self._list.append(key)
349 dict.__setitem__(self, key, val)
358 dict.__setitem__(self, key, val)
350 def __iter__(self):
359 def __iter__(self):
351 return self._list.__iter__()
360 return self._list.__iter__()
352 def update(self, src):
361 def update(self, src):
353 for k in src:
362 for k in src:
354 self[k] = src[k]
363 self[k] = src[k]
355 def clear(self):
364 def clear(self):
356 dict.clear(self)
365 dict.clear(self)
357 self._list = []
366 self._list = []
358 def items(self):
367 def items(self):
359 return [(k, self[k]) for k in self._list]
368 return [(k, self[k]) for k in self._list]
360 def __delitem__(self, key):
369 def __delitem__(self, key):
361 dict.__delitem__(self, key)
370 dict.__delitem__(self, key)
362 self._list.remove(key)
371 self._list.remove(key)
363 def pop(self, key, *args, **kwargs):
372 def pop(self, key, *args, **kwargs):
364 dict.pop(self, key, *args, **kwargs)
373 dict.pop(self, key, *args, **kwargs)
365 try:
374 try:
366 self._list.remove(key)
375 self._list.remove(key)
367 except ValueError:
376 except ValueError:
368 pass
377 pass
369 def keys(self):
378 def keys(self):
370 return self._list
379 return self._list
371 def iterkeys(self):
380 def iterkeys(self):
372 return self._list.__iter__()
381 return self._list.__iter__()
373 def iteritems(self):
382 def iteritems(self):
374 for k in self._list:
383 for k in self._list:
375 yield k, self[k]
384 yield k, self[k]
376 def insert(self, index, key, val):
385 def insert(self, index, key, val):
377 self._list.insert(index, key)
386 self._list.insert(index, key)
378 dict.__setitem__(self, key, val)
387 dict.__setitem__(self, key, val)
379
388
380 class lrucachedict(object):
389 class lrucachedict(object):
381 '''cache most recent gets from or sets to this dictionary'''
390 '''cache most recent gets from or sets to this dictionary'''
382 def __init__(self, maxsize):
391 def __init__(self, maxsize):
383 self._cache = {}
392 self._cache = {}
384 self._maxsize = maxsize
393 self._maxsize = maxsize
385 self._order = deque()
394 self._order = deque()
386
395
387 def __getitem__(self, key):
396 def __getitem__(self, key):
388 value = self._cache[key]
397 value = self._cache[key]
389 self._order.remove(key)
398 self._order.remove(key)
390 self._order.append(key)
399 self._order.append(key)
391 return value
400 return value
392
401
393 def __setitem__(self, key, value):
402 def __setitem__(self, key, value):
394 if key not in self._cache:
403 if key not in self._cache:
395 if len(self._cache) >= self._maxsize:
404 if len(self._cache) >= self._maxsize:
396 del self._cache[self._order.popleft()]
405 del self._cache[self._order.popleft()]
397 else:
406 else:
398 self._order.remove(key)
407 self._order.remove(key)
399 self._cache[key] = value
408 self._cache[key] = value
400 self._order.append(key)
409 self._order.append(key)
401
410
402 def __contains__(self, key):
411 def __contains__(self, key):
403 return key in self._cache
412 return key in self._cache
404
413
405 def clear(self):
414 def clear(self):
406 self._cache.clear()
415 self._cache.clear()
407 self._order = deque()
416 self._order = deque()
408
417
409 def lrucachefunc(func):
418 def lrucachefunc(func):
410 '''cache most recent results of function calls'''
419 '''cache most recent results of function calls'''
411 cache = {}
420 cache = {}
412 order = deque()
421 order = deque()
413 if func.func_code.co_argcount == 1:
422 if func.func_code.co_argcount == 1:
414 def f(arg):
423 def f(arg):
415 if arg not in cache:
424 if arg not in cache:
416 if len(cache) > 20:
425 if len(cache) > 20:
417 del cache[order.popleft()]
426 del cache[order.popleft()]
418 cache[arg] = func(arg)
427 cache[arg] = func(arg)
419 else:
428 else:
420 order.remove(arg)
429 order.remove(arg)
421 order.append(arg)
430 order.append(arg)
422 return cache[arg]
431 return cache[arg]
423 else:
432 else:
424 def f(*args):
433 def f(*args):
425 if args not in cache:
434 if args not in cache:
426 if len(cache) > 20:
435 if len(cache) > 20:
427 del cache[order.popleft()]
436 del cache[order.popleft()]
428 cache[args] = func(*args)
437 cache[args] = func(*args)
429 else:
438 else:
430 order.remove(args)
439 order.remove(args)
431 order.append(args)
440 order.append(args)
432 return cache[args]
441 return cache[args]
433
442
434 return f
443 return f
435
444
436 class propertycache(object):
445 class propertycache(object):
437 def __init__(self, func):
446 def __init__(self, func):
438 self.func = func
447 self.func = func
439 self.name = func.__name__
448 self.name = func.__name__
440 def __get__(self, obj, type=None):
449 def __get__(self, obj, type=None):
441 result = self.func(obj)
450 result = self.func(obj)
442 self.cachevalue(obj, result)
451 self.cachevalue(obj, result)
443 return result
452 return result
444
453
445 def cachevalue(self, obj, value):
454 def cachevalue(self, obj, value):
446 # __dict__ assignment required to bypass __setattr__ (eg: repoview)
455 # __dict__ assignment required to bypass __setattr__ (eg: repoview)
447 obj.__dict__[self.name] = value
456 obj.__dict__[self.name] = value
448
457
449 def pipefilter(s, cmd):
458 def pipefilter(s, cmd):
450 '''filter string S through command CMD, returning its output'''
459 '''filter string S through command CMD, returning its output'''
451 p = subprocess.Popen(cmd, shell=True, close_fds=closefds,
460 p = subprocess.Popen(cmd, shell=True, close_fds=closefds,
452 stdin=subprocess.PIPE, stdout=subprocess.PIPE)
461 stdin=subprocess.PIPE, stdout=subprocess.PIPE)
453 pout, perr = p.communicate(s)
462 pout, perr = p.communicate(s)
454 return pout
463 return pout
455
464
456 def tempfilter(s, cmd):
465 def tempfilter(s, cmd):
457 '''filter string S through a pair of temporary files with CMD.
466 '''filter string S through a pair of temporary files with CMD.
458 CMD is used as a template to create the real command to be run,
467 CMD is used as a template to create the real command to be run,
459 with the strings INFILE and OUTFILE replaced by the real names of
468 with the strings INFILE and OUTFILE replaced by the real names of
460 the temporary files generated.'''
469 the temporary files generated.'''
461 inname, outname = None, None
470 inname, outname = None, None
462 try:
471 try:
463 infd, inname = tempfile.mkstemp(prefix='hg-filter-in-')
472 infd, inname = tempfile.mkstemp(prefix='hg-filter-in-')
464 fp = os.fdopen(infd, 'wb')
473 fp = os.fdopen(infd, 'wb')
465 fp.write(s)
474 fp.write(s)
466 fp.close()
475 fp.close()
467 outfd, outname = tempfile.mkstemp(prefix='hg-filter-out-')
476 outfd, outname = tempfile.mkstemp(prefix='hg-filter-out-')
468 os.close(outfd)
477 os.close(outfd)
469 cmd = cmd.replace('INFILE', inname)
478 cmd = cmd.replace('INFILE', inname)
470 cmd = cmd.replace('OUTFILE', outname)
479 cmd = cmd.replace('OUTFILE', outname)
471 code = os.system(cmd)
480 code = os.system(cmd)
472 if sys.platform == 'OpenVMS' and code & 1:
481 if sys.platform == 'OpenVMS' and code & 1:
473 code = 0
482 code = 0
474 if code:
483 if code:
475 raise Abort(_("command '%s' failed: %s") %
484 raise Abort(_("command '%s' failed: %s") %
476 (cmd, explainexit(code)))
485 (cmd, explainexit(code)))
477 fp = open(outname, 'rb')
486 fp = open(outname, 'rb')
478 r = fp.read()
487 r = fp.read()
479 fp.close()
488 fp.close()
480 return r
489 return r
481 finally:
490 finally:
482 try:
491 try:
483 if inname:
492 if inname:
484 os.unlink(inname)
493 os.unlink(inname)
485 except OSError:
494 except OSError:
486 pass
495 pass
487 try:
496 try:
488 if outname:
497 if outname:
489 os.unlink(outname)
498 os.unlink(outname)
490 except OSError:
499 except OSError:
491 pass
500 pass
492
501
493 filtertable = {
502 filtertable = {
494 'tempfile:': tempfilter,
503 'tempfile:': tempfilter,
495 'pipe:': pipefilter,
504 'pipe:': pipefilter,
496 }
505 }
497
506
498 def filter(s, cmd):
507 def filter(s, cmd):
499 "filter a string through a command that transforms its input to its output"
508 "filter a string through a command that transforms its input to its output"
500 for name, fn in filtertable.iteritems():
509 for name, fn in filtertable.iteritems():
501 if cmd.startswith(name):
510 if cmd.startswith(name):
502 return fn(s, cmd[len(name):].lstrip())
511 return fn(s, cmd[len(name):].lstrip())
503 return pipefilter(s, cmd)
512 return pipefilter(s, cmd)
504
513
505 def binary(s):
514 def binary(s):
506 """return true if a string is binary data"""
515 """return true if a string is binary data"""
507 return bool(s and '\0' in s)
516 return bool(s and '\0' in s)
508
517
509 def increasingchunks(source, min=1024, max=65536):
518 def increasingchunks(source, min=1024, max=65536):
510 '''return no less than min bytes per chunk while data remains,
519 '''return no less than min bytes per chunk while data remains,
511 doubling min after each chunk until it reaches max'''
520 doubling min after each chunk until it reaches max'''
512 def log2(x):
521 def log2(x):
513 if not x:
522 if not x:
514 return 0
523 return 0
515 i = 0
524 i = 0
516 while x:
525 while x:
517 x >>= 1
526 x >>= 1
518 i += 1
527 i += 1
519 return i - 1
528 return i - 1
520
529
521 buf = []
530 buf = []
522 blen = 0
531 blen = 0
523 for chunk in source:
532 for chunk in source:
524 buf.append(chunk)
533 buf.append(chunk)
525 blen += len(chunk)
534 blen += len(chunk)
526 if blen >= min:
535 if blen >= min:
527 if min < max:
536 if min < max:
528 min = min << 1
537 min = min << 1
529 nmin = 1 << log2(blen)
538 nmin = 1 << log2(blen)
530 if nmin > min:
539 if nmin > min:
531 min = nmin
540 min = nmin
532 if min > max:
541 if min > max:
533 min = max
542 min = max
534 yield ''.join(buf)
543 yield ''.join(buf)
535 blen = 0
544 blen = 0
536 buf = []
545 buf = []
537 if buf:
546 if buf:
538 yield ''.join(buf)
547 yield ''.join(buf)
539
548
540 Abort = error.Abort
549 Abort = error.Abort
541
550
542 def always(fn):
551 def always(fn):
543 return True
552 return True
544
553
545 def never(fn):
554 def never(fn):
546 return False
555 return False
547
556
548 def nogc(func):
557 def nogc(func):
549 """disable garbage collector
558 """disable garbage collector
550
559
551 Python's garbage collector triggers a GC each time a certain number of
560 Python's garbage collector triggers a GC each time a certain number of
552 container objects (the number being defined by gc.get_threshold()) are
561 container objects (the number being defined by gc.get_threshold()) are
553 allocated even when marked not to be tracked by the collector. Tracking has
562 allocated even when marked not to be tracked by the collector. Tracking has
554 no effect on when GCs are triggered, only on what objects the GC looks
563 no effect on when GCs are triggered, only on what objects the GC looks
555 into. As a workaround, disable GC while building complex (huge)
564 into. As a workaround, disable GC while building complex (huge)
556 containers.
565 containers.
557
566
558 This garbage collector issue have been fixed in 2.7.
567 This garbage collector issue have been fixed in 2.7.
559 """
568 """
560 def wrapper(*args, **kwargs):
569 def wrapper(*args, **kwargs):
561 gcenabled = gc.isenabled()
570 gcenabled = gc.isenabled()
562 gc.disable()
571 gc.disable()
563 try:
572 try:
564 return func(*args, **kwargs)
573 return func(*args, **kwargs)
565 finally:
574 finally:
566 if gcenabled:
575 if gcenabled:
567 gc.enable()
576 gc.enable()
568 return wrapper
577 return wrapper
569
578
570 def pathto(root, n1, n2):
579 def pathto(root, n1, n2):
571 '''return the relative path from one place to another.
580 '''return the relative path from one place to another.
572 root should use os.sep to separate directories
581 root should use os.sep to separate directories
573 n1 should use os.sep to separate directories
582 n1 should use os.sep to separate directories
574 n2 should use "/" to separate directories
583 n2 should use "/" to separate directories
575 returns an os.sep-separated path.
584 returns an os.sep-separated path.
576
585
577 If n1 is a relative path, it's assumed it's
586 If n1 is a relative path, it's assumed it's
578 relative to root.
587 relative to root.
579 n2 should always be relative to root.
588 n2 should always be relative to root.
580 '''
589 '''
581 if not n1:
590 if not n1:
582 return localpath(n2)
591 return localpath(n2)
583 if os.path.isabs(n1):
592 if os.path.isabs(n1):
584 if os.path.splitdrive(root)[0] != os.path.splitdrive(n1)[0]:
593 if os.path.splitdrive(root)[0] != os.path.splitdrive(n1)[0]:
585 return os.path.join(root, localpath(n2))
594 return os.path.join(root, localpath(n2))
586 n2 = '/'.join((pconvert(root), n2))
595 n2 = '/'.join((pconvert(root), n2))
587 a, b = splitpath(n1), n2.split('/')
596 a, b = splitpath(n1), n2.split('/')
588 a.reverse()
597 a.reverse()
589 b.reverse()
598 b.reverse()
590 while a and b and a[-1] == b[-1]:
599 while a and b and a[-1] == b[-1]:
591 a.pop()
600 a.pop()
592 b.pop()
601 b.pop()
593 b.reverse()
602 b.reverse()
594 return os.sep.join((['..'] * len(a)) + b) or '.'
603 return os.sep.join((['..'] * len(a)) + b) or '.'
595
604
596 def mainfrozen():
605 def mainfrozen():
597 """return True if we are a frozen executable.
606 """return True if we are a frozen executable.
598
607
599 The code supports py2exe (most common, Windows only) and tools/freeze
608 The code supports py2exe (most common, Windows only) and tools/freeze
600 (portable, not much used).
609 (portable, not much used).
601 """
610 """
602 return (safehasattr(sys, "frozen") or # new py2exe
611 return (safehasattr(sys, "frozen") or # new py2exe
603 safehasattr(sys, "importers") or # old py2exe
612 safehasattr(sys, "importers") or # old py2exe
604 imp.is_frozen("__main__")) # tools/freeze
613 imp.is_frozen("__main__")) # tools/freeze
605
614
606 # the location of data files matching the source code
615 # the location of data files matching the source code
607 if mainfrozen():
616 if mainfrozen():
608 # executable version (py2exe) doesn't support __file__
617 # executable version (py2exe) doesn't support __file__
609 datapath = os.path.dirname(sys.executable)
618 datapath = os.path.dirname(sys.executable)
610 else:
619 else:
611 datapath = os.path.dirname(__file__)
620 datapath = os.path.dirname(__file__)
612
621
613 i18n.setdatapath(datapath)
622 i18n.setdatapath(datapath)
614
623
615 _hgexecutable = None
624 _hgexecutable = None
616
625
617 def hgexecutable():
626 def hgexecutable():
618 """return location of the 'hg' executable.
627 """return location of the 'hg' executable.
619
628
620 Defaults to $HG or 'hg' in the search path.
629 Defaults to $HG or 'hg' in the search path.
621 """
630 """
622 if _hgexecutable is None:
631 if _hgexecutable is None:
623 hg = os.environ.get('HG')
632 hg = os.environ.get('HG')
624 mainmod = sys.modules['__main__']
633 mainmod = sys.modules['__main__']
625 if hg:
634 if hg:
626 _sethgexecutable(hg)
635 _sethgexecutable(hg)
627 elif mainfrozen():
636 elif mainfrozen():
628 _sethgexecutable(sys.executable)
637 _sethgexecutable(sys.executable)
629 elif os.path.basename(getattr(mainmod, '__file__', '')) == 'hg':
638 elif os.path.basename(getattr(mainmod, '__file__', '')) == 'hg':
630 _sethgexecutable(mainmod.__file__)
639 _sethgexecutable(mainmod.__file__)
631 else:
640 else:
632 exe = findexe('hg') or os.path.basename(sys.argv[0])
641 exe = findexe('hg') or os.path.basename(sys.argv[0])
633 _sethgexecutable(exe)
642 _sethgexecutable(exe)
634 return _hgexecutable
643 return _hgexecutable
635
644
636 def _sethgexecutable(path):
645 def _sethgexecutable(path):
637 """set location of the 'hg' executable"""
646 """set location of the 'hg' executable"""
638 global _hgexecutable
647 global _hgexecutable
639 _hgexecutable = path
648 _hgexecutable = path
640
649
641 def system(cmd, environ={}, cwd=None, onerr=None, errprefix=None, out=None):
650 def system(cmd, environ={}, cwd=None, onerr=None, errprefix=None, out=None):
642 '''enhanced shell command execution.
651 '''enhanced shell command execution.
643 run with environment maybe modified, maybe in different dir.
652 run with environment maybe modified, maybe in different dir.
644
653
645 if command fails and onerr is None, return status, else raise onerr
654 if command fails and onerr is None, return status, else raise onerr
646 object as exception.
655 object as exception.
647
656
648 if out is specified, it is assumed to be a file-like object that has a
657 if out is specified, it is assumed to be a file-like object that has a
649 write() method. stdout and stderr will be redirected to out.'''
658 write() method. stdout and stderr will be redirected to out.'''
650 try:
659 try:
651 sys.stdout.flush()
660 sys.stdout.flush()
652 except Exception:
661 except Exception:
653 pass
662 pass
654 def py2shell(val):
663 def py2shell(val):
655 'convert python object into string that is useful to shell'
664 'convert python object into string that is useful to shell'
656 if val is None or val is False:
665 if val is None or val is False:
657 return '0'
666 return '0'
658 if val is True:
667 if val is True:
659 return '1'
668 return '1'
660 return str(val)
669 return str(val)
661 origcmd = cmd
670 origcmd = cmd
662 cmd = quotecommand(cmd)
671 cmd = quotecommand(cmd)
663 if sys.platform == 'plan9' and (sys.version_info[0] == 2
672 if sys.platform == 'plan9' and (sys.version_info[0] == 2
664 and sys.version_info[1] < 7):
673 and sys.version_info[1] < 7):
665 # subprocess kludge to work around issues in half-baked Python
674 # subprocess kludge to work around issues in half-baked Python
666 # ports, notably bichued/python:
675 # ports, notably bichued/python:
667 if not cwd is None:
676 if not cwd is None:
668 os.chdir(cwd)
677 os.chdir(cwd)
669 rc = os.system(cmd)
678 rc = os.system(cmd)
670 else:
679 else:
671 env = dict(os.environ)
680 env = dict(os.environ)
672 env.update((k, py2shell(v)) for k, v in environ.iteritems())
681 env.update((k, py2shell(v)) for k, v in environ.iteritems())
673 env['HG'] = hgexecutable()
682 env['HG'] = hgexecutable()
674 if out is None or out == sys.__stdout__:
683 if out is None or out == sys.__stdout__:
675 rc = subprocess.call(cmd, shell=True, close_fds=closefds,
684 rc = subprocess.call(cmd, shell=True, close_fds=closefds,
676 env=env, cwd=cwd)
685 env=env, cwd=cwd)
677 else:
686 else:
678 proc = subprocess.Popen(cmd, shell=True, close_fds=closefds,
687 proc = subprocess.Popen(cmd, shell=True, close_fds=closefds,
679 env=env, cwd=cwd, stdout=subprocess.PIPE,
688 env=env, cwd=cwd, stdout=subprocess.PIPE,
680 stderr=subprocess.STDOUT)
689 stderr=subprocess.STDOUT)
681 while True:
690 while True:
682 line = proc.stdout.readline()
691 line = proc.stdout.readline()
683 if not line:
692 if not line:
684 break
693 break
685 out.write(line)
694 out.write(line)
686 proc.wait()
695 proc.wait()
687 rc = proc.returncode
696 rc = proc.returncode
688 if sys.platform == 'OpenVMS' and rc & 1:
697 if sys.platform == 'OpenVMS' and rc & 1:
689 rc = 0
698 rc = 0
690 if rc and onerr:
699 if rc and onerr:
691 errmsg = '%s %s' % (os.path.basename(origcmd.split(None, 1)[0]),
700 errmsg = '%s %s' % (os.path.basename(origcmd.split(None, 1)[0]),
692 explainexit(rc)[0])
701 explainexit(rc)[0])
693 if errprefix:
702 if errprefix:
694 errmsg = '%s: %s' % (errprefix, errmsg)
703 errmsg = '%s: %s' % (errprefix, errmsg)
695 raise onerr(errmsg)
704 raise onerr(errmsg)
696 return rc
705 return rc
697
706
698 def checksignature(func):
707 def checksignature(func):
699 '''wrap a function with code to check for calling errors'''
708 '''wrap a function with code to check for calling errors'''
700 def check(*args, **kwargs):
709 def check(*args, **kwargs):
701 try:
710 try:
702 return func(*args, **kwargs)
711 return func(*args, **kwargs)
703 except TypeError:
712 except TypeError:
704 if len(traceback.extract_tb(sys.exc_info()[2])) == 1:
713 if len(traceback.extract_tb(sys.exc_info()[2])) == 1:
705 raise error.SignatureError
714 raise error.SignatureError
706 raise
715 raise
707
716
708 return check
717 return check
709
718
710 def copyfile(src, dest):
719 def copyfile(src, dest):
711 "copy a file, preserving mode and atime/mtime"
720 "copy a file, preserving mode and atime/mtime"
712 if os.path.lexists(dest):
721 if os.path.lexists(dest):
713 unlink(dest)
722 unlink(dest)
714 if os.path.islink(src):
723 if os.path.islink(src):
715 os.symlink(os.readlink(src), dest)
724 os.symlink(os.readlink(src), dest)
716 else:
725 else:
717 try:
726 try:
718 shutil.copyfile(src, dest)
727 shutil.copyfile(src, dest)
719 shutil.copymode(src, dest)
728 shutil.copymode(src, dest)
720 except shutil.Error, inst:
729 except shutil.Error, inst:
721 raise Abort(str(inst))
730 raise Abort(str(inst))
722
731
723 def copyfiles(src, dst, hardlink=None):
732 def copyfiles(src, dst, hardlink=None):
724 """Copy a directory tree using hardlinks if possible"""
733 """Copy a directory tree using hardlinks if possible"""
725
734
726 if hardlink is None:
735 if hardlink is None:
727 hardlink = (os.stat(src).st_dev ==
736 hardlink = (os.stat(src).st_dev ==
728 os.stat(os.path.dirname(dst)).st_dev)
737 os.stat(os.path.dirname(dst)).st_dev)
729
738
730 num = 0
739 num = 0
731 if os.path.isdir(src):
740 if os.path.isdir(src):
732 os.mkdir(dst)
741 os.mkdir(dst)
733 for name, kind in osutil.listdir(src):
742 for name, kind in osutil.listdir(src):
734 srcname = os.path.join(src, name)
743 srcname = os.path.join(src, name)
735 dstname = os.path.join(dst, name)
744 dstname = os.path.join(dst, name)
736 hardlink, n = copyfiles(srcname, dstname, hardlink)
745 hardlink, n = copyfiles(srcname, dstname, hardlink)
737 num += n
746 num += n
738 else:
747 else:
739 if hardlink:
748 if hardlink:
740 try:
749 try:
741 oslink(src, dst)
750 oslink(src, dst)
742 except (IOError, OSError):
751 except (IOError, OSError):
743 hardlink = False
752 hardlink = False
744 shutil.copy(src, dst)
753 shutil.copy(src, dst)
745 else:
754 else:
746 shutil.copy(src, dst)
755 shutil.copy(src, dst)
747 num += 1
756 num += 1
748
757
749 return hardlink, num
758 return hardlink, num
750
759
751 _winreservednames = '''con prn aux nul
760 _winreservednames = '''con prn aux nul
752 com1 com2 com3 com4 com5 com6 com7 com8 com9
761 com1 com2 com3 com4 com5 com6 com7 com8 com9
753 lpt1 lpt2 lpt3 lpt4 lpt5 lpt6 lpt7 lpt8 lpt9'''.split()
762 lpt1 lpt2 lpt3 lpt4 lpt5 lpt6 lpt7 lpt8 lpt9'''.split()
754 _winreservedchars = ':*?"<>|'
763 _winreservedchars = ':*?"<>|'
755 def checkwinfilename(path):
764 def checkwinfilename(path):
756 r'''Check that the base-relative path is a valid filename on Windows.
765 r'''Check that the base-relative path is a valid filename on Windows.
757 Returns None if the path is ok, or a UI string describing the problem.
766 Returns None if the path is ok, or a UI string describing the problem.
758
767
759 >>> checkwinfilename("just/a/normal/path")
768 >>> checkwinfilename("just/a/normal/path")
760 >>> checkwinfilename("foo/bar/con.xml")
769 >>> checkwinfilename("foo/bar/con.xml")
761 "filename contains 'con', which is reserved on Windows"
770 "filename contains 'con', which is reserved on Windows"
762 >>> checkwinfilename("foo/con.xml/bar")
771 >>> checkwinfilename("foo/con.xml/bar")
763 "filename contains 'con', which is reserved on Windows"
772 "filename contains 'con', which is reserved on Windows"
764 >>> checkwinfilename("foo/bar/xml.con")
773 >>> checkwinfilename("foo/bar/xml.con")
765 >>> checkwinfilename("foo/bar/AUX/bla.txt")
774 >>> checkwinfilename("foo/bar/AUX/bla.txt")
766 "filename contains 'AUX', which is reserved on Windows"
775 "filename contains 'AUX', which is reserved on Windows"
767 >>> checkwinfilename("foo/bar/bla:.txt")
776 >>> checkwinfilename("foo/bar/bla:.txt")
768 "filename contains ':', which is reserved on Windows"
777 "filename contains ':', which is reserved on Windows"
769 >>> checkwinfilename("foo/bar/b\07la.txt")
778 >>> checkwinfilename("foo/bar/b\07la.txt")
770 "filename contains '\\x07', which is invalid on Windows"
779 "filename contains '\\x07', which is invalid on Windows"
771 >>> checkwinfilename("foo/bar/bla ")
780 >>> checkwinfilename("foo/bar/bla ")
772 "filename ends with ' ', which is not allowed on Windows"
781 "filename ends with ' ', which is not allowed on Windows"
773 >>> checkwinfilename("../bar")
782 >>> checkwinfilename("../bar")
774 >>> checkwinfilename("foo\\")
783 >>> checkwinfilename("foo\\")
775 "filename ends with '\\', which is invalid on Windows"
784 "filename ends with '\\', which is invalid on Windows"
776 >>> checkwinfilename("foo\\/bar")
785 >>> checkwinfilename("foo\\/bar")
777 "directory name ends with '\\', which is invalid on Windows"
786 "directory name ends with '\\', which is invalid on Windows"
778 '''
787 '''
779 if path.endswith('\\'):
788 if path.endswith('\\'):
780 return _("filename ends with '\\', which is invalid on Windows")
789 return _("filename ends with '\\', which is invalid on Windows")
781 if '\\/' in path:
790 if '\\/' in path:
782 return _("directory name ends with '\\', which is invalid on Windows")
791 return _("directory name ends with '\\', which is invalid on Windows")
783 for n in path.replace('\\', '/').split('/'):
792 for n in path.replace('\\', '/').split('/'):
784 if not n:
793 if not n:
785 continue
794 continue
786 for c in n:
795 for c in n:
787 if c in _winreservedchars:
796 if c in _winreservedchars:
788 return _("filename contains '%s', which is reserved "
797 return _("filename contains '%s', which is reserved "
789 "on Windows") % c
798 "on Windows") % c
790 if ord(c) <= 31:
799 if ord(c) <= 31:
791 return _("filename contains %r, which is invalid "
800 return _("filename contains %r, which is invalid "
792 "on Windows") % c
801 "on Windows") % c
793 base = n.split('.')[0]
802 base = n.split('.')[0]
794 if base and base.lower() in _winreservednames:
803 if base and base.lower() in _winreservednames:
795 return _("filename contains '%s', which is reserved "
804 return _("filename contains '%s', which is reserved "
796 "on Windows") % base
805 "on Windows") % base
797 t = n[-1]
806 t = n[-1]
798 if t in '. ' and n not in '..':
807 if t in '. ' and n not in '..':
799 return _("filename ends with '%s', which is not allowed "
808 return _("filename ends with '%s', which is not allowed "
800 "on Windows") % t
809 "on Windows") % t
801
810
802 if os.name == 'nt':
811 if os.name == 'nt':
803 checkosfilename = checkwinfilename
812 checkosfilename = checkwinfilename
804 else:
813 else:
805 checkosfilename = platform.checkosfilename
814 checkosfilename = platform.checkosfilename
806
815
807 def makelock(info, pathname):
816 def makelock(info, pathname):
808 try:
817 try:
809 return os.symlink(info, pathname)
818 return os.symlink(info, pathname)
810 except OSError, why:
819 except OSError, why:
811 if why.errno == errno.EEXIST:
820 if why.errno == errno.EEXIST:
812 raise
821 raise
813 except AttributeError: # no symlink in os
822 except AttributeError: # no symlink in os
814 pass
823 pass
815
824
816 ld = os.open(pathname, os.O_CREAT | os.O_WRONLY | os.O_EXCL)
825 ld = os.open(pathname, os.O_CREAT | os.O_WRONLY | os.O_EXCL)
817 os.write(ld, info)
826 os.write(ld, info)
818 os.close(ld)
827 os.close(ld)
819
828
820 def readlock(pathname):
829 def readlock(pathname):
821 try:
830 try:
822 return os.readlink(pathname)
831 return os.readlink(pathname)
823 except OSError, why:
832 except OSError, why:
824 if why.errno not in (errno.EINVAL, errno.ENOSYS):
833 if why.errno not in (errno.EINVAL, errno.ENOSYS):
825 raise
834 raise
826 except AttributeError: # no symlink in os
835 except AttributeError: # no symlink in os
827 pass
836 pass
828 fp = posixfile(pathname)
837 fp = posixfile(pathname)
829 r = fp.read()
838 r = fp.read()
830 fp.close()
839 fp.close()
831 return r
840 return r
832
841
833 def fstat(fp):
842 def fstat(fp):
834 '''stat file object that may not have fileno method.'''
843 '''stat file object that may not have fileno method.'''
835 try:
844 try:
836 return os.fstat(fp.fileno())
845 return os.fstat(fp.fileno())
837 except AttributeError:
846 except AttributeError:
838 return os.stat(fp.name)
847 return os.stat(fp.name)
839
848
840 # File system features
849 # File system features
841
850
842 def checkcase(path):
851 def checkcase(path):
843 """
852 """
844 Return true if the given path is on a case-sensitive filesystem
853 Return true if the given path is on a case-sensitive filesystem
845
854
846 Requires a path (like /foo/.hg) ending with a foldable final
855 Requires a path (like /foo/.hg) ending with a foldable final
847 directory component.
856 directory component.
848 """
857 """
849 s1 = os.stat(path)
858 s1 = os.stat(path)
850 d, b = os.path.split(path)
859 d, b = os.path.split(path)
851 b2 = b.upper()
860 b2 = b.upper()
852 if b == b2:
861 if b == b2:
853 b2 = b.lower()
862 b2 = b.lower()
854 if b == b2:
863 if b == b2:
855 return True # no evidence against case sensitivity
864 return True # no evidence against case sensitivity
856 p2 = os.path.join(d, b2)
865 p2 = os.path.join(d, b2)
857 try:
866 try:
858 s2 = os.stat(p2)
867 s2 = os.stat(p2)
859 if s2 == s1:
868 if s2 == s1:
860 return False
869 return False
861 return True
870 return True
862 except OSError:
871 except OSError:
863 return True
872 return True
864
873
865 try:
874 try:
866 import re2
875 import re2
867 _re2 = None
876 _re2 = None
868 except ImportError:
877 except ImportError:
869 _re2 = False
878 _re2 = False
870
879
871 class _re(object):
880 class _re(object):
872 def _checkre2(self):
881 def _checkre2(self):
873 global _re2
882 global _re2
874 try:
883 try:
875 # check if match works, see issue3964
884 # check if match works, see issue3964
876 _re2 = bool(re2.match(r'\[([^\[]+)\]', '[ui]'))
885 _re2 = bool(re2.match(r'\[([^\[]+)\]', '[ui]'))
877 except ImportError:
886 except ImportError:
878 _re2 = False
887 _re2 = False
879
888
880 def compile(self, pat, flags=0):
889 def compile(self, pat, flags=0):
881 '''Compile a regular expression, using re2 if possible
890 '''Compile a regular expression, using re2 if possible
882
891
883 For best performance, use only re2-compatible regexp features. The
892 For best performance, use only re2-compatible regexp features. The
884 only flags from the re module that are re2-compatible are
893 only flags from the re module that are re2-compatible are
885 IGNORECASE and MULTILINE.'''
894 IGNORECASE and MULTILINE.'''
886 if _re2 is None:
895 if _re2 is None:
887 self._checkre2()
896 self._checkre2()
888 if _re2 and (flags & ~(remod.IGNORECASE | remod.MULTILINE)) == 0:
897 if _re2 and (flags & ~(remod.IGNORECASE | remod.MULTILINE)) == 0:
889 if flags & remod.IGNORECASE:
898 if flags & remod.IGNORECASE:
890 pat = '(?i)' + pat
899 pat = '(?i)' + pat
891 if flags & remod.MULTILINE:
900 if flags & remod.MULTILINE:
892 pat = '(?m)' + pat
901 pat = '(?m)' + pat
893 try:
902 try:
894 return re2.compile(pat)
903 return re2.compile(pat)
895 except re2.error:
904 except re2.error:
896 pass
905 pass
897 return remod.compile(pat, flags)
906 return remod.compile(pat, flags)
898
907
899 @propertycache
908 @propertycache
900 def escape(self):
909 def escape(self):
901 '''Return the version of escape corresponding to self.compile.
910 '''Return the version of escape corresponding to self.compile.
902
911
903 This is imperfect because whether re2 or re is used for a particular
912 This is imperfect because whether re2 or re is used for a particular
904 function depends on the flags, etc, but it's the best we can do.
913 function depends on the flags, etc, but it's the best we can do.
905 '''
914 '''
906 global _re2
915 global _re2
907 if _re2 is None:
916 if _re2 is None:
908 self._checkre2()
917 self._checkre2()
909 if _re2:
918 if _re2:
910 return re2.escape
919 return re2.escape
911 else:
920 else:
912 return remod.escape
921 return remod.escape
913
922
914 re = _re()
923 re = _re()
915
924
916 _fspathcache = {}
925 _fspathcache = {}
917 def fspath(name, root):
926 def fspath(name, root):
918 '''Get name in the case stored in the filesystem
927 '''Get name in the case stored in the filesystem
919
928
920 The name should be relative to root, and be normcase-ed for efficiency.
929 The name should be relative to root, and be normcase-ed for efficiency.
921
930
922 Note that this function is unnecessary, and should not be
931 Note that this function is unnecessary, and should not be
923 called, for case-sensitive filesystems (simply because it's expensive).
932 called, for case-sensitive filesystems (simply because it's expensive).
924
933
925 The root should be normcase-ed, too.
934 The root should be normcase-ed, too.
926 '''
935 '''
927 def _makefspathcacheentry(dir):
936 def _makefspathcacheentry(dir):
928 return dict((normcase(n), n) for n in os.listdir(dir))
937 return dict((normcase(n), n) for n in os.listdir(dir))
929
938
930 seps = os.sep
939 seps = os.sep
931 if os.altsep:
940 if os.altsep:
932 seps = seps + os.altsep
941 seps = seps + os.altsep
933 # Protect backslashes. This gets silly very quickly.
942 # Protect backslashes. This gets silly very quickly.
934 seps.replace('\\','\\\\')
943 seps.replace('\\','\\\\')
935 pattern = remod.compile(r'([^%s]+)|([%s]+)' % (seps, seps))
944 pattern = remod.compile(r'([^%s]+)|([%s]+)' % (seps, seps))
936 dir = os.path.normpath(root)
945 dir = os.path.normpath(root)
937 result = []
946 result = []
938 for part, sep in pattern.findall(name):
947 for part, sep in pattern.findall(name):
939 if sep:
948 if sep:
940 result.append(sep)
949 result.append(sep)
941 continue
950 continue
942
951
943 if dir not in _fspathcache:
952 if dir not in _fspathcache:
944 _fspathcache[dir] = _makefspathcacheentry(dir)
953 _fspathcache[dir] = _makefspathcacheentry(dir)
945 contents = _fspathcache[dir]
954 contents = _fspathcache[dir]
946
955
947 found = contents.get(part)
956 found = contents.get(part)
948 if not found:
957 if not found:
949 # retry "once per directory" per "dirstate.walk" which
958 # retry "once per directory" per "dirstate.walk" which
950 # may take place for each patches of "hg qpush", for example
959 # may take place for each patches of "hg qpush", for example
951 _fspathcache[dir] = contents = _makefspathcacheentry(dir)
960 _fspathcache[dir] = contents = _makefspathcacheentry(dir)
952 found = contents.get(part)
961 found = contents.get(part)
953
962
954 result.append(found or part)
963 result.append(found or part)
955 dir = os.path.join(dir, part)
964 dir = os.path.join(dir, part)
956
965
957 return ''.join(result)
966 return ''.join(result)
958
967
959 def checknlink(testfile):
968 def checknlink(testfile):
960 '''check whether hardlink count reporting works properly'''
969 '''check whether hardlink count reporting works properly'''
961
970
962 # testfile may be open, so we need a separate file for checking to
971 # testfile may be open, so we need a separate file for checking to
963 # work around issue2543 (or testfile may get lost on Samba shares)
972 # work around issue2543 (or testfile may get lost on Samba shares)
964 f1 = testfile + ".hgtmp1"
973 f1 = testfile + ".hgtmp1"
965 if os.path.lexists(f1):
974 if os.path.lexists(f1):
966 return False
975 return False
967 try:
976 try:
968 posixfile(f1, 'w').close()
977 posixfile(f1, 'w').close()
969 except IOError:
978 except IOError:
970 return False
979 return False
971
980
972 f2 = testfile + ".hgtmp2"
981 f2 = testfile + ".hgtmp2"
973 fd = None
982 fd = None
974 try:
983 try:
975 try:
984 try:
976 oslink(f1, f2)
985 oslink(f1, f2)
977 except OSError:
986 except OSError:
978 return False
987 return False
979
988
980 # nlinks() may behave differently for files on Windows shares if
989 # nlinks() may behave differently for files on Windows shares if
981 # the file is open.
990 # the file is open.
982 fd = posixfile(f2)
991 fd = posixfile(f2)
983 return nlinks(f2) > 1
992 return nlinks(f2) > 1
984 finally:
993 finally:
985 if fd is not None:
994 if fd is not None:
986 fd.close()
995 fd.close()
987 for f in (f1, f2):
996 for f in (f1, f2):
988 try:
997 try:
989 os.unlink(f)
998 os.unlink(f)
990 except OSError:
999 except OSError:
991 pass
1000 pass
992
1001
993 def endswithsep(path):
1002 def endswithsep(path):
994 '''Check path ends with os.sep or os.altsep.'''
1003 '''Check path ends with os.sep or os.altsep.'''
995 return path.endswith(os.sep) or os.altsep and path.endswith(os.altsep)
1004 return path.endswith(os.sep) or os.altsep and path.endswith(os.altsep)
996
1005
997 def splitpath(path):
1006 def splitpath(path):
998 '''Split path by os.sep.
1007 '''Split path by os.sep.
999 Note that this function does not use os.altsep because this is
1008 Note that this function does not use os.altsep because this is
1000 an alternative of simple "xxx.split(os.sep)".
1009 an alternative of simple "xxx.split(os.sep)".
1001 It is recommended to use os.path.normpath() before using this
1010 It is recommended to use os.path.normpath() before using this
1002 function if need.'''
1011 function if need.'''
1003 return path.split(os.sep)
1012 return path.split(os.sep)
1004
1013
1005 def gui():
1014 def gui():
1006 '''Are we running in a GUI?'''
1015 '''Are we running in a GUI?'''
1007 if sys.platform == 'darwin':
1016 if sys.platform == 'darwin':
1008 if 'SSH_CONNECTION' in os.environ:
1017 if 'SSH_CONNECTION' in os.environ:
1009 # handle SSH access to a box where the user is logged in
1018 # handle SSH access to a box where the user is logged in
1010 return False
1019 return False
1011 elif getattr(osutil, 'isgui', None):
1020 elif getattr(osutil, 'isgui', None):
1012 # check if a CoreGraphics session is available
1021 # check if a CoreGraphics session is available
1013 return osutil.isgui()
1022 return osutil.isgui()
1014 else:
1023 else:
1015 # pure build; use a safe default
1024 # pure build; use a safe default
1016 return True
1025 return True
1017 else:
1026 else:
1018 return os.name == "nt" or os.environ.get("DISPLAY")
1027 return os.name == "nt" or os.environ.get("DISPLAY")
1019
1028
1020 def mktempcopy(name, emptyok=False, createmode=None):
1029 def mktempcopy(name, emptyok=False, createmode=None):
1021 """Create a temporary file with the same contents from name
1030 """Create a temporary file with the same contents from name
1022
1031
1023 The permission bits are copied from the original file.
1032 The permission bits are copied from the original file.
1024
1033
1025 If the temporary file is going to be truncated immediately, you
1034 If the temporary file is going to be truncated immediately, you
1026 can use emptyok=True as an optimization.
1035 can use emptyok=True as an optimization.
1027
1036
1028 Returns the name of the temporary file.
1037 Returns the name of the temporary file.
1029 """
1038 """
1030 d, fn = os.path.split(name)
1039 d, fn = os.path.split(name)
1031 fd, temp = tempfile.mkstemp(prefix='.%s-' % fn, dir=d)
1040 fd, temp = tempfile.mkstemp(prefix='.%s-' % fn, dir=d)
1032 os.close(fd)
1041 os.close(fd)
1033 # Temporary files are created with mode 0600, which is usually not
1042 # Temporary files are created with mode 0600, which is usually not
1034 # what we want. If the original file already exists, just copy
1043 # what we want. If the original file already exists, just copy
1035 # its mode. Otherwise, manually obey umask.
1044 # its mode. Otherwise, manually obey umask.
1036 copymode(name, temp, createmode)
1045 copymode(name, temp, createmode)
1037 if emptyok:
1046 if emptyok:
1038 return temp
1047 return temp
1039 try:
1048 try:
1040 try:
1049 try:
1041 ifp = posixfile(name, "rb")
1050 ifp = posixfile(name, "rb")
1042 except IOError, inst:
1051 except IOError, inst:
1043 if inst.errno == errno.ENOENT:
1052 if inst.errno == errno.ENOENT:
1044 return temp
1053 return temp
1045 if not getattr(inst, 'filename', None):
1054 if not getattr(inst, 'filename', None):
1046 inst.filename = name
1055 inst.filename = name
1047 raise
1056 raise
1048 ofp = posixfile(temp, "wb")
1057 ofp = posixfile(temp, "wb")
1049 for chunk in filechunkiter(ifp):
1058 for chunk in filechunkiter(ifp):
1050 ofp.write(chunk)
1059 ofp.write(chunk)
1051 ifp.close()
1060 ifp.close()
1052 ofp.close()
1061 ofp.close()
1053 except: # re-raises
1062 except: # re-raises
1054 try: os.unlink(temp)
1063 try: os.unlink(temp)
1055 except OSError: pass
1064 except OSError: pass
1056 raise
1065 raise
1057 return temp
1066 return temp
1058
1067
1059 class atomictempfile(object):
1068 class atomictempfile(object):
1060 '''writable file object that atomically updates a file
1069 '''writable file object that atomically updates a file
1061
1070
1062 All writes will go to a temporary copy of the original file. Call
1071 All writes will go to a temporary copy of the original file. Call
1063 close() when you are done writing, and atomictempfile will rename
1072 close() when you are done writing, and atomictempfile will rename
1064 the temporary copy to the original name, making the changes
1073 the temporary copy to the original name, making the changes
1065 visible. If the object is destroyed without being closed, all your
1074 visible. If the object is destroyed without being closed, all your
1066 writes are discarded.
1075 writes are discarded.
1067 '''
1076 '''
1068 def __init__(self, name, mode='w+b', createmode=None):
1077 def __init__(self, name, mode='w+b', createmode=None):
1069 self.__name = name # permanent name
1078 self.__name = name # permanent name
1070 self._tempname = mktempcopy(name, emptyok=('w' in mode),
1079 self._tempname = mktempcopy(name, emptyok=('w' in mode),
1071 createmode=createmode)
1080 createmode=createmode)
1072 self._fp = posixfile(self._tempname, mode)
1081 self._fp = posixfile(self._tempname, mode)
1073
1082
1074 # delegated methods
1083 # delegated methods
1075 self.write = self._fp.write
1084 self.write = self._fp.write
1076 self.seek = self._fp.seek
1085 self.seek = self._fp.seek
1077 self.tell = self._fp.tell
1086 self.tell = self._fp.tell
1078 self.fileno = self._fp.fileno
1087 self.fileno = self._fp.fileno
1079
1088
1080 def close(self):
1089 def close(self):
1081 if not self._fp.closed:
1090 if not self._fp.closed:
1082 self._fp.close()
1091 self._fp.close()
1083 rename(self._tempname, localpath(self.__name))
1092 rename(self._tempname, localpath(self.__name))
1084
1093
1085 def discard(self):
1094 def discard(self):
1086 if not self._fp.closed:
1095 if not self._fp.closed:
1087 try:
1096 try:
1088 os.unlink(self._tempname)
1097 os.unlink(self._tempname)
1089 except OSError:
1098 except OSError:
1090 pass
1099 pass
1091 self._fp.close()
1100 self._fp.close()
1092
1101
1093 def __del__(self):
1102 def __del__(self):
1094 if safehasattr(self, '_fp'): # constructor actually did something
1103 if safehasattr(self, '_fp'): # constructor actually did something
1095 self.discard()
1104 self.discard()
1096
1105
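# Editor's usage sketch (not part of upstream util.py): atomictempfile ensures
# readers never observe a partially written file -- either close() renames the
# finished temporary copy into place, or discard() throws it away. The helper
# name and arguments are illustrative assumptions.
def _demo_atomicwrite(path, data):
    f = atomictempfile(path)
    try:
        f.write(data)
        f.close()      # renames the temporary copy over path
    except: # re-raises
        f.discard()    # leaves any existing file at path untouched
        raise
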
1097 def makedirs(name, mode=None, notindexed=False):
1106 def makedirs(name, mode=None, notindexed=False):
1098 """recursive directory creation with parent mode inheritance"""
1107 """recursive directory creation with parent mode inheritance"""
1099 try:
1108 try:
1100 makedir(name, notindexed)
1109 makedir(name, notindexed)
1101 except OSError, err:
1110 except OSError, err:
1102 if err.errno == errno.EEXIST:
1111 if err.errno == errno.EEXIST:
1103 return
1112 return
1104 if err.errno != errno.ENOENT or not name:
1113 if err.errno != errno.ENOENT or not name:
1105 raise
1114 raise
1106 parent = os.path.dirname(os.path.abspath(name))
1115 parent = os.path.dirname(os.path.abspath(name))
1107 if parent == name:
1116 if parent == name:
1108 raise
1117 raise
1109 makedirs(parent, mode, notindexed)
1118 makedirs(parent, mode, notindexed)
1110 makedir(name, notindexed)
1119 makedir(name, notindexed)
1111 if mode is not None:
1120 if mode is not None:
1112 os.chmod(name, mode)
1121 os.chmod(name, mode)
1113
1122
1114 def ensuredirs(name, mode=None, notindexed=False):
1123 def ensuredirs(name, mode=None, notindexed=False):
1115 """race-safe recursive directory creation
1124 """race-safe recursive directory creation
1116
1125
1117 Newly created directories are marked as "not to be indexed by
1126 Newly created directories are marked as "not to be indexed by
1118 the content indexing service", if ``notindexed`` is specified
1127 the content indexing service", if ``notindexed`` is specified
1119 for "write" mode access.
1128 for "write" mode access.
1120 """
1129 """
1121 if os.path.isdir(name):
1130 if os.path.isdir(name):
1122 return
1131 return
1123 parent = os.path.dirname(os.path.abspath(name))
1132 parent = os.path.dirname(os.path.abspath(name))
1124 if parent != name:
1133 if parent != name:
1125 ensuredirs(parent, mode, notindexed)
1134 ensuredirs(parent, mode, notindexed)
1126 try:
1135 try:
1127 makedir(name, notindexed)
1136 makedir(name, notindexed)
1128 except OSError, err:
1137 except OSError, err:
1129 if err.errno == errno.EEXIST and os.path.isdir(name):
1138 if err.errno == errno.EEXIST and os.path.isdir(name):
1130 # someone else seems to have won a directory creation race
1139 # someone else seems to have won a directory creation race
1131 return
1140 return
1132 raise
1141 raise
1133 if mode is not None:
1142 if mode is not None:
1134 os.chmod(name, mode)
1143 os.chmod(name, mode)
1135
1144
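# Editor's usage sketch (not part of upstream util.py): ensuredirs() is the
# race-safe recursive mkdir -- it is safe to call when the directory already
# exists or when another process creates it concurrently. The cache directory
# name below is an illustrative assumption.
def _demo_ensurecachedir(reporoot):
    d = os.path.join(reporoot, '.hg', 'cache', 'demo')
    ensuredirs(d)
    return d
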
1136 def readfile(path):
1145 def readfile(path):
1137 fp = open(path, 'rb')
1146 fp = open(path, 'rb')
1138 try:
1147 try:
1139 return fp.read()
1148 return fp.read()
1140 finally:
1149 finally:
1141 fp.close()
1150 fp.close()
1142
1151
1143 def writefile(path, text):
1152 def writefile(path, text):
1144 fp = open(path, 'wb')
1153 fp = open(path, 'wb')
1145 try:
1154 try:
1146 fp.write(text)
1155 fp.write(text)
1147 finally:
1156 finally:
1148 fp.close()
1157 fp.close()
1149
1158
1150 def appendfile(path, text):
1159 def appendfile(path, text):
1151 fp = open(path, 'ab')
1160 fp = open(path, 'ab')
1152 try:
1161 try:
1153 fp.write(text)
1162 fp.write(text)
1154 finally:
1163 finally:
1155 fp.close()
1164 fp.close()
1156
1165
1157 class chunkbuffer(object):
1166 class chunkbuffer(object):
1158 """Allow arbitrary sized chunks of data to be efficiently read from an
1167 """Allow arbitrary sized chunks of data to be efficiently read from an
1159 iterator over chunks of arbitrary size."""
1168 iterator over chunks of arbitrary size."""
1160
1169
1161 def __init__(self, in_iter):
1170 def __init__(self, in_iter):
1162 """in_iter is the iterator that's iterating over the input chunks.
1171 """in_iter is the iterator that's iterating over the input chunks.
1163 The internal refill target is fixed at 2**18 bytes."""
1172 The internal refill target is fixed at 2**18 bytes."""
1164 def splitbig(chunks):
1173 def splitbig(chunks):
1165 for chunk in chunks:
1174 for chunk in chunks:
1166 if len(chunk) > 2**20:
1175 if len(chunk) > 2**20:
1167 pos = 0
1176 pos = 0
1168 while pos < len(chunk):
1177 while pos < len(chunk):
1169 end = pos + 2 ** 18
1178 end = pos + 2 ** 18
1170 yield chunk[pos:end]
1179 yield chunk[pos:end]
1171 pos = end
1180 pos = end
1172 else:
1181 else:
1173 yield chunk
1182 yield chunk
1174 self.iter = splitbig(in_iter)
1183 self.iter = splitbig(in_iter)
1175 self._queue = deque()
1184 self._queue = deque()
1176
1185
1177 def read(self, l=None):
1186 def read(self, l=None):
1178 """Read L bytes of data from the iterator of chunks of data.
1187 """Read L bytes of data from the iterator of chunks of data.
1179 Returns less than L bytes if the iterator runs dry.
1188 Returns less than L bytes if the iterator runs dry.
1180
1189
1181 If the size parameter is omitted, read everything."""
1190 If the size parameter is omitted, read everything."""
1182 left = l
1191 left = l
1183 buf = []
1192 buf = []
1184 queue = self._queue
1193 queue = self._queue
1185 while left is None or left > 0:
1194 while left is None or left > 0:
1186 # refill the queue
1195 # refill the queue
1187 if not queue:
1196 if not queue:
1188 target = 2**18
1197 target = 2**18
1189 for chunk in self.iter:
1198 for chunk in self.iter:
1190 queue.append(chunk)
1199 queue.append(chunk)
1191 target -= len(chunk)
1200 target -= len(chunk)
1192 if target <= 0:
1201 if target <= 0:
1193 break
1202 break
1194 if not queue:
1203 if not queue:
1195 break
1204 break
1196
1205
1197 chunk = queue.popleft()
1206 chunk = queue.popleft()
1198 if left is not None:
1207 if left is not None:
1199 left -= len(chunk)
1208 left -= len(chunk)
1200 if left is not None and left < 0:
1209 if left is not None and left < 0:
1201 queue.appendleft(chunk[left:])
1210 queue.appendleft(chunk[left:])
1202 buf.append(chunk[:left])
1211 buf.append(chunk[:left])
1203 else:
1212 else:
1204 buf.append(chunk)
1213 buf.append(chunk)
1205
1214
1206 return ''.join(buf)
1215 return ''.join(buf)
1207
1216
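# Editor's usage sketch (not part of upstream util.py): chunkbuffer gives an
# iterator of unevenly sized strings a read(n) interface, which makes it easy
# to carve fixed-size records out of streamed data. The record framing below
# is a hypothetical example, not a Mercurial wire format.
def _demo_records(chunks, recsize):
    buf = chunkbuffer(iter(chunks))
    while True:
        rec = buf.read(recsize)
        if not rec:
            break
        yield rec
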
1208 def filechunkiter(f, size=65536, limit=None):
1217 def filechunkiter(f, size=65536, limit=None):
1209 """Create a generator that produces the data in the file size
1218 """Create a generator that produces the data in the file size
1210 (default 65536) bytes at a time, up to optional limit (default is
1219 (default 65536) bytes at a time, up to optional limit (default is
1211 to read all data). Chunks may be less than size bytes if the
1220 to read all data). Chunks may be less than size bytes if the
1212 chunk is the last chunk in the file, or the file is a socket or
1221 chunk is the last chunk in the file, or the file is a socket or
1213 some other type of file that sometimes reads less data than is
1222 some other type of file that sometimes reads less data than is
1214 requested."""
1223 requested."""
1215 assert size >= 0
1224 assert size >= 0
1216 assert limit is None or limit >= 0
1225 assert limit is None or limit >= 0
1217 while True:
1226 while True:
1218 if limit is None:
1227 if limit is None:
1219 nbytes = size
1228 nbytes = size
1220 else:
1229 else:
1221 nbytes = min(limit, size)
1230 nbytes = min(limit, size)
1222 s = nbytes and f.read(nbytes)
1231 s = nbytes and f.read(nbytes)
1223 if not s:
1232 if not s:
1224 break
1233 break
1225 if limit:
1234 if limit:
1226 limit -= len(s)
1235 limit -= len(s)
1227 yield s
1236 yield s
1228
1237
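# Editor's usage sketch (not part of upstream util.py): filechunkiter is the
# standard way in this module to stream file-like objects without reading them
# fully into memory (mktempcopy() above uses it the same way). The 1 MB limit
# and helper name are illustrative assumptions.
def _demo_copyhead(src, dst, limit=1048576):
    # copy at most `limit` bytes from the open file src to the open file dst
    for chunk in filechunkiter(src, size=65536, limit=limit):
        dst.write(chunk)
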
1229 def makedate(timestamp=None):
1238 def makedate(timestamp=None):
1230 '''Return a unix timestamp (or the current time) as a (unixtime,
1239 '''Return a unix timestamp (or the current time) as a (unixtime,
1231 offset) tuple based off the local timezone.'''
1240 offset) tuple based off the local timezone.'''
1232 if timestamp is None:
1241 if timestamp is None:
1233 timestamp = time.time()
1242 timestamp = time.time()
1234 if timestamp < 0:
1243 if timestamp < 0:
1235 hint = _("check your clock")
1244 hint = _("check your clock")
1236 raise Abort(_("negative timestamp: %d") % timestamp, hint=hint)
1245 raise Abort(_("negative timestamp: %d") % timestamp, hint=hint)
1237 delta = (datetime.datetime.utcfromtimestamp(timestamp) -
1246 delta = (datetime.datetime.utcfromtimestamp(timestamp) -
1238 datetime.datetime.fromtimestamp(timestamp))
1247 datetime.datetime.fromtimestamp(timestamp))
1239 tz = delta.days * 86400 + delta.seconds
1248 tz = delta.days * 86400 + delta.seconds
1240 return timestamp, tz
1249 return timestamp, tz
1241
1250
1242 def datestr(date=None, format='%a %b %d %H:%M:%S %Y %1%2'):
1251 def datestr(date=None, format='%a %b %d %H:%M:%S %Y %1%2'):
1243 """represent a (unixtime, offset) tuple as a localized time.
1252 """represent a (unixtime, offset) tuple as a localized time.
1244 unixtime is seconds since the epoch, and offset is the time zone's
1253 unixtime is seconds since the epoch, and offset is the time zone's
1245 number of seconds away from UTC. if the format contains none of
1254 number of seconds away from UTC. if the format contains none of
1246 %1, %2 or %z, no time zone is appended to the string."""
1255 %1, %2 or %z, no time zone is appended to the string."""
1247 t, tz = date or makedate()
1256 t, tz = date or makedate()
1248 if t < 0:
1257 if t < 0:
1249 t = 0 # time.gmtime(lt) fails on Windows for lt < -43200
1258 t = 0 # time.gmtime(lt) fails on Windows for lt < -43200
1250 tz = 0
1259 tz = 0
1251 if "%1" in format or "%2" in format or "%z" in format:
1260 if "%1" in format or "%2" in format or "%z" in format:
1252 sign = (tz > 0) and "-" or "+"
1261 sign = (tz > 0) and "-" or "+"
1253 minutes = abs(tz) // 60
1262 minutes = abs(tz) // 60
1254 format = format.replace("%z", "%1%2")
1263 format = format.replace("%z", "%1%2")
1255 format = format.replace("%1", "%c%02d" % (sign, minutes // 60))
1264 format = format.replace("%1", "%c%02d" % (sign, minutes // 60))
1256 format = format.replace("%2", "%02d" % (minutes % 60))
1265 format = format.replace("%2", "%02d" % (minutes % 60))
1257 try:
1266 try:
1258 t = time.gmtime(float(t) - tz)
1267 t = time.gmtime(float(t) - tz)
1259 except ValueError:
1268 except ValueError:
1260 # time was out of range
1269 # time was out of range
1261 t = time.gmtime(sys.maxint)
1270 t = time.gmtime(sys.maxint)
1262 s = time.strftime(format, t)
1271 s = time.strftime(format, t)
1263 return s
1272 return s
1264
1273
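# Editor's usage sketch (not part of upstream util.py): dates travel through
# this module as (unixtime, tzoffset) pairs; makedate() captures "now" in the
# local zone and datestr() renders such a pair, expanding %1%2 (or %z) to the
# signed offset. The format string below is an illustrative choice.
def _demo_localstamp():
    # e.g. 'YYYY-MM-DD HH:MM:SS +0100', depending on the clock and time zone
    return datestr(makedate(), format='%Y-%m-%d %H:%M:%S %1%2')
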
1265 def shortdate(date=None):
1274 def shortdate(date=None):
1266 """turn (timestamp, tzoff) tuple into iso 8631 date."""
1275 """turn (timestamp, tzoff) tuple into iso 8631 date."""
1267 return datestr(date, format='%Y-%m-%d')
1276 return datestr(date, format='%Y-%m-%d')
1268
1277
1269 def strdate(string, format, defaults=[]):
1278 def strdate(string, format, defaults=[]):
1270 """parse a localized time string and return a (unixtime, offset) tuple.
1279 """parse a localized time string and return a (unixtime, offset) tuple.
1271 if the string cannot be parsed, ValueError is raised."""
1280 if the string cannot be parsed, ValueError is raised."""
1272 def timezone(string):
1281 def timezone(string):
1273 tz = string.split()[-1]
1282 tz = string.split()[-1]
1274 if tz[0] in "+-" and len(tz) == 5 and tz[1:].isdigit():
1283 if tz[0] in "+-" and len(tz) == 5 and tz[1:].isdigit():
1275 sign = (tz[0] == "+") and 1 or -1
1284 sign = (tz[0] == "+") and 1 or -1
1276 hours = int(tz[1:3])
1285 hours = int(tz[1:3])
1277 minutes = int(tz[3:5])
1286 minutes = int(tz[3:5])
1278 return -sign * (hours * 60 + minutes) * 60
1287 return -sign * (hours * 60 + minutes) * 60
1279 if tz == "GMT" or tz == "UTC":
1288 if tz == "GMT" or tz == "UTC":
1280 return 0
1289 return 0
1281 return None
1290 return None
1282
1291
1283 # NOTE: unixtime = localunixtime + offset
1292 # NOTE: unixtime = localunixtime + offset
1284 offset, date = timezone(string), string
1293 offset, date = timezone(string), string
1285 if offset is not None:
1294 if offset is not None:
1286 date = " ".join(string.split()[:-1])
1295 date = " ".join(string.split()[:-1])
1287
1296
1288 # add missing elements from defaults
1297 # add missing elements from defaults
1289 usenow = False # default to using biased defaults
1298 usenow = False # default to using biased defaults
1290 for part in ("S", "M", "HI", "d", "mb", "yY"): # decreasing specificity
1299 for part in ("S", "M", "HI", "d", "mb", "yY"): # decreasing specificity
1291 found = [True for p in part if ("%"+p) in format]
1300 found = [True for p in part if ("%"+p) in format]
1292 if not found:
1301 if not found:
1293 date += "@" + defaults[part][usenow]
1302 date += "@" + defaults[part][usenow]
1294 format += "@%" + part[0]
1303 format += "@%" + part[0]
1295 else:
1304 else:
1296 # We've found a specific time element, less specific time
1305 # We've found a specific time element, less specific time
1297 # elements are relative to today
1306 # elements are relative to today
1298 usenow = True
1307 usenow = True
1299
1308
1300 timetuple = time.strptime(date, format)
1309 timetuple = time.strptime(date, format)
1301 localunixtime = int(calendar.timegm(timetuple))
1310 localunixtime = int(calendar.timegm(timetuple))
1302 if offset is None:
1311 if offset is None:
1303 # local timezone
1312 # local timezone
1304 unixtime = int(time.mktime(timetuple))
1313 unixtime = int(time.mktime(timetuple))
1305 offset = unixtime - localunixtime
1314 offset = unixtime - localunixtime
1306 else:
1315 else:
1307 unixtime = localunixtime + offset
1316 unixtime = localunixtime + offset
1308 return unixtime, offset
1317 return unixtime, offset
1309
1318
1310 def parsedate(date, formats=None, bias={}):
1319 def parsedate(date, formats=None, bias={}):
1311 """parse a localized date/time and return a (unixtime, offset) tuple.
1320 """parse a localized date/time and return a (unixtime, offset) tuple.
1312
1321
1313 The date may be a "unixtime offset" string or in one of the specified
1322 The date may be a "unixtime offset" string or in one of the specified
1314 formats. If the date already is a (unixtime, offset) tuple, it is returned.
1323 formats. If the date already is a (unixtime, offset) tuple, it is returned.
1315
1324
1316 >>> parsedate(' today ') == parsedate(\
1325 >>> parsedate(' today ') == parsedate(\
1317 datetime.date.today().strftime('%b %d'))
1326 datetime.date.today().strftime('%b %d'))
1318 True
1327 True
1319 >>> parsedate( 'yesterday ') == parsedate((datetime.date.today() -\
1328 >>> parsedate( 'yesterday ') == parsedate((datetime.date.today() -\
1320 datetime.timedelta(days=1)\
1329 datetime.timedelta(days=1)\
1321 ).strftime('%b %d'))
1330 ).strftime('%b %d'))
1322 True
1331 True
1323 >>> now, tz = makedate()
1332 >>> now, tz = makedate()
1324 >>> strnow, strtz = parsedate('now')
1333 >>> strnow, strtz = parsedate('now')
1325 >>> (strnow - now) < 1
1334 >>> (strnow - now) < 1
1326 True
1335 True
1327 >>> tz == strtz
1336 >>> tz == strtz
1328 True
1337 True
1329 """
1338 """
1330 if not date:
1339 if not date:
1331 return 0, 0
1340 return 0, 0
1332 if isinstance(date, tuple) and len(date) == 2:
1341 if isinstance(date, tuple) and len(date) == 2:
1333 return date
1342 return date
1334 if not formats:
1343 if not formats:
1335 formats = defaultdateformats
1344 formats = defaultdateformats
1336 date = date.strip()
1345 date = date.strip()
1337
1346
1338 if date == _('now'):
1347 if date == _('now'):
1339 return makedate()
1348 return makedate()
1340 if date == _('today'):
1349 if date == _('today'):
1341 date = datetime.date.today().strftime('%b %d')
1350 date = datetime.date.today().strftime('%b %d')
1342 elif date == _('yesterday'):
1351 elif date == _('yesterday'):
1343 date = (datetime.date.today() -
1352 date = (datetime.date.today() -
1344 datetime.timedelta(days=1)).strftime('%b %d')
1353 datetime.timedelta(days=1)).strftime('%b %d')
1345
1354
1346 try:
1355 try:
1347 when, offset = map(int, date.split(' '))
1356 when, offset = map(int, date.split(' '))
1348 except ValueError:
1357 except ValueError:
1349 # fill out defaults
1358 # fill out defaults
1350 now = makedate()
1359 now = makedate()
1351 defaults = {}
1360 defaults = {}
1352 for part in ("d", "mb", "yY", "HI", "M", "S"):
1361 for part in ("d", "mb", "yY", "HI", "M", "S"):
1353 # this piece is for rounding the specific end of unknowns
1362 # this piece is for rounding the specific end of unknowns
1354 b = bias.get(part)
1363 b = bias.get(part)
1355 if b is None:
1364 if b is None:
1356 if part[0] in "HMS":
1365 if part[0] in "HMS":
1357 b = "00"
1366 b = "00"
1358 else:
1367 else:
1359 b = "0"
1368 b = "0"
1360
1369
1361 # this piece is for matching the generic end to today's date
1370 # this piece is for matching the generic end to today's date
1362 n = datestr(now, "%" + part[0])
1371 n = datestr(now, "%" + part[0])
1363
1372
1364 defaults[part] = (b, n)
1373 defaults[part] = (b, n)
1365
1374
1366 for format in formats:
1375 for format in formats:
1367 try:
1376 try:
1368 when, offset = strdate(date, format, defaults)
1377 when, offset = strdate(date, format, defaults)
1369 except (ValueError, OverflowError):
1378 except (ValueError, OverflowError):
1370 pass
1379 pass
1371 else:
1380 else:
1372 break
1381 break
1373 else:
1382 else:
1374 raise Abort(_('invalid date: %r') % date)
1383 raise Abort(_('invalid date: %r') % date)
1375 # validate explicit (probably user-specified) date and
1384 # validate explicit (probably user-specified) date and
1376 # time zone offset. values must fit in signed 32 bits for
1385 # time zone offset. values must fit in signed 32 bits for
1377 # current 32-bit linux runtimes. timezones go from UTC-12
1386 # current 32-bit linux runtimes. timezones go from UTC-12
1378 # to UTC+14
1387 # to UTC+14
1379 if abs(when) > 0x7fffffff:
1388 if abs(when) > 0x7fffffff:
1380 raise Abort(_('date exceeds 32 bits: %d') % when)
1389 raise Abort(_('date exceeds 32 bits: %d') % when)
1381 if when < 0:
1390 if when < 0:
1382 raise Abort(_('negative date value: %d') % when)
1391 raise Abort(_('negative date value: %d') % when)
1383 if offset < -50400 or offset > 43200:
1392 if offset < -50400 or offset > 43200:
1384 raise Abort(_('impossible time zone offset: %d') % offset)
1393 raise Abort(_('impossible time zone offset: %d') % offset)
1385 return when, offset
1394 return when, offset
1386
1395
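# Editor's usage sketch (not part of upstream util.py): parsedate() accepts
# whatever users type on the command line -- 'now', 'today', 'yesterday', a
# "unixtime offset" pair, or any form listed in defaultdateformats -- and
# normalizes it to the (unixtime, offset) pair used throughout this module.
# The helper name and output format are illustrative assumptions.
def _demo_normalizedate(s):
    when, offset = parsedate(s)           # raises Abort on unparsable input
    return datestr((when, offset), format='%Y-%m-%d %H:%M:%S %1%2')
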
1387 def matchdate(date):
1396 def matchdate(date):
1388 """Return a function that matches a given date match specifier
1397 """Return a function that matches a given date match specifier
1389
1398
1390 Formats include:
1399 Formats include:
1391
1400
1392 '{date}' match a given date to the accuracy provided
1401 '{date}' match a given date to the accuracy provided
1393
1402
1394 '<{date}' on or before a given date
1403 '<{date}' on or before a given date
1395
1404
1396 '>{date}' on or after a given date
1405 '>{date}' on or after a given date
1397
1406
1398 >>> p1 = parsedate("10:29:59")
1407 >>> p1 = parsedate("10:29:59")
1399 >>> p2 = parsedate("10:30:00")
1408 >>> p2 = parsedate("10:30:00")
1400 >>> p3 = parsedate("10:30:59")
1409 >>> p3 = parsedate("10:30:59")
1401 >>> p4 = parsedate("10:31:00")
1410 >>> p4 = parsedate("10:31:00")
1402 >>> p5 = parsedate("Sep 15 10:30:00 1999")
1411 >>> p5 = parsedate("Sep 15 10:30:00 1999")
1403 >>> f = matchdate("10:30")
1412 >>> f = matchdate("10:30")
1404 >>> f(p1[0])
1413 >>> f(p1[0])
1405 False
1414 False
1406 >>> f(p2[0])
1415 >>> f(p2[0])
1407 True
1416 True
1408 >>> f(p3[0])
1417 >>> f(p3[0])
1409 True
1418 True
1410 >>> f(p4[0])
1419 >>> f(p4[0])
1411 False
1420 False
1412 >>> f(p5[0])
1421 >>> f(p5[0])
1413 False
1422 False
1414 """
1423 """
1415
1424
1416 def lower(date):
1425 def lower(date):
1417 d = {'mb': "1", 'd': "1"}
1426 d = {'mb': "1", 'd': "1"}
1418 return parsedate(date, extendeddateformats, d)[0]
1427 return parsedate(date, extendeddateformats, d)[0]
1419
1428
1420 def upper(date):
1429 def upper(date):
1421 d = {'mb': "12", 'HI': "23", 'M': "59", 'S': "59"}
1430 d = {'mb': "12", 'HI': "23", 'M': "59", 'S': "59"}
1422 for days in ("31", "30", "29"):
1431 for days in ("31", "30", "29"):
1423 try:
1432 try:
1424 d["d"] = days
1433 d["d"] = days
1425 return parsedate(date, extendeddateformats, d)[0]
1434 return parsedate(date, extendeddateformats, d)[0]
1426 except Abort:
1435 except Abort:
1427 pass
1436 pass
1428 d["d"] = "28"
1437 d["d"] = "28"
1429 return parsedate(date, extendeddateformats, d)[0]
1438 return parsedate(date, extendeddateformats, d)[0]
1430
1439
1431 date = date.strip()
1440 date = date.strip()
1432
1441
1433 if not date:
1442 if not date:
1434 raise Abort(_("dates cannot consist entirely of whitespace"))
1443 raise Abort(_("dates cannot consist entirely of whitespace"))
1435 elif date[0] == "<":
1444 elif date[0] == "<":
1436 if not date[1:]:
1445 if not date[1:]:
1437 raise Abort(_("invalid day spec, use '<DATE'"))
1446 raise Abort(_("invalid day spec, use '<DATE'"))
1438 when = upper(date[1:])
1447 when = upper(date[1:])
1439 return lambda x: x <= when
1448 return lambda x: x <= when
1440 elif date[0] == ">":
1449 elif date[0] == ">":
1441 if not date[1:]:
1450 if not date[1:]:
1442 raise Abort(_("invalid day spec, use '>DATE'"))
1451 raise Abort(_("invalid day spec, use '>DATE'"))
1443 when = lower(date[1:])
1452 when = lower(date[1:])
1444 return lambda x: x >= when
1453 return lambda x: x >= when
1445 elif date[0] == "-":
1454 elif date[0] == "-":
1446 try:
1455 try:
1447 days = int(date[1:])
1456 days = int(date[1:])
1448 except ValueError:
1457 except ValueError:
1449 raise Abort(_("invalid day spec: %s") % date[1:])
1458 raise Abort(_("invalid day spec: %s") % date[1:])
1450 if days < 0:
1459 if days < 0:
1451 raise Abort(_("%s must be nonnegative (see 'hg help dates')")
1460 raise Abort(_("%s must be nonnegative (see 'hg help dates')")
1452 % date[1:])
1461 % date[1:])
1453 when = makedate()[0] - days * 3600 * 24
1462 when = makedate()[0] - days * 3600 * 24
1454 return lambda x: x >= when
1463 return lambda x: x >= when
1455 elif " to " in date:
1464 elif " to " in date:
1456 a, b = date.split(" to ")
1465 a, b = date.split(" to ")
1457 start, stop = lower(a), upper(b)
1466 start, stop = lower(a), upper(b)
1458 return lambda x: x >= start and x <= stop
1467 return lambda x: x >= start and x <= stop
1459 else:
1468 else:
1460 start, stop = lower(date), upper(date)
1469 start, stop = lower(date), upper(date)
1461 return lambda x: x >= start and x <= stop
1470 return lambda x: x >= start and x <= stop
1462
1471
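# Editor's usage sketch (not part of upstream util.py): matchdate() compiles a
# date spec into a predicate over plain unixtime values, which log-style code
# can apply to the first element of each changeset's (unixtime, offset) date.
# The '-7' spec means "within the last 7 days", as documented above; the
# helper name is hypothetical.
def _demo_lastweek(timestamps):
    dm = matchdate('-7')
    return [t for t in timestamps if dm(t)]
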
1463 def shortuser(user):
1472 def shortuser(user):
1464 """Return a short representation of a user name or email address."""
1473 """Return a short representation of a user name or email address."""
1465 f = user.find('@')
1474 f = user.find('@')
1466 if f >= 0:
1475 if f >= 0:
1467 user = user[:f]
1476 user = user[:f]
1468 f = user.find('<')
1477 f = user.find('<')
1469 if f >= 0:
1478 if f >= 0:
1470 user = user[f + 1:]
1479 user = user[f + 1:]
1471 f = user.find(' ')
1480 f = user.find(' ')
1472 if f >= 0:
1481 if f >= 0:
1473 user = user[:f]
1482 user = user[:f]
1474 f = user.find('.')
1483 f = user.find('.')
1475 if f >= 0:
1484 if f >= 0:
1476 user = user[:f]
1485 user = user[:f]
1477 return user
1486 return user
1478
1487
1479 def emailuser(user):
1488 def emailuser(user):
1480 """Return the user portion of an email address."""
1489 """Return the user portion of an email address."""
1481 f = user.find('@')
1490 f = user.find('@')
1482 if f >= 0:
1491 if f >= 0:
1483 user = user[:f]
1492 user = user[:f]
1484 f = user.find('<')
1493 f = user.find('<')
1485 if f >= 0:
1494 if f >= 0:
1486 user = user[f + 1:]
1495 user = user[f + 1:]
1487 return user
1496 return user
1488
1497
1489 def email(author):
1498 def email(author):
1490 '''get email of author.'''
1499 '''get email of author.'''
1491 r = author.find('>')
1500 r = author.find('>')
1492 if r == -1:
1501 if r == -1:
1493 r = None
1502 r = None
1494 return author[author.find('<') + 1:r]
1503 return author[author.find('<') + 1:r]
1495
1504
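# Editor's usage sketch (not part of upstream util.py): the three helpers above
# extract successively shorter pieces of a "Name <addr>" style username. The
# sample author string is hypothetical.
def _demo_userparts(author='Joe User <joe.user@example.com>'):
    # -> ('joe.user@example.com', 'joe.user', 'joe')
    return email(author), emailuser(author), shortuser(author)
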
1496 def ellipsis(text, maxlength=400):
1505 def ellipsis(text, maxlength=400):
1497 """Trim string to at most maxlength (default: 400) columns in display."""
1506 """Trim string to at most maxlength (default: 400) columns in display."""
1498 return encoding.trim(text, maxlength, ellipsis='...')
1507 return encoding.trim(text, maxlength, ellipsis='...')
1499
1508
1500 def unitcountfn(*unittable):
1509 def unitcountfn(*unittable):
1501 '''return a function that renders a readable count of some quantity'''
1510 '''return a function that renders a readable count of some quantity'''
1502
1511
1503 def go(count):
1512 def go(count):
1504 for multiplier, divisor, format in unittable:
1513 for multiplier, divisor, format in unittable:
1505 if count >= divisor * multiplier:
1514 if count >= divisor * multiplier:
1506 return format % (count / float(divisor))
1515 return format % (count / float(divisor))
1507 return unittable[-1][2] % count
1516 return unittable[-1][2] % count
1508
1517
1509 return go
1518 return go
1510
1519
1511 bytecount = unitcountfn(
1520 bytecount = unitcountfn(
1512 (100, 1 << 30, _('%.0f GB')),
1521 (100, 1 << 30, _('%.0f GB')),
1513 (10, 1 << 30, _('%.1f GB')),
1522 (10, 1 << 30, _('%.1f GB')),
1514 (1, 1 << 30, _('%.2f GB')),
1523 (1, 1 << 30, _('%.2f GB')),
1515 (100, 1 << 20, _('%.0f MB')),
1524 (100, 1 << 20, _('%.0f MB')),
1516 (10, 1 << 20, _('%.1f MB')),
1525 (10, 1 << 20, _('%.1f MB')),
1517 (1, 1 << 20, _('%.2f MB')),
1526 (1, 1 << 20, _('%.2f MB')),
1518 (100, 1 << 10, _('%.0f KB')),
1527 (100, 1 << 10, _('%.0f KB')),
1519 (10, 1 << 10, _('%.1f KB')),
1528 (10, 1 << 10, _('%.1f KB')),
1520 (1, 1 << 10, _('%.2f KB')),
1529 (1, 1 << 10, _('%.2f KB')),
1521 (1, 1, _('%.0f bytes')),
1530 (1, 1, _('%.0f bytes')),
1522 )
1531 )
1523
1532
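# Editor's sketch (not part of upstream util.py): unitcountfn() builds
# human-readable formatters from (multiplier, divisor, format) rows, tried
# largest-unit first; bytecount above is one instance. The timecount table
# below is a hypothetical example for durations in seconds.
timecount = unitcountfn(
    (1, 86400, '%.0f days'),
    (1, 3600, '%.0f hours'),
    (1, 60, '%.0f minutes'),
    (1, 1, '%.0f seconds'),
    )
# e.g. timecount(7200) -> '2 hours', timecount(59) -> '59 seconds'
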
1524 def uirepr(s):
1533 def uirepr(s):
1525 # Avoid double backslash in Windows path repr()
1534 # Avoid double backslash in Windows path repr()
1526 return repr(s).replace('\\\\', '\\')
1535 return repr(s).replace('\\\\', '\\')
1527
1536
1528 # delay import of textwrap
1537 # delay import of textwrap
1529 def MBTextWrapper(**kwargs):
1538 def MBTextWrapper(**kwargs):
1530 class tw(textwrap.TextWrapper):
1539 class tw(textwrap.TextWrapper):
1531 """
1540 """
1532 Extend TextWrapper for width-awareness.
1541 Extend TextWrapper for width-awareness.
1533
1542
1534 Neither the number of 'bytes' in any encoding nor the number of
1543 Neither the number of 'bytes' in any encoding nor the number of
1535 'characters' is appropriate for calculating terminal columns for a given string.
1544 'characters' is appropriate for calculating terminal columns for a given string.
1536
1545
1537 The original TextWrapper implementation uses the built-in 'len()' directly,
1546 The original TextWrapper implementation uses the built-in 'len()' directly,
1538 so it must be overridden to use the width information of each character.
1547 so it must be overridden to use the width information of each character.
1539
1548
1540 In addition, characters classified as 'ambiguous' width are
1549 In addition, characters classified as 'ambiguous' width are
1541 treated as wide in East Asian locales, but as narrow elsewhere.
1550 treated as wide in East Asian locales, but as narrow elsewhere.
1542
1551
1543 This requires a user decision to determine the width of such characters.
1552 This requires a user decision to determine the width of such characters.
1544 """
1553 """
1545 def __init__(self, **kwargs):
1554 def __init__(self, **kwargs):
1546 textwrap.TextWrapper.__init__(self, **kwargs)
1555 textwrap.TextWrapper.__init__(self, **kwargs)
1547
1556
1548 # for compatibility between 2.4 and 2.6
1557 # for compatibility between 2.4 and 2.6
1549 if getattr(self, 'drop_whitespace', None) is None:
1558 if getattr(self, 'drop_whitespace', None) is None:
1550 self.drop_whitespace = kwargs.get('drop_whitespace', True)
1559 self.drop_whitespace = kwargs.get('drop_whitespace', True)
1551
1560
1552 def _cutdown(self, ucstr, space_left):
1561 def _cutdown(self, ucstr, space_left):
1553 l = 0
1562 l = 0
1554 colwidth = encoding.ucolwidth
1563 colwidth = encoding.ucolwidth
1555 for i in xrange(len(ucstr)):
1564 for i in xrange(len(ucstr)):
1556 l += colwidth(ucstr[i])
1565 l += colwidth(ucstr[i])
1557 if space_left < l:
1566 if space_left < l:
1558 return (ucstr[:i], ucstr[i:])
1567 return (ucstr[:i], ucstr[i:])
1559 return ucstr, ''
1568 return ucstr, ''
1560
1569
1561 # overriding of base class
1570 # overriding of base class
1562 def _handle_long_word(self, reversed_chunks, cur_line, cur_len, width):
1571 def _handle_long_word(self, reversed_chunks, cur_line, cur_len, width):
1563 space_left = max(width - cur_len, 1)
1572 space_left = max(width - cur_len, 1)
1564
1573
1565 if self.break_long_words:
1574 if self.break_long_words:
1566 cut, res = self._cutdown(reversed_chunks[-1], space_left)
1575 cut, res = self._cutdown(reversed_chunks[-1], space_left)
1567 cur_line.append(cut)
1576 cur_line.append(cut)
1568 reversed_chunks[-1] = res
1577 reversed_chunks[-1] = res
1569 elif not cur_line:
1578 elif not cur_line:
1570 cur_line.append(reversed_chunks.pop())
1579 cur_line.append(reversed_chunks.pop())
1571
1580
1572 # this overriding code is imported from TextWrapper of python 2.6
1581 # this overriding code is imported from TextWrapper of python 2.6
1573 # to calculate columns of string by 'encoding.ucolwidth()'
1582 # to calculate columns of string by 'encoding.ucolwidth()'
1574 def _wrap_chunks(self, chunks):
1583 def _wrap_chunks(self, chunks):
1575 colwidth = encoding.ucolwidth
1584 colwidth = encoding.ucolwidth
1576
1585
1577 lines = []
1586 lines = []
1578 if self.width <= 0:
1587 if self.width <= 0:
1579 raise ValueError("invalid width %r (must be > 0)" % self.width)
1588 raise ValueError("invalid width %r (must be > 0)" % self.width)
1580
1589
1581 # Arrange in reverse order so items can be efficiently popped
1590 # Arrange in reverse order so items can be efficiently popped
1582 # from a stack of chunks.
1591 # from a stack of chunks.
1583 chunks.reverse()
1592 chunks.reverse()
1584
1593
1585 while chunks:
1594 while chunks:
1586
1595
1587 # Start the list of chunks that will make up the current line.
1596 # Start the list of chunks that will make up the current line.
1588 # cur_len is just the length of all the chunks in cur_line.
1597 # cur_len is just the length of all the chunks in cur_line.
1589 cur_line = []
1598 cur_line = []
1590 cur_len = 0
1599 cur_len = 0
1591
1600
1592 # Figure out which static string will prefix this line.
1601 # Figure out which static string will prefix this line.
1593 if lines:
1602 if lines:
1594 indent = self.subsequent_indent
1603 indent = self.subsequent_indent
1595 else:
1604 else:
1596 indent = self.initial_indent
1605 indent = self.initial_indent
1597
1606
1598 # Maximum width for this line.
1607 # Maximum width for this line.
1599 width = self.width - len(indent)
1608 width = self.width - len(indent)
1600
1609
1601 # First chunk on line is whitespace -- drop it, unless this
1610 # First chunk on line is whitespace -- drop it, unless this
1602 # is the very beginning of the text (i.e. no lines started yet).
1611 # is the very beginning of the text (i.e. no lines started yet).
1603 if self.drop_whitespace and chunks[-1].strip() == '' and lines:
1612 if self.drop_whitespace and chunks[-1].strip() == '' and lines:
1604 del chunks[-1]
1613 del chunks[-1]
1605
1614
1606 while chunks:
1615 while chunks:
1607 l = colwidth(chunks[-1])
1616 l = colwidth(chunks[-1])
1608
1617
1609 # Can at least squeeze this chunk onto the current line.
1618 # Can at least squeeze this chunk onto the current line.
1610 if cur_len + l <= width:
1619 if cur_len + l <= width:
1611 cur_line.append(chunks.pop())
1620 cur_line.append(chunks.pop())
1612 cur_len += l
1621 cur_len += l
1613
1622
1614 # Nope, this line is full.
1623 # Nope, this line is full.
1615 else:
1624 else:
1616 break
1625 break
1617
1626
1618 # The current line is full, and the next chunk is too big to
1627 # The current line is full, and the next chunk is too big to
1619 # fit on *any* line (not just this one).
1628 # fit on *any* line (not just this one).
1620 if chunks and colwidth(chunks[-1]) > width:
1629 if chunks and colwidth(chunks[-1]) > width:
1621 self._handle_long_word(chunks, cur_line, cur_len, width)
1630 self._handle_long_word(chunks, cur_line, cur_len, width)
1622
1631
1623 # If the last chunk on this line is all whitespace, drop it.
1632 # If the last chunk on this line is all whitespace, drop it.
1624 if (self.drop_whitespace and
1633 if (self.drop_whitespace and
1625 cur_line and cur_line[-1].strip() == ''):
1634 cur_line and cur_line[-1].strip() == ''):
1626 del cur_line[-1]
1635 del cur_line[-1]
1627
1636
1628 # Convert current line back to a string and store it in list
1637 # Convert current line back to a string and store it in list
1629 # of all lines (return value).
1638 # of all lines (return value).
1630 if cur_line:
1639 if cur_line:
1631 lines.append(indent + ''.join(cur_line))
1640 lines.append(indent + ''.join(cur_line))
1632
1641
1633 return lines
1642 return lines
1634
1643
1635 global MBTextWrapper
1644 global MBTextWrapper
1636 MBTextWrapper = tw
1645 MBTextWrapper = tw
1637 return tw(**kwargs)
1646 return tw(**kwargs)
1638
1647
1639 def wrap(line, width, initindent='', hangindent=''):
1648 def wrap(line, width, initindent='', hangindent=''):
1640 maxindent = max(len(hangindent), len(initindent))
1649 maxindent = max(len(hangindent), len(initindent))
1641 if width <= maxindent:
1650 if width <= maxindent:
1642 # adjust for weird terminal size
1651 # adjust for weird terminal size
1643 width = max(78, maxindent + 1)
1652 width = max(78, maxindent + 1)
1644 line = line.decode(encoding.encoding, encoding.encodingmode)
1653 line = line.decode(encoding.encoding, encoding.encodingmode)
1645 initindent = initindent.decode(encoding.encoding, encoding.encodingmode)
1654 initindent = initindent.decode(encoding.encoding, encoding.encodingmode)
1646 hangindent = hangindent.decode(encoding.encoding, encoding.encodingmode)
1655 hangindent = hangindent.decode(encoding.encoding, encoding.encodingmode)
1647 wrapper = MBTextWrapper(width=width,
1656 wrapper = MBTextWrapper(width=width,
1648 initial_indent=initindent,
1657 initial_indent=initindent,
1649 subsequent_indent=hangindent)
1658 subsequent_indent=hangindent)
1650 return wrapper.fill(line).encode(encoding.encoding)
1659 return wrapper.fill(line).encode(encoding.encoding)
1651
1660
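# Editor's usage sketch (not part of upstream util.py): wrap() is width-aware
# via MBTextWrapper above, so double-width East Asian characters count as two
# terminal columns. The width and indents below are illustrative assumptions.
def _demo_wraphelp(text):
    # 70-column output, 2-space first line, 4-space continuation lines
    return wrap(text, 70, initindent='  ', hangindent='    ')
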
1652 def iterlines(iterator):
1661 def iterlines(iterator):
1653 for chunk in iterator:
1662 for chunk in iterator:
1654 for line in chunk.splitlines():
1663 for line in chunk.splitlines():
1655 yield line
1664 yield line
1656
1665
1657 def expandpath(path):
1666 def expandpath(path):
1658 return os.path.expanduser(os.path.expandvars(path))
1667 return os.path.expanduser(os.path.expandvars(path))
1659
1668
1660 def hgcmd():
1669 def hgcmd():
1661 """Return the command used to execute current hg
1670 """Return the command used to execute current hg
1662
1671
1663 This is different from hgexecutable() because on Windows we want
1672 This is different from hgexecutable() because on Windows we want
1664 to avoid things like batch files opening new shell windows, so we
1673 to avoid things like batch files opening new shell windows, so we
1665 get either the python call or the current executable.
1674 get either the python call or the current executable.
1666 """
1675 """
1667 if mainfrozen():
1676 if mainfrozen():
1668 return [sys.executable]
1677 return [sys.executable]
1669 return gethgcmd()
1678 return gethgcmd()
1670
1679
1671 def rundetached(args, condfn):
1680 def rundetached(args, condfn):
1672 """Execute the argument list in a detached process.
1681 """Execute the argument list in a detached process.
1673
1682
1674 condfn is a callable which is called repeatedly and should return
1683 condfn is a callable which is called repeatedly and should return
1675 True once the child process is known to have started successfully.
1684 True once the child process is known to have started successfully.
1676 At this point, the child process PID is returned. If the child
1685 At this point, the child process PID is returned. If the child
1677 process fails to start or finishes before condfn() evaluates to
1686 process fails to start or finishes before condfn() evaluates to
1678 True, return -1.
1687 True, return -1.
1679 """
1688 """
1680 # Windows case is easier because the child process is either
1689 # Windows case is easier because the child process is either
1681 # successfully starting and validating the condition or exiting
1690 # successfully starting and validating the condition or exiting
1682 # on failure. We just poll on its PID. On Unix, if the child
1691 # on failure. We just poll on its PID. On Unix, if the child
1683 # process fails to start, it will be left in a zombie state until
1692 # process fails to start, it will be left in a zombie state until
1684 # the parent waits on it, which we cannot do since we expect a long
1693 # the parent waits on it, which we cannot do since we expect a long
1685 # running process on success. Instead we listen for SIGCHLD telling
1694 # running process on success. Instead we listen for SIGCHLD telling
1686 # us our child process terminated.
1695 # us our child process terminated.
1687 terminated = set()
1696 terminated = set()
1688 def handler(signum, frame):
1697 def handler(signum, frame):
1689 terminated.add(os.wait())
1698 terminated.add(os.wait())
1690 prevhandler = None
1699 prevhandler = None
1691 SIGCHLD = getattr(signal, 'SIGCHLD', None)
1700 SIGCHLD = getattr(signal, 'SIGCHLD', None)
1692 if SIGCHLD is not None:
1701 if SIGCHLD is not None:
1693 prevhandler = signal.signal(SIGCHLD, handler)
1702 prevhandler = signal.signal(SIGCHLD, handler)
1694 try:
1703 try:
1695 pid = spawndetached(args)
1704 pid = spawndetached(args)
1696 while not condfn():
1705 while not condfn():
1697 if ((pid in terminated or not testpid(pid))
1706 if ((pid in terminated or not testpid(pid))
1698 and not condfn()):
1707 and not condfn()):
1699 return -1
1708 return -1
1700 time.sleep(0.1)
1709 time.sleep(0.1)
1701 return pid
1710 return pid
1702 finally:
1711 finally:
1703 if prevhandler is not None:
1712 if prevhandler is not None:
1704 signal.signal(signal.SIGCHLD, prevhandler)
1713 signal.signal(signal.SIGCHLD, prevhandler)
1705
1714
1706 try:
1715 try:
1707 any, all = any, all
1716 any, all = any, all
1708 except NameError:
1717 except NameError:
1709 def any(iterable):
1718 def any(iterable):
1710 for i in iterable:
1719 for i in iterable:
1711 if i:
1720 if i:
1712 return True
1721 return True
1713 return False
1722 return False
1714
1723
1715 def all(iterable):
1724 def all(iterable):
1716 for i in iterable:
1725 for i in iterable:
1717 if not i:
1726 if not i:
1718 return False
1727 return False
1719 return True
1728 return True
1720
1729
1721 def interpolate(prefix, mapping, s, fn=None, escape_prefix=False):
1730 def interpolate(prefix, mapping, s, fn=None, escape_prefix=False):
1722 """Return the result of interpolating items in the mapping into string s.
1731 """Return the result of interpolating items in the mapping into string s.
1723
1732
1724 prefix is a single character string, or a two character string with
1733 prefix is a single character string, or a two character string with
1725 a backslash as the first character if the prefix needs to be escaped in
1734 a backslash as the first character if the prefix needs to be escaped in
1726 a regular expression.
1735 a regular expression.
1727
1736
1728 fn is an optional function that will be applied to the replacement text
1737 fn is an optional function that will be applied to the replacement text
1729 just before replacement.
1738 just before replacement.
1730
1739
1731 escape_prefix is an optional flag that allows a doubled prefix to be
1740 escape_prefix is an optional flag that allows a doubled prefix to be
1732 used as an escape for the prefix character itself.
1741 used as an escape for the prefix character itself.
1733 """
1742 """
1734 fn = fn or (lambda s: s)
1743 fn = fn or (lambda s: s)
1735 patterns = '|'.join(mapping.keys())
1744 patterns = '|'.join(mapping.keys())
1736 if escape_prefix:
1745 if escape_prefix:
1737 patterns += '|' + prefix
1746 patterns += '|' + prefix
1738 if len(prefix) > 1:
1747 if len(prefix) > 1:
1739 prefix_char = prefix[1:]
1748 prefix_char = prefix[1:]
1740 else:
1749 else:
1741 prefix_char = prefix
1750 prefix_char = prefix
1742 mapping[prefix_char] = prefix_char
1751 mapping[prefix_char] = prefix_char
1743 r = remod.compile(r'%s(%s)' % (prefix, patterns))
1752 r = remod.compile(r'%s(%s)' % (prefix, patterns))
1744 return r.sub(lambda x: fn(mapping[x.group()[1:]]), s)
1753 return r.sub(lambda x: fn(mapping[x.group()[1:]]), s)
1745
1754
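# Editor's usage sketch (not part of upstream util.py): interpolate() expands
# single-key placeholders introduced by a prefix; with escape_prefix=True a
# doubled prefix collapses back to a literal one. The '$' prefix and mapping
# below are hypothetical.
def _demo_expand(s):
    # _demo_expand('show $f and $$PATH') -> 'show file.txt and $PATH'
    return interpolate(r'\$', {'f': 'file.txt'}, s, escape_prefix=True)
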
1746 def getport(port):
1755 def getport(port):
1747 """Return the port for a given network service.
1756 """Return the port for a given network service.
1748
1757
1749 If port is an integer, it's returned as is. If it's a string, it's
1758 If port is an integer, it's returned as is. If it's a string, it's
1750 looked up using socket.getservbyname(). If there's no matching
1759 looked up using socket.getservbyname(). If there's no matching
1751 service, util.Abort is raised.
1760 service, util.Abort is raised.
1752 """
1761 """
1753 try:
1762 try:
1754 return int(port)
1763 return int(port)
1755 except ValueError:
1764 except ValueError:
1756 pass
1765 pass
1757
1766
1758 try:
1767 try:
1759 return socket.getservbyname(port)
1768 return socket.getservbyname(port)
1760 except socket.error:
1769 except socket.error:
1761 raise Abort(_("no port number associated with service '%s'") % port)
1770 raise Abort(_("no port number associated with service '%s'") % port)
1762
1771
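# Editor's note (not part of upstream util.py): getport() accepts either a
# number ('8000' or 8000 -> 8000) or a service name, which is resolved through
# the system services database ('http' -> 80 on typical systems); unknown
# service names raise Abort. A minimal, hypothetical wrapper:
def _demo_listenport(configured='8000'):
    return getport(configured)
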
1763 _booleans = {'1': True, 'yes': True, 'true': True, 'on': True, 'always': True,
1772 _booleans = {'1': True, 'yes': True, 'true': True, 'on': True, 'always': True,
1764 '0': False, 'no': False, 'false': False, 'off': False,
1773 '0': False, 'no': False, 'false': False, 'off': False,
1765 'never': False}
1774 'never': False}
1766
1775
1767 def parsebool(s):
1776 def parsebool(s):
1768 """Parse s into a boolean.
1777 """Parse s into a boolean.
1769
1778
1770 If s is not a valid boolean, returns None.
1779 If s is not a valid boolean, returns None.
1771 """
1780 """
1772 return _booleans.get(s.lower(), None)
1781 return _booleans.get(s.lower(), None)
1773
1782
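# Editor's usage sketch (not part of upstream util.py): parsebool() returns
# None -- not False -- for unrecognized input, so callers can distinguish
# "invalid/unset" from an explicit "no". The default handling below is a
# hypothetical convention.
def _demo_configbool(raw, default=False):
    b = parsebool(raw)
    if b is None:
        return default
    return b
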
1774 _hexdig = '0123456789ABCDEFabcdef'
1783 _hexdig = '0123456789ABCDEFabcdef'
1775 _hextochr = dict((a + b, chr(int(a + b, 16)))
1784 _hextochr = dict((a + b, chr(int(a + b, 16)))
1776 for a in _hexdig for b in _hexdig)
1785 for a in _hexdig for b in _hexdig)
1777
1786
1778 def _urlunquote(s):
1787 def _urlunquote(s):
1779 """Decode HTTP/HTML % encoding.
1788 """Decode HTTP/HTML % encoding.
1780
1789
1781 >>> _urlunquote('abc%20def')
1790 >>> _urlunquote('abc%20def')
1782 'abc def'
1791 'abc def'
1783 """
1792 """
1784 res = s.split('%')
1793 res = s.split('%')
1785 # fastpath
1794 # fastpath
1786 if len(res) == 1:
1795 if len(res) == 1:
1787 return s
1796 return s
1788 s = res[0]
1797 s = res[0]
1789 for item in res[1:]:
1798 for item in res[1:]:
1790 try:
1799 try:
1791 s += _hextochr[item[:2]] + item[2:]
1800 s += _hextochr[item[:2]] + item[2:]
1792 except KeyError:
1801 except KeyError:
1793 s += '%' + item
1802 s += '%' + item
1794 except UnicodeDecodeError:
1803 except UnicodeDecodeError:
1795 s += unichr(int(item[:2], 16)) + item[2:]
1804 s += unichr(int(item[:2], 16)) + item[2:]
1796 return s
1805 return s
1797
1806
1798 class url(object):
1807 class url(object):
1799 r"""Reliable URL parser.
1808 r"""Reliable URL parser.
1800
1809
1801 This parses URLs and provides attributes for the following
1810 This parses URLs and provides attributes for the following
1802 components:
1811 components:
1803
1812
1804 <scheme>://<user>:<passwd>@<host>:<port>/<path>?<query>#<fragment>
1813 <scheme>://<user>:<passwd>@<host>:<port>/<path>?<query>#<fragment>
1805
1814
1806 Missing components are set to None. The only exception is
1815 Missing components are set to None. The only exception is
1807 fragment, which is set to '' if present but empty.
1816 fragment, which is set to '' if present but empty.
1808
1817
1809 If parsefragment is False, fragment is included in query. If
1818 If parsefragment is False, fragment is included in query. If
1810 parsequery is False, query is included in path. If both are
1819 parsequery is False, query is included in path. If both are
1811 False, both fragment and query are included in path.
1820 False, both fragment and query are included in path.
1812
1821
1813 See http://www.ietf.org/rfc/rfc2396.txt for more information.
1822 See http://www.ietf.org/rfc/rfc2396.txt for more information.
1814
1823
1815 Note that for backward compatibility reasons, bundle URLs do not
1824 Note that for backward compatibility reasons, bundle URLs do not
1816 take host names. That means 'bundle://../' has a path of '../'.
1825 take host names. That means 'bundle://../' has a path of '../'.
1817
1826
1818 Examples:
1827 Examples:
1819
1828
1820 >>> url('http://www.ietf.org/rfc/rfc2396.txt')
1829 >>> url('http://www.ietf.org/rfc/rfc2396.txt')
1821 <url scheme: 'http', host: 'www.ietf.org', path: 'rfc/rfc2396.txt'>
1830 <url scheme: 'http', host: 'www.ietf.org', path: 'rfc/rfc2396.txt'>
1822 >>> url('ssh://[::1]:2200//home/joe/repo')
1831 >>> url('ssh://[::1]:2200//home/joe/repo')
1823 <url scheme: 'ssh', host: '[::1]', port: '2200', path: '/home/joe/repo'>
1832 <url scheme: 'ssh', host: '[::1]', port: '2200', path: '/home/joe/repo'>
1824 >>> url('file:///home/joe/repo')
1833 >>> url('file:///home/joe/repo')
1825 <url scheme: 'file', path: '/home/joe/repo'>
1834 <url scheme: 'file', path: '/home/joe/repo'>
1826 >>> url('file:///c:/temp/foo/')
1835 >>> url('file:///c:/temp/foo/')
1827 <url scheme: 'file', path: 'c:/temp/foo/'>
1836 <url scheme: 'file', path: 'c:/temp/foo/'>
1828 >>> url('bundle:foo')
1837 >>> url('bundle:foo')
1829 <url scheme: 'bundle', path: 'foo'>
1838 <url scheme: 'bundle', path: 'foo'>
1830 >>> url('bundle://../foo')
1839 >>> url('bundle://../foo')
1831 <url scheme: 'bundle', path: '../foo'>
1840 <url scheme: 'bundle', path: '../foo'>
1832 >>> url(r'c:\foo\bar')
1841 >>> url(r'c:\foo\bar')
1833 <url path: 'c:\\foo\\bar'>
1842 <url path: 'c:\\foo\\bar'>
1834 >>> url(r'\\blah\blah\blah')
1843 >>> url(r'\\blah\blah\blah')
1835 <url path: '\\\\blah\\blah\\blah'>
1844 <url path: '\\\\blah\\blah\\blah'>
1836 >>> url(r'\\blah\blah\blah#baz')
1845 >>> url(r'\\blah\blah\blah#baz')
1837 <url path: '\\\\blah\\blah\\blah', fragment: 'baz'>
1846 <url path: '\\\\blah\\blah\\blah', fragment: 'baz'>
1838 >>> url(r'file:///C:\users\me')
1847 >>> url(r'file:///C:\users\me')
1839 <url scheme: 'file', path: 'C:\\users\\me'>
1848 <url scheme: 'file', path: 'C:\\users\\me'>
1840
1849
1841 Authentication credentials:
1850 Authentication credentials:
1842
1851
1843 >>> url('ssh://joe:xyz@x/repo')
1852 >>> url('ssh://joe:xyz@x/repo')
1844 <url scheme: 'ssh', user: 'joe', passwd: 'xyz', host: 'x', path: 'repo'>
1853 <url scheme: 'ssh', user: 'joe', passwd: 'xyz', host: 'x', path: 'repo'>
1845 >>> url('ssh://joe@x/repo')
1854 >>> url('ssh://joe@x/repo')
1846 <url scheme: 'ssh', user: 'joe', host: 'x', path: 'repo'>
1855 <url scheme: 'ssh', user: 'joe', host: 'x', path: 'repo'>
1847
1856
1848 Query strings and fragments:
1857 Query strings and fragments:
1849
1858
1850 >>> url('http://host/a?b#c')
1859 >>> url('http://host/a?b#c')
1851 <url scheme: 'http', host: 'host', path: 'a', query: 'b', fragment: 'c'>
1860 <url scheme: 'http', host: 'host', path: 'a', query: 'b', fragment: 'c'>
1852 >>> url('http://host/a?b#c', parsequery=False, parsefragment=False)
1861 >>> url('http://host/a?b#c', parsequery=False, parsefragment=False)
1853 <url scheme: 'http', host: 'host', path: 'a?b#c'>
1862 <url scheme: 'http', host: 'host', path: 'a?b#c'>
1854 """
1863 """
1855
1864
1856 _safechars = "!~*'()+"
1865 _safechars = "!~*'()+"
1857 _safepchars = "/!~*'()+:\\"
1866 _safepchars = "/!~*'()+:\\"
1858 _matchscheme = remod.compile(r'^[a-zA-Z0-9+.\-]+:').match
1867 _matchscheme = remod.compile(r'^[a-zA-Z0-9+.\-]+:').match
1859
1868
1860 def __init__(self, path, parsequery=True, parsefragment=True):
1869 def __init__(self, path, parsequery=True, parsefragment=True):
1861 # We slowly chomp away at path until we have only the path left
1870 # We slowly chomp away at path until we have only the path left
1862 self.scheme = self.user = self.passwd = self.host = None
1871 self.scheme = self.user = self.passwd = self.host = None
1863 self.port = self.path = self.query = self.fragment = None
1872 self.port = self.path = self.query = self.fragment = None
1864 self._localpath = True
1873 self._localpath = True
1865 self._hostport = ''
1874 self._hostport = ''
1866 self._origpath = path
1875 self._origpath = path
1867
1876
1868 if parsefragment and '#' in path:
1877 if parsefragment and '#' in path:
1869 path, self.fragment = path.split('#', 1)
1878 path, self.fragment = path.split('#', 1)
1870 if not path:
1879 if not path:
1871 path = None
1880 path = None
1872
1881
1873 # special case for Windows drive letters and UNC paths
1882 # special case for Windows drive letters and UNC paths
1874 if hasdriveletter(path) or path.startswith(r'\\'):
1883 if hasdriveletter(path) or path.startswith(r'\\'):
1875 self.path = path
1884 self.path = path
1876 return
1885 return
1877
1886
1878 # For compatibility reasons, we can't handle bundle paths as
1887 # For compatibility reasons, we can't handle bundle paths as
1879 # normal URLs
1888 # normal URLs
        if path.startswith('bundle:'):
            self.scheme = 'bundle'
            path = path[7:]
            if path.startswith('//'):
                path = path[2:]
            self.path = path
            return

        if self._matchscheme(path):
            parts = path.split(':', 1)
            if parts[0]:
                self.scheme, path = parts
                self._localpath = False

        if not path:
            path = None
            if self._localpath:
                self.path = ''
                return
        else:
            if self._localpath:
                self.path = path
                return

            if parsequery and '?' in path:
                path, self.query = path.split('?', 1)
                if not path:
                    path = None
                if not self.query:
                    self.query = None

            # // is required to specify a host/authority
            if path and path.startswith('//'):
                parts = path[2:].split('/', 1)
                if len(parts) > 1:
                    self.host, path = parts
                else:
                    self.host = parts[0]
                    path = None
                if not self.host:
                    self.host = None
                    # path of file:///d is /d
                    # path of file:///d:/ is d:/, not /d:/
                    if path and not hasdriveletter(path):
                        path = '/' + path

            if self.host and '@' in self.host:
                self.user, self.host = self.host.rsplit('@', 1)
                if ':' in self.user:
                    self.user, self.passwd = self.user.split(':', 1)
                if not self.host:
                    self.host = None

            # Don't split on colons in IPv6 addresses without ports
            if (self.host and ':' in self.host and
                not (self.host.startswith('[') and self.host.endswith(']'))):
                self._hostport = self.host
                self.host, self.port = self.host.rsplit(':', 1)
                if not self.host:
                    self.host = None

            if (self.host and self.scheme == 'file' and
                self.host not in ('localhost', '127.0.0.1', '[::1]')):
                raise Abort(_('file:// URLs can only refer to localhost'))

        self.path = path

        # leave the query string escaped
        for a in ('user', 'passwd', 'host', 'port',
                  'path', 'fragment'):
            v = getattr(self, a)
            if v is not None:
                setattr(self, a, _urlunquote(v))

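    # A hedged illustration (not part of util.py) of how the parsing above
    # decomposes a URL; the credentials, address and port are made up:
    #
    #   >>> u = url('ssh://joe:pw@[::1]:2200/repo')
    #   >>> (u.scheme, u.user, u.passwd, u.host, u.port, u.path)
    #   ('ssh', 'joe', 'pw', '[::1]', '2200', 'repo')
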
    def __repr__(self):
        attrs = []
        for a in ('scheme', 'user', 'passwd', 'host', 'port', 'path',
                  'query', 'fragment'):
            v = getattr(self, a)
            if v is not None:
                attrs.append('%s: %r' % (a, v))
        return '<url %s>' % ', '.join(attrs)

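    # A hedged sketch (not part of util.py): only attributes that were
    # actually parsed appear in the repr; 'example.com' is illustrative:
    #
    #   >>> repr(url('http://example.com/repo'))
    #   "<url scheme: 'http', host: 'example.com', path: 'repo'>"
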
    def __str__(self):
        r"""Join the URL's components back into a URL string.

        Examples:

        >>> str(url('http://user:pw@host:80/c:/bob?fo:oo#ba:ar'))
        'http://user:pw@host:80/c:/bob?fo:oo#ba:ar'
        >>> str(url('http://user:pw@host:80/?foo=bar&baz=42'))
        'http://user:pw@host:80/?foo=bar&baz=42'
        >>> str(url('http://user:pw@host:80/?foo=bar%3dbaz'))
        'http://user:pw@host:80/?foo=bar%3dbaz'
        >>> str(url('ssh://user:pw@[::1]:2200//home/joe#'))
        'ssh://user:pw@[::1]:2200//home/joe#'
        >>> str(url('http://localhost:80//'))
        'http://localhost:80//'
        >>> str(url('http://localhost:80/'))
        'http://localhost:80/'
        >>> str(url('http://localhost:80'))
        'http://localhost:80/'
        >>> str(url('bundle:foo'))
        'bundle:foo'
        >>> str(url('bundle://../foo'))
        'bundle:../foo'
        >>> str(url('path'))
        'path'
        >>> str(url('file:///tmp/foo/bar'))
        'file:///tmp/foo/bar'
        >>> str(url('file:///c:/tmp/foo/bar'))
        'file:///c:/tmp/foo/bar'
        >>> print url(r'bundle:foo\bar')
        bundle:foo\bar
        >>> print url(r'file:///D:\data\hg')
        file:///D:\data\hg
        """
        if self._localpath:
            s = self.path
            if self.scheme == 'bundle':
                s = 'bundle:' + s
            if self.fragment:
                s += '#' + self.fragment
            return s

        s = self.scheme + ':'
        if self.user or self.passwd or self.host:
            s += '//'
        elif self.scheme and (not self.path or self.path.startswith('/')
                              or hasdriveletter(self.path)):
            s += '//'
            if hasdriveletter(self.path):
                s += '/'
        if self.user:
            s += urllib.quote(self.user, safe=self._safechars)
        if self.passwd:
            s += ':' + urllib.quote(self.passwd, safe=self._safechars)
        if self.user or self.passwd:
            s += '@'
        if self.host:
            if not (self.host.startswith('[') and self.host.endswith(']')):
                s += urllib.quote(self.host)
            else:
                s += self.host
        if self.port:
            s += ':' + urllib.quote(self.port)
        if self.host:
            s += '/'
        if self.path:
            # TODO: similar to the query string, we should not unescape the
            # path when we store it, the path might contain '%2f' = '/',
            # which we should *not* escape.
            s += urllib.quote(self.path, safe=self._safepchars)
        if self.query:
            # we store the query in escaped form.
            s += '?' + self.query
        if self.fragment is not None:
            s += '#' + urllib.quote(self.fragment, safe=self._safepchars)
        return s

    def authinfo(self):
        user, passwd = self.user, self.passwd
        try:
            self.user, self.passwd = None, None
            s = str(self)
        finally:
            self.user, self.passwd = user, passwd
        if not self.user:
            return (s, None)
        # authinfo[1] is passed to the urllib2 password manager, and its
        # URIs must not contain credentials. The host is passed in the
        # URIs list because Python < 2.4.3 uses only that to search for
        # a password.
        return (s, (None, (s, self.host),
                    self.user, self.passwd or ''))

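    # A hedged sketch (not part of util.py) of how the authinfo() tuple is
    # typically handed to urllib2; the URL and credentials are illustrative:
    #
    #   u = url('http://joe:secret@example.com/repo')
    #   cleanurl, authinfo = u.authinfo()
    #   if authinfo is not None:
    #       realm, uris, user, passwd = authinfo
    #       passmgr = urllib2.HTTPPasswordMgrWithDefaultRealm()
    #       passmgr.add_password(realm, uris, user, passwd)
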
    def isabs(self):
        if self.scheme and self.scheme != 'file':
            return True # remote URL
        if hasdriveletter(self.path):
            return True # absolute for our purposes - can't be joined()
        if self.path.startswith(r'\\'):
            return True # Windows UNC path
        if self.path.startswith('/'):
            return True # POSIX-style
        return False

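    # A hedged illustration (not part of util.py); the paths are made up:
    #
    #   >>> url('http://example.com/repo').isabs()
    #   True
    #   >>> url('/srv/repo').isabs()
    #   True
    #   >>> url('relative/repo').isabs()
    #   False
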
    def localpath(self):
        if self.scheme == 'file' or self.scheme == 'bundle':
            path = self.path or '/'
            # For Windows, we need to promote hosts containing drive
            # letters to paths with drive letters.
            if hasdriveletter(self._hostport):
                path = self._hostport + '/' + self.path
            elif (self.host is not None and self.path
                  and not hasdriveletter(path)):
                path = '/' + path
            return path
        return self._origpath

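    # A hedged illustration (not part of util.py); the paths are made up:
    #
    #   >>> url('file:///tmp/repo').localpath()
    #   '/tmp/repo'
    #   >>> url('file:///c:/tmp/repo').localpath()
    #   'c:/tmp/repo'
    #   >>> url('bundle:../backup.hg').localpath()
    #   '../backup.hg'
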
    def islocal(self):
        '''whether localpath will return something that posixfile can open'''
        return (not self.scheme or self.scheme == 'file'
                or self.scheme == 'bundle')

def hasscheme(path):
    return bool(url(path).scheme)

def hasdriveletter(path):
    return path and path[1:2] == ':' and path[0:1].isalpha()

def urllocalpath(path):
    return url(path, parsequery=False, parsefragment=False).localpath()

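# A hedged illustration (not part of util.py); the inputs are made up:
#
#   >>> hasscheme('http://example.com/repo'), hasscheme('relative/repo')
#   (True, False)
#   >>> hasdriveletter('c:/work'), hasdriveletter('/work')
#   (True, False)
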
def hidepassword(u):
    '''hide user credential in a url string'''
    u = url(u)
    if u.passwd:
        u.passwd = '***'
    return str(u)

def removeauth(u):
    '''remove all authentication information from a url string'''
    u = url(u)
    u.user = u.passwd = None
    return str(u)

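# A hedged illustration (not part of util.py); user and password are made up:
#
#   >>> hidepassword('http://joe:secret@example.com/repo')
#   'http://joe:***@example.com/repo'
#   >>> removeauth('http://joe:secret@example.com/repo')
#   'http://example.com/repo'
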
def isatty(fd):
    try:
        return fd.isatty()
    except AttributeError:
        return False

timecount = unitcountfn(
    (1, 1e3, _('%.0f s')),
    (100, 1, _('%.1f s')),
    (10, 1, _('%.2f s')),
    (1, 1, _('%.3f s')),
    (100, 0.001, _('%.1f ms')),
    (10, 0.001, _('%.2f ms')),
    (1, 0.001, _('%.3f ms')),
    (100, 0.000001, _('%.1f us')),
    (10, 0.000001, _('%.2f us')),
    (1, 0.000001, _('%.3f us')),
    (100, 0.000000001, _('%.1f ns')),
    (10, 0.000000001, _('%.2f ns')),
    (1, 0.000000001, _('%.3f ns')),
    )

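# A hedged illustration (not part of util.py), assuming unitcountfn()
# (defined earlier in this file) picks the first row whose threshold
# (multiplier * divisor) the value reaches:
#
#   >>> timecount(3.5)
#   '3.500 s'
#   >>> timecount(0.0123)
#   '12.30 ms'
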
_timenesting = [0]

def timed(func):
    '''Report the execution time of a function call to stderr.

    During development, use as a decorator when you need to measure
    the cost of a function, e.g. as follows:

    @util.timed
    def foo(a, b, c):
        pass
    '''

    def wrapper(*args, **kwargs):
        start = time.time()
        indent = 2
        _timenesting[0] += indent
        try:
            return func(*args, **kwargs)
        finally:
            elapsed = time.time() - start
            _timenesting[0] -= indent
            sys.stderr.write('%s%s: %s\n' %
                             (' ' * _timenesting[0], func.__name__,
                              timecount(elapsed)))
    return wrapper

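# A hedged sketch (not part of util.py) of what a @timed function prints;
# the function name and timing shown are illustrative:
#
#   @timed
#   def checkout(repo):
#       ...
#
#   # written to stderr after each call, indented two spaces per
#   # nesting level of @timed functions:
#   # checkout: 182.45 ms
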
_sizeunits = (('m', 2**20), ('k', 2**10), ('g', 2**30),
              ('kb', 2**10), ('mb', 2**20), ('gb', 2**30), ('b', 1))

def sizetoint(s):
    '''Convert a space specifier to a byte count.

    >>> sizetoint('30')
    30
    >>> sizetoint('2.2kb')
    2252
    >>> sizetoint('6M')
    6291456
    '''
    t = s.strip().lower()
    try:
        for k, u in _sizeunits:
            if t.endswith(k):
                return int(float(t[:-len(k)]) * u)
        return int(t)
    except ValueError:
        raise error.ParseError(_("couldn't parse size: %s") % s)

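# A hedged illustration (not part of util.py): matching is done on the
# lowercased, stripped string, so unit suffixes are case-insensitive:
#
#   >>> sizetoint(' 1.5 GB ')
#   1610612736
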
class hooks(object):
    '''A collection of hook functions that can be used to extend a
       function's behaviour. Hooks are called in lexicographic order,
       based on the names of their sources.'''

    def __init__(self):
        self._hooks = []

    def add(self, source, hook):
        self._hooks.append((source, hook))

    def __call__(self, *args):
        self._hooks.sort(key=lambda x: x[0])
        results = []
        for source, hook in self._hooks:
            results.append(hook(*args))
        return results

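# A hedged usage sketch (not part of util.py); the source names and
# callables are illustrative:
#
#   h = hooks()
#   h.add('ext-b', lambda x: x + 1)
#   h.add('ext-a', lambda x: x * 2)
#   h(3)    # called in source order 'ext-a', 'ext-b' -> returns [6, 4]
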
def debugstacktrace(msg='stacktrace', skip=0, f=sys.stderr, otherf=sys.stdout):
    '''Writes a message to f (stderr) with a nicely formatted stacktrace.
    Skips the 'skip' last entries. By default it will flush stdout first.
    It can be used everywhere and intentionally does not require a ui object.
    Not meant for production code but very convenient while developing.
    '''
    if otherf:
        otherf.flush()
    f.write('%s at:\n' % msg)
    entries = [('%s:%s' % (fn, ln), func)
               for fn, ln, func, _text in traceback.extract_stack()[:-skip - 1]]
    if entries:
        fnmax = max(len(entry[0]) for entry in entries)
        for fnln, func in entries:
            f.write(' %-*s in %s\n' % (fnmax, fnln, func))
    f.flush()

# convenient shortcut
dst = debugstacktrace
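# A hedged sketch (not part of util.py) of the output format; the file
# names, line numbers and function names below are illustrative:
#
#   dst('entering merge')
#   # entering merge at:
#   #  some/module.py:123 in caller
#   #  another/module.py:456 in callee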