copyfile: allow optional hardlinking...
Pierre-Yves David
r23899:4e451d13 default
@@ -1,2223 +1,2229 @@
1 # util.py - Mercurial utility functions and platform specific implementations
1 # util.py - Mercurial utility functions and platform specific implementations
2 #
2 #
3 # Copyright 2005 K. Thananchayan <thananck@yahoo.com>
3 # Copyright 2005 K. Thananchayan <thananck@yahoo.com>
4 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
5 # Copyright 2006 Vadim Gelfer <vadim.gelfer@gmail.com>
5 # Copyright 2006 Vadim Gelfer <vadim.gelfer@gmail.com>
6 #
6 #
7 # This software may be used and distributed according to the terms of the
7 # This software may be used and distributed according to the terms of the
8 # GNU General Public License version 2 or any later version.
8 # GNU General Public License version 2 or any later version.
9
9
10 """Mercurial utility functions and platform specific implementations.
10 """Mercurial utility functions and platform specific implementations.
11
11
12 This contains helper routines that are independent of the SCM core and
12 This contains helper routines that are independent of the SCM core and
13 hide platform-specific details from the core.
13 hide platform-specific details from the core.
14 """
14 """
15
15
16 import i18n
16 import i18n
17 _ = i18n._
17 _ = i18n._
18 import error, osutil, encoding
18 import error, osutil, encoding
19 import errno, shutil, sys, tempfile, traceback
19 import errno, shutil, sys, tempfile, traceback
20 import re as remod
20 import re as remod
21 import os, time, datetime, calendar, textwrap, signal, collections
21 import os, time, datetime, calendar, textwrap, signal, collections
22 import imp, socket, urllib, struct
22 import imp, socket, urllib, struct
23 import gc
23 import gc
24
24
25 if os.name == 'nt':
25 if os.name == 'nt':
26 import windows as platform
26 import windows as platform
27 else:
27 else:
28 import posix as platform
28 import posix as platform
29
29
30 cachestat = platform.cachestat
30 cachestat = platform.cachestat
31 checkexec = platform.checkexec
31 checkexec = platform.checkexec
32 checklink = platform.checklink
32 checklink = platform.checklink
33 copymode = platform.copymode
33 copymode = platform.copymode
34 executablepath = platform.executablepath
34 executablepath = platform.executablepath
35 expandglobs = platform.expandglobs
35 expandglobs = platform.expandglobs
36 explainexit = platform.explainexit
36 explainexit = platform.explainexit
37 findexe = platform.findexe
37 findexe = platform.findexe
38 gethgcmd = platform.gethgcmd
38 gethgcmd = platform.gethgcmd
39 getuser = platform.getuser
39 getuser = platform.getuser
40 groupmembers = platform.groupmembers
40 groupmembers = platform.groupmembers
41 groupname = platform.groupname
41 groupname = platform.groupname
42 hidewindow = platform.hidewindow
42 hidewindow = platform.hidewindow
43 isexec = platform.isexec
43 isexec = platform.isexec
44 isowner = platform.isowner
44 isowner = platform.isowner
45 localpath = platform.localpath
45 localpath = platform.localpath
46 lookupreg = platform.lookupreg
46 lookupreg = platform.lookupreg
47 makedir = platform.makedir
47 makedir = platform.makedir
48 nlinks = platform.nlinks
48 nlinks = platform.nlinks
49 normpath = platform.normpath
49 normpath = platform.normpath
50 normcase = platform.normcase
50 normcase = platform.normcase
51 openhardlinks = platform.openhardlinks
51 openhardlinks = platform.openhardlinks
52 oslink = platform.oslink
52 oslink = platform.oslink
53 parsepatchoutput = platform.parsepatchoutput
53 parsepatchoutput = platform.parsepatchoutput
54 pconvert = platform.pconvert
54 pconvert = platform.pconvert
55 popen = platform.popen
55 popen = platform.popen
56 posixfile = platform.posixfile
56 posixfile = platform.posixfile
57 quotecommand = platform.quotecommand
57 quotecommand = platform.quotecommand
58 readpipe = platform.readpipe
58 readpipe = platform.readpipe
59 rename = platform.rename
59 rename = platform.rename
60 samedevice = platform.samedevice
60 samedevice = platform.samedevice
61 samefile = platform.samefile
61 samefile = platform.samefile
62 samestat = platform.samestat
62 samestat = platform.samestat
63 setbinary = platform.setbinary
63 setbinary = platform.setbinary
64 setflags = platform.setflags
64 setflags = platform.setflags
65 setsignalhandler = platform.setsignalhandler
65 setsignalhandler = platform.setsignalhandler
66 shellquote = platform.shellquote
66 shellquote = platform.shellquote
67 spawndetached = platform.spawndetached
67 spawndetached = platform.spawndetached
68 split = platform.split
68 split = platform.split
69 sshargs = platform.sshargs
69 sshargs = platform.sshargs
70 statfiles = getattr(osutil, 'statfiles', platform.statfiles)
70 statfiles = getattr(osutil, 'statfiles', platform.statfiles)
71 statisexec = platform.statisexec
71 statisexec = platform.statisexec
72 statislink = platform.statislink
72 statislink = platform.statislink
73 termwidth = platform.termwidth
73 termwidth = platform.termwidth
74 testpid = platform.testpid
74 testpid = platform.testpid
75 umask = platform.umask
75 umask = platform.umask
76 unlink = platform.unlink
76 unlink = platform.unlink
77 unlinkpath = platform.unlinkpath
77 unlinkpath = platform.unlinkpath
78 username = platform.username
78 username = platform.username
79
79
80 # Python compatibility
80 # Python compatibility
81
81
82 _notset = object()
82 _notset = object()
83
83
84 def safehasattr(thing, attr):
84 def safehasattr(thing, attr):
85 return getattr(thing, attr, _notset) is not _notset
85 return getattr(thing, attr, _notset) is not _notset
86
86
87 def sha1(s=''):
87 def sha1(s=''):
88 '''
88 '''
89 Low-overhead wrapper around Python's SHA support
89 Low-overhead wrapper around Python's SHA support
90
90
91 >>> f = _fastsha1
91 >>> f = _fastsha1
92 >>> a = sha1()
92 >>> a = sha1()
93 >>> a = f()
93 >>> a = f()
94 >>> a.hexdigest()
94 >>> a.hexdigest()
95 'da39a3ee5e6b4b0d3255bfef95601890afd80709'
95 'da39a3ee5e6b4b0d3255bfef95601890afd80709'
96 '''
96 '''
97
97
98 return _fastsha1(s)
98 return _fastsha1(s)
99
99
100 def _fastsha1(s=''):
100 def _fastsha1(s=''):
101 # This function will import sha1 from hashlib or sha (whichever is
101 # This function will import sha1 from hashlib or sha (whichever is
102 # available) and overwrite itself with it on the first call.
102 # available) and overwrite itself with it on the first call.
103 # Subsequent calls will go directly to the imported function.
103 # Subsequent calls will go directly to the imported function.
104 if sys.version_info >= (2, 5):
104 if sys.version_info >= (2, 5):
105 from hashlib import sha1 as _sha1
105 from hashlib import sha1 as _sha1
106 else:
106 else:
107 from sha import sha as _sha1
107 from sha import sha as _sha1
108 global _fastsha1, sha1
108 global _fastsha1, sha1
109 _fastsha1 = sha1 = _sha1
109 _fastsha1 = sha1 = _sha1
110 return _sha1(s)
110 return _sha1(s)
111
111
112 def md5(s=''):
112 def md5(s=''):
113 try:
113 try:
114 from hashlib import md5 as _md5
114 from hashlib import md5 as _md5
115 except ImportError:
115 except ImportError:
116 from md5 import md5 as _md5
116 from md5 import md5 as _md5
117 global md5
117 global md5
118 md5 = _md5
118 md5 = _md5
119 return _md5(s)
119 return _md5(s)
120
120
121 DIGESTS = {
121 DIGESTS = {
122 'md5': md5,
122 'md5': md5,
123 'sha1': sha1,
123 'sha1': sha1,
124 }
124 }
125 # List of digest types from strongest to weakest
125 # List of digest types from strongest to weakest
126 DIGESTS_BY_STRENGTH = ['sha1', 'md5']
126 DIGESTS_BY_STRENGTH = ['sha1', 'md5']
127
127
128 try:
128 try:
129 import hashlib
129 import hashlib
130 DIGESTS.update({
130 DIGESTS.update({
131 'sha512': hashlib.sha512,
131 'sha512': hashlib.sha512,
132 })
132 })
133 DIGESTS_BY_STRENGTH.insert(0, 'sha512')
133 DIGESTS_BY_STRENGTH.insert(0, 'sha512')
134 except ImportError:
134 except ImportError:
135 pass
135 pass
136
136
137 for k in DIGESTS_BY_STRENGTH:
137 for k in DIGESTS_BY_STRENGTH:
138 assert k in DIGESTS
138 assert k in DIGESTS
139
139
140 class digester(object):
140 class digester(object):
141 """helper to compute digests.
141 """helper to compute digests.
142
142
143 This helper can be used to compute one or more digests given their name.
143 This helper can be used to compute one or more digests given their name.
144
144
145 >>> d = digester(['md5', 'sha1'])
145 >>> d = digester(['md5', 'sha1'])
146 >>> d.update('foo')
146 >>> d.update('foo')
147 >>> [k for k in sorted(d)]
147 >>> [k for k in sorted(d)]
148 ['md5', 'sha1']
148 ['md5', 'sha1']
149 >>> d['md5']
149 >>> d['md5']
150 'acbd18db4cc2f85cedef654fccc4a4d8'
150 'acbd18db4cc2f85cedef654fccc4a4d8'
151 >>> d['sha1']
151 >>> d['sha1']
152 '0beec7b5ea3f0fdbc95d0dd47f3c5bc275da8a33'
152 '0beec7b5ea3f0fdbc95d0dd47f3c5bc275da8a33'
153 >>> digester.preferred(['md5', 'sha1'])
153 >>> digester.preferred(['md5', 'sha1'])
154 'sha1'
154 'sha1'
155 """
155 """
156
156
157 def __init__(self, digests, s=''):
157 def __init__(self, digests, s=''):
158 self._hashes = {}
158 self._hashes = {}
159 for k in digests:
159 for k in digests:
160 if k not in DIGESTS:
160 if k not in DIGESTS:
161 raise Abort(_('unknown digest type: %s') % k)
161 raise Abort(_('unknown digest type: %s') % k)
162 self._hashes[k] = DIGESTS[k]()
162 self._hashes[k] = DIGESTS[k]()
163 if s:
163 if s:
164 self.update(s)
164 self.update(s)
165
165
166 def update(self, data):
166 def update(self, data):
167 for h in self._hashes.values():
167 for h in self._hashes.values():
168 h.update(data)
168 h.update(data)
169
169
170 def __getitem__(self, key):
170 def __getitem__(self, key):
171 if key not in DIGESTS:
171 if key not in DIGESTS:
172 raise Abort(_('unknown digest type: %s') % key)
172 raise Abort(_('unknown digest type: %s') % key)
173 return self._hashes[key].hexdigest()
173 return self._hashes[key].hexdigest()
174
174
175 def __iter__(self):
175 def __iter__(self):
176 return iter(self._hashes)
176 return iter(self._hashes)
177
177
178 @staticmethod
178 @staticmethod
179 def preferred(supported):
179 def preferred(supported):
180 """returns the strongest digest type in both supported and DIGESTS."""
180 """returns the strongest digest type in both supported and DIGESTS."""
181
181
182 for k in DIGESTS_BY_STRENGTH:
182 for k in DIGESTS_BY_STRENGTH:
183 if k in supported:
183 if k in supported:
184 return k
184 return k
185 return None
185 return None
186
186
187 class digestchecker(object):
187 class digestchecker(object):
188 """file handle wrapper that additionally checks content against a given
188 """file handle wrapper that additionally checks content against a given
189 size and digests.
189 size and digests.
190
190
191 d = digestchecker(fh, size, {'md5': '...'})
191 d = digestchecker(fh, size, {'md5': '...'})
192
192
193 When multiple digests are given, all of them are validated.
193 When multiple digests are given, all of them are validated.
194 """
194 """
195
195
196 def __init__(self, fh, size, digests):
196 def __init__(self, fh, size, digests):
197 self._fh = fh
197 self._fh = fh
198 self._size = size
198 self._size = size
199 self._got = 0
199 self._got = 0
200 self._digests = dict(digests)
200 self._digests = dict(digests)
201 self._digester = digester(self._digests.keys())
201 self._digester = digester(self._digests.keys())
202
202
203 def read(self, length=-1):
203 def read(self, length=-1):
204 content = self._fh.read(length)
204 content = self._fh.read(length)
205 self._digester.update(content)
205 self._digester.update(content)
206 self._got += len(content)
206 self._got += len(content)
207 return content
207 return content
208
208
209 def validate(self):
209 def validate(self):
210 if self._size != self._got:
210 if self._size != self._got:
211 raise Abort(_('size mismatch: expected %d, got %d') %
211 raise Abort(_('size mismatch: expected %d, got %d') %
212 (self._size, self._got))
212 (self._size, self._got))
213 for k, v in self._digests.items():
213 for k, v in self._digests.items():
214 if v != self._digester[k]:
214 if v != self._digester[k]:
215 # i18n: first parameter is a digest name
215 # i18n: first parameter is a digest name
216 raise Abort(_('%s mismatch: expected %s, got %s') %
216 raise Abort(_('%s mismatch: expected %s, got %s') %
217 (k, v, self._digester[k]))
217 (k, v, self._digester[k]))
218
218
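An illustrative sketch (not part of util.py) of how a caller typically drives digestchecker: read the wrapped handle to exhaustion, then call validate(). The file name, size and digest value below are hypothetical.

    fh = open('payload.bin', 'rb')                 # hypothetical file
    wrapped = digestchecker(fh, 1024, {'sha1': '<expected hex digest>'})
    while wrapped.read(4096):                      # read() feeds the digester
        pass
    wrapped.validate()                             # raises Abort on size or digest mismatch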
219 try:
219 try:
220 buffer = buffer
220 buffer = buffer
221 except NameError:
221 except NameError:
222 if sys.version_info[0] < 3:
222 if sys.version_info[0] < 3:
223 def buffer(sliceable, offset=0):
223 def buffer(sliceable, offset=0):
224 return sliceable[offset:]
224 return sliceable[offset:]
225 else:
225 else:
226 def buffer(sliceable, offset=0):
226 def buffer(sliceable, offset=0):
227 return memoryview(sliceable)[offset:]
227 return memoryview(sliceable)[offset:]
228
228
229 import subprocess
229 import subprocess
230 closefds = os.name == 'posix'
230 closefds = os.name == 'posix'
231
231
232 def unpacker(fmt):
232 def unpacker(fmt):
233 """create a struct unpacker for the specified format"""
233 """create a struct unpacker for the specified format"""
234 try:
234 try:
235 # 2.5+
235 # 2.5+
236 return struct.Struct(fmt).unpack
236 return struct.Struct(fmt).unpack
237 except AttributeError:
237 except AttributeError:
238 # 2.4
238 # 2.4
239 return lambda buf: struct.unpack(fmt, buf)
239 return lambda buf: struct.unpack(fmt, buf)
240
240
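A small usage sketch (illustrative, not part of the module): the returned callable behaves like struct.unpack bound to one format.

    getint = unpacker('>I')         # big-endian unsigned 32-bit integer
    getint('\x00\x00\x00\x2a')      # -> (42,)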
241 def popen2(cmd, env=None, newlines=False):
241 def popen2(cmd, env=None, newlines=False):
242 # Setting bufsize to -1 lets the system decide the buffer size.
242 # Setting bufsize to -1 lets the system decide the buffer size.
243 # The default for bufsize is 0, meaning unbuffered. This leads to
243 # The default for bufsize is 0, meaning unbuffered. This leads to
244 # poor performance on Mac OS X: http://bugs.python.org/issue4194
244 # poor performance on Mac OS X: http://bugs.python.org/issue4194
245 p = subprocess.Popen(cmd, shell=True, bufsize=-1,
245 p = subprocess.Popen(cmd, shell=True, bufsize=-1,
246 close_fds=closefds,
246 close_fds=closefds,
247 stdin=subprocess.PIPE, stdout=subprocess.PIPE,
247 stdin=subprocess.PIPE, stdout=subprocess.PIPE,
248 universal_newlines=newlines,
248 universal_newlines=newlines,
249 env=env)
249 env=env)
250 return p.stdin, p.stdout
250 return p.stdin, p.stdout
251
251
252 def popen3(cmd, env=None, newlines=False):
252 def popen3(cmd, env=None, newlines=False):
253 stdin, stdout, stderr, p = popen4(cmd, env, newlines)
253 stdin, stdout, stderr, p = popen4(cmd, env, newlines)
254 return stdin, stdout, stderr
254 return stdin, stdout, stderr
255
255
256 def popen4(cmd, env=None, newlines=False):
256 def popen4(cmd, env=None, newlines=False):
257 p = subprocess.Popen(cmd, shell=True, bufsize=-1,
257 p = subprocess.Popen(cmd, shell=True, bufsize=-1,
258 close_fds=closefds,
258 close_fds=closefds,
259 stdin=subprocess.PIPE, stdout=subprocess.PIPE,
259 stdin=subprocess.PIPE, stdout=subprocess.PIPE,
260 stderr=subprocess.PIPE,
260 stderr=subprocess.PIPE,
261 universal_newlines=newlines,
261 universal_newlines=newlines,
262 env=env)
262 env=env)
263 return p.stdin, p.stdout, p.stderr, p
263 return p.stdin, p.stdout, p.stderr, p
264
264
265 def version():
265 def version():
266 """Return version information if available."""
266 """Return version information if available."""
267 try:
267 try:
268 import __version__
268 import __version__
269 return __version__.version
269 return __version__.version
270 except ImportError:
270 except ImportError:
271 return 'unknown'
271 return 'unknown'
272
272
273 # used by parsedate
273 # used by parsedate
274 defaultdateformats = (
274 defaultdateformats = (
275 '%Y-%m-%d %H:%M:%S',
275 '%Y-%m-%d %H:%M:%S',
276 '%Y-%m-%d %I:%M:%S%p',
276 '%Y-%m-%d %I:%M:%S%p',
277 '%Y-%m-%d %H:%M',
277 '%Y-%m-%d %H:%M',
278 '%Y-%m-%d %I:%M%p',
278 '%Y-%m-%d %I:%M%p',
279 '%Y-%m-%d',
279 '%Y-%m-%d',
280 '%m-%d',
280 '%m-%d',
281 '%m/%d',
281 '%m/%d',
282 '%m/%d/%y',
282 '%m/%d/%y',
283 '%m/%d/%Y',
283 '%m/%d/%Y',
284 '%a %b %d %H:%M:%S %Y',
284 '%a %b %d %H:%M:%S %Y',
285 '%a %b %d %I:%M:%S%p %Y',
285 '%a %b %d %I:%M:%S%p %Y',
286 '%a, %d %b %Y %H:%M:%S', # GNU coreutils "/bin/date --rfc-2822"
286 '%a, %d %b %Y %H:%M:%S', # GNU coreutils "/bin/date --rfc-2822"
287 '%b %d %H:%M:%S %Y',
287 '%b %d %H:%M:%S %Y',
288 '%b %d %I:%M:%S%p %Y',
288 '%b %d %I:%M:%S%p %Y',
289 '%b %d %H:%M:%S',
289 '%b %d %H:%M:%S',
290 '%b %d %I:%M:%S%p',
290 '%b %d %I:%M:%S%p',
291 '%b %d %H:%M',
291 '%b %d %H:%M',
292 '%b %d %I:%M%p',
292 '%b %d %I:%M%p',
293 '%b %d %Y',
293 '%b %d %Y',
294 '%b %d',
294 '%b %d',
295 '%H:%M:%S',
295 '%H:%M:%S',
296 '%I:%M:%S%p',
296 '%I:%M:%S%p',
297 '%H:%M',
297 '%H:%M',
298 '%I:%M%p',
298 '%I:%M%p',
299 )
299 )
300
300
301 extendeddateformats = defaultdateformats + (
301 extendeddateformats = defaultdateformats + (
302 "%Y",
302 "%Y",
303 "%Y-%m",
303 "%Y-%m",
304 "%b",
304 "%b",
305 "%b %Y",
305 "%b %Y",
306 )
306 )
307
307
308 def cachefunc(func):
308 def cachefunc(func):
309 '''cache the result of function calls'''
309 '''cache the result of function calls'''
310 # XXX doesn't handle keyword args
310 # XXX doesn't handle keyword args
311 if func.func_code.co_argcount == 0:
311 if func.func_code.co_argcount == 0:
312 cache = []
312 cache = []
313 def f():
313 def f():
314 if len(cache) == 0:
314 if len(cache) == 0:
315 cache.append(func())
315 cache.append(func())
316 return cache[0]
316 return cache[0]
317 return f
317 return f
318 cache = {}
318 cache = {}
319 if func.func_code.co_argcount == 1:
319 if func.func_code.co_argcount == 1:
320 # we gain a small amount of time because
320 # we gain a small amount of time because
321 # we don't need to pack/unpack the list
321 # we don't need to pack/unpack the list
322 def f(arg):
322 def f(arg):
323 if arg not in cache:
323 if arg not in cache:
324 cache[arg] = func(arg)
324 cache[arg] = func(arg)
325 return cache[arg]
325 return cache[arg]
326 else:
326 else:
327 def f(*args):
327 def f(*args):
328 if args not in cache:
328 if args not in cache:
329 cache[args] = func(*args)
329 cache[args] = func(*args)
330 return cache[args]
330 return cache[args]
331
331
332 return f
332 return f
333
333
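Illustrative use (the wrapped function is hypothetical): results are memoized per argument, so a repeated call with the same argument is served from the cache.

    def expensive(rev):
        return rev * 2              # stand-in for a costly computation

    cached = cachefunc(expensive)
    cached(7)                       # computes and stores 14
    cached(7)                       # returned from the cache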
334 try:
334 try:
335 collections.deque.remove
335 collections.deque.remove
336 deque = collections.deque
336 deque = collections.deque
337 except AttributeError:
337 except AttributeError:
338 # python 2.4 lacks deque.remove
338 # python 2.4 lacks deque.remove
339 class deque(collections.deque):
339 class deque(collections.deque):
340 def remove(self, val):
340 def remove(self, val):
341 for i, v in enumerate(self):
341 for i, v in enumerate(self):
342 if v == val:
342 if v == val:
343 del self[i]
343 del self[i]
344 break
344 break
345
345
346 class sortdict(dict):
346 class sortdict(dict):
347 '''a simple sorted dictionary'''
347 '''a simple sorted dictionary'''
348 def __init__(self, data=None):
348 def __init__(self, data=None):
349 self._list = []
349 self._list = []
350 if data:
350 if data:
351 self.update(data)
351 self.update(data)
352 def copy(self):
352 def copy(self):
353 return sortdict(self)
353 return sortdict(self)
354 def __setitem__(self, key, val):
354 def __setitem__(self, key, val):
355 if key in self:
355 if key in self:
356 self._list.remove(key)
356 self._list.remove(key)
357 self._list.append(key)
357 self._list.append(key)
358 dict.__setitem__(self, key, val)
358 dict.__setitem__(self, key, val)
359 def __iter__(self):
359 def __iter__(self):
360 return self._list.__iter__()
360 return self._list.__iter__()
361 def update(self, src):
361 def update(self, src):
362 for k in src:
362 for k in src:
363 self[k] = src[k]
363 self[k] = src[k]
364 def clear(self):
364 def clear(self):
365 dict.clear(self)
365 dict.clear(self)
366 self._list = []
366 self._list = []
367 def items(self):
367 def items(self):
368 return [(k, self[k]) for k in self._list]
368 return [(k, self[k]) for k in self._list]
369 def __delitem__(self, key):
369 def __delitem__(self, key):
370 dict.__delitem__(self, key)
370 dict.__delitem__(self, key)
371 self._list.remove(key)
371 self._list.remove(key)
372 def pop(self, key, *args, **kwargs):
372 def pop(self, key, *args, **kwargs):
373 dict.pop(self, key, *args, **kwargs)
373 dict.pop(self, key, *args, **kwargs)
374 try:
374 try:
375 self._list.remove(key)
375 self._list.remove(key)
376 except ValueError:
376 except ValueError:
377 pass
377 pass
378 def keys(self):
378 def keys(self):
379 return self._list
379 return self._list
380 def iterkeys(self):
380 def iterkeys(self):
381 return self._list.__iter__()
381 return self._list.__iter__()
382 def iteritems(self):
382 def iteritems(self):
383 for k in self._list:
383 for k in self._list:
384 yield k, self[k]
384 yield k, self[k]
385 def insert(self, index, key, val):
385 def insert(self, index, key, val):
386 self._list.insert(index, key)
386 self._list.insert(index, key)
387 dict.__setitem__(self, key, val)
387 dict.__setitem__(self, key, val)
388
388
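A short sketch of the ordering guarantee (keys are illustrative): iteration and keys() follow insertion order rather than sort order.

    d = sortdict()
    d['b'] = 1
    d['a'] = 2
    d.keys()                        # -> ['b', 'a']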
389 class lrucachedict(object):
389 class lrucachedict(object):
390 '''cache most recent gets from or sets to this dictionary'''
390 '''cache most recent gets from or sets to this dictionary'''
391 def __init__(self, maxsize):
391 def __init__(self, maxsize):
392 self._cache = {}
392 self._cache = {}
393 self._maxsize = maxsize
393 self._maxsize = maxsize
394 self._order = deque()
394 self._order = deque()
395
395
396 def __getitem__(self, key):
396 def __getitem__(self, key):
397 value = self._cache[key]
397 value = self._cache[key]
398 self._order.remove(key)
398 self._order.remove(key)
399 self._order.append(key)
399 self._order.append(key)
400 return value
400 return value
401
401
402 def __setitem__(self, key, value):
402 def __setitem__(self, key, value):
403 if key not in self._cache:
403 if key not in self._cache:
404 if len(self._cache) >= self._maxsize:
404 if len(self._cache) >= self._maxsize:
405 del self._cache[self._order.popleft()]
405 del self._cache[self._order.popleft()]
406 else:
406 else:
407 self._order.remove(key)
407 self._order.remove(key)
408 self._cache[key] = value
408 self._cache[key] = value
409 self._order.append(key)
409 self._order.append(key)
410
410
411 def __contains__(self, key):
411 def __contains__(self, key):
412 return key in self._cache
412 return key in self._cache
413
413
414 def clear(self):
414 def clear(self):
415 self._cache.clear()
415 self._cache.clear()
416 self._order = deque()
416 self._order = deque()
417
417
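Usage sketch (keys and values are illustrative): once maxsize entries exist, inserting a new key evicts the least recently used one.

    d = lrucachedict(2)
    d['a'] = 1
    d['b'] = 2
    d['a']                          # touching 'a' makes 'b' the oldest entry
    d['c'] = 3                      # evicts 'b'
    'b' in d                        # -> False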
418 def lrucachefunc(func):
418 def lrucachefunc(func):
419 '''cache most recent results of function calls'''
419 '''cache most recent results of function calls'''
420 cache = {}
420 cache = {}
421 order = deque()
421 order = deque()
422 if func.func_code.co_argcount == 1:
422 if func.func_code.co_argcount == 1:
423 def f(arg):
423 def f(arg):
424 if arg not in cache:
424 if arg not in cache:
425 if len(cache) > 20:
425 if len(cache) > 20:
426 del cache[order.popleft()]
426 del cache[order.popleft()]
427 cache[arg] = func(arg)
427 cache[arg] = func(arg)
428 else:
428 else:
429 order.remove(arg)
429 order.remove(arg)
430 order.append(arg)
430 order.append(arg)
431 return cache[arg]
431 return cache[arg]
432 else:
432 else:
433 def f(*args):
433 def f(*args):
434 if args not in cache:
434 if args not in cache:
435 if len(cache) > 20:
435 if len(cache) > 20:
436 del cache[order.popleft()]
436 del cache[order.popleft()]
437 cache[args] = func(*args)
437 cache[args] = func(*args)
438 else:
438 else:
439 order.remove(args)
439 order.remove(args)
440 order.append(args)
440 order.append(args)
441 return cache[args]
441 return cache[args]
442
442
443 return f
443 return f
444
444
445 class propertycache(object):
445 class propertycache(object):
446 def __init__(self, func):
446 def __init__(self, func):
447 self.func = func
447 self.func = func
448 self.name = func.__name__
448 self.name = func.__name__
449 def __get__(self, obj, type=None):
449 def __get__(self, obj, type=None):
450 result = self.func(obj)
450 result = self.func(obj)
451 self.cachevalue(obj, result)
451 self.cachevalue(obj, result)
452 return result
452 return result
453
453
454 def cachevalue(self, obj, value):
454 def cachevalue(self, obj, value):
455 # __dict__ assignment required to bypass __setattr__ (eg: repoview)
455 # __dict__ assignment required to bypass __setattr__ (eg: repoview)
456 obj.__dict__[self.name] = value
456 obj.__dict__[self.name] = value
457
457
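Illustrative decorator use (the class below is hypothetical): the wrapped method runs once per instance, after which the computed value is read straight from the instance __dict__.

    class example(object):
        @propertycache
        def answer(self):
            return 42               # computed only on first access

    e = example()
    e.answer                        # calls the function and caches 42
    e.answer                        # plain attribute lookup from now on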
458 def pipefilter(s, cmd):
458 def pipefilter(s, cmd):
459 '''filter string S through command CMD, returning its output'''
459 '''filter string S through command CMD, returning its output'''
460 p = subprocess.Popen(cmd, shell=True, close_fds=closefds,
460 p = subprocess.Popen(cmd, shell=True, close_fds=closefds,
461 stdin=subprocess.PIPE, stdout=subprocess.PIPE)
461 stdin=subprocess.PIPE, stdout=subprocess.PIPE)
462 pout, perr = p.communicate(s)
462 pout, perr = p.communicate(s)
463 return pout
463 return pout
464
464
465 def tempfilter(s, cmd):
465 def tempfilter(s, cmd):
466 '''filter string S through a pair of temporary files with CMD.
466 '''filter string S through a pair of temporary files with CMD.
467 CMD is used as a template to create the real command to be run,
467 CMD is used as a template to create the real command to be run,
468 with the strings INFILE and OUTFILE replaced by the real names of
468 with the strings INFILE and OUTFILE replaced by the real names of
469 the temporary files generated.'''
469 the temporary files generated.'''
470 inname, outname = None, None
470 inname, outname = None, None
471 try:
471 try:
472 infd, inname = tempfile.mkstemp(prefix='hg-filter-in-')
472 infd, inname = tempfile.mkstemp(prefix='hg-filter-in-')
473 fp = os.fdopen(infd, 'wb')
473 fp = os.fdopen(infd, 'wb')
474 fp.write(s)
474 fp.write(s)
475 fp.close()
475 fp.close()
476 outfd, outname = tempfile.mkstemp(prefix='hg-filter-out-')
476 outfd, outname = tempfile.mkstemp(prefix='hg-filter-out-')
477 os.close(outfd)
477 os.close(outfd)
478 cmd = cmd.replace('INFILE', inname)
478 cmd = cmd.replace('INFILE', inname)
479 cmd = cmd.replace('OUTFILE', outname)
479 cmd = cmd.replace('OUTFILE', outname)
480 code = os.system(cmd)
480 code = os.system(cmd)
481 if sys.platform == 'OpenVMS' and code & 1:
481 if sys.platform == 'OpenVMS' and code & 1:
482 code = 0
482 code = 0
483 if code:
483 if code:
484 raise Abort(_("command '%s' failed: %s") %
484 raise Abort(_("command '%s' failed: %s") %
485 (cmd, explainexit(code)))
485 (cmd, explainexit(code)))
486 fp = open(outname, 'rb')
486 fp = open(outname, 'rb')
487 r = fp.read()
487 r = fp.read()
488 fp.close()
488 fp.close()
489 return r
489 return r
490 finally:
490 finally:
491 try:
491 try:
492 if inname:
492 if inname:
493 os.unlink(inname)
493 os.unlink(inname)
494 except OSError:
494 except OSError:
495 pass
495 pass
496 try:
496 try:
497 if outname:
497 if outname:
498 os.unlink(outname)
498 os.unlink(outname)
499 except OSError:
499 except OSError:
500 pass
500 pass
501
501
502 filtertable = {
502 filtertable = {
503 'tempfile:': tempfilter,
503 'tempfile:': tempfilter,
504 'pipe:': pipefilter,
504 'pipe:': pipefilter,
505 }
505 }
506
506
507 def filter(s, cmd):
507 def filter(s, cmd):
508 "filter a string through a command that transforms its input to its output"
508 "filter a string through a command that transforms its input to its output"
509 for name, fn in filtertable.iteritems():
509 for name, fn in filtertable.iteritems():
510 if cmd.startswith(name):
510 if cmd.startswith(name):
511 return fn(s, cmd[len(name):].lstrip())
511 return fn(s, cmd[len(name):].lstrip())
512 return pipefilter(s, cmd)
512 return pipefilter(s, cmd)
513
513
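Two illustrative calls (the shell commands are examples and assume an external 'sort' program): the prefix selects the strategy, anything else falls through to pipefilter.

    filter('b\na\n', 'pipe: sort')                       # filtered through a pipe
    filter('b\na\n', 'tempfile: sort INFILE > OUTFILE')  # filtered via temp files
    # both return 'a\nb\n'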
514 def binary(s):
514 def binary(s):
515 """return true if a string is binary data"""
515 """return true if a string is binary data"""
516 return bool(s and '\0' in s)
516 return bool(s and '\0' in s)
517
517
518 def increasingchunks(source, min=1024, max=65536):
518 def increasingchunks(source, min=1024, max=65536):
519 '''return no less than min bytes per chunk while data remains,
519 '''return no less than min bytes per chunk while data remains,
520 doubling min after each chunk until it reaches max'''
520 doubling min after each chunk until it reaches max'''
521 def log2(x):
521 def log2(x):
522 if not x:
522 if not x:
523 return 0
523 return 0
524 i = 0
524 i = 0
525 while x:
525 while x:
526 x >>= 1
526 x >>= 1
527 i += 1
527 i += 1
528 return i - 1
528 return i - 1
529
529
530 buf = []
530 buf = []
531 blen = 0
531 blen = 0
532 for chunk in source:
532 for chunk in source:
533 buf.append(chunk)
533 buf.append(chunk)
534 blen += len(chunk)
534 blen += len(chunk)
535 if blen >= min:
535 if blen >= min:
536 if min < max:
536 if min < max:
537 min = min << 1
537 min = min << 1
538 nmin = 1 << log2(blen)
538 nmin = 1 << log2(blen)
539 if nmin > min:
539 if nmin > min:
540 min = nmin
540 min = nmin
541 if min > max:
541 if min > max:
542 min = max
542 min = max
543 yield ''.join(buf)
543 yield ''.join(buf)
544 blen = 0
544 blen = 0
545 buf = []
545 buf = []
546 if buf:
546 if buf:
547 yield ''.join(buf)
547 yield ''.join(buf)
548
548
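A worked sketch of the rechunking behaviour (input sizes chosen for illustration): output is held back until at least min bytes are buffered, and the threshold grows after each yield.

    chunks = ['a' * 600, 'b' * 600, 'c' * 600]
    [len(c) for c in increasingchunks(chunks, min=1024, max=65536)]
    # -> [1200, 600]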
549 Abort = error.Abort
549 Abort = error.Abort
550
550
551 def always(fn):
551 def always(fn):
552 return True
552 return True
553
553
554 def never(fn):
554 def never(fn):
555 return False
555 return False
556
556
557 def nogc(func):
557 def nogc(func):
558 """disable garbage collector
558 """disable garbage collector
559
559
560 Python's garbage collector triggers a GC each time a certain number of
560 Python's garbage collector triggers a GC each time a certain number of
561 container objects (the number being defined by gc.get_threshold()) are
561 container objects (the number being defined by gc.get_threshold()) are
562 allocated even when marked not to be tracked by the collector. Tracking has
562 allocated even when marked not to be tracked by the collector. Tracking has
563 no effect on when GCs are triggered, only on what objects the GC looks
563 no effect on when GCs are triggered, only on what objects the GC looks
564 into. As a workaround, disable GC while building complex (huge)
564 into. As a workaround, disable GC while building complex (huge)
565 containers.
565 containers.
566
566
567 This garbage collector issue has been fixed in 2.7.
567 This garbage collector issue has been fixed in 2.7.
568 """
568 """
569 def wrapper(*args, **kwargs):
569 def wrapper(*args, **kwargs):
570 gcenabled = gc.isenabled()
570 gcenabled = gc.isenabled()
571 gc.disable()
571 gc.disable()
572 try:
572 try:
573 return func(*args, **kwargs)
573 return func(*args, **kwargs)
574 finally:
574 finally:
575 if gcenabled:
575 if gcenabled:
576 gc.enable()
576 gc.enable()
577 return wrapper
577 return wrapper
578
578
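Typical use is as a decorator around code that allocates many container objects (the function below is hypothetical):

    @nogc
    def buildindex(entries):
        # garbage collection stays disabled while this body runs
        return dict((e, len(e)) for e in entries)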
579 def pathto(root, n1, n2):
579 def pathto(root, n1, n2):
580 '''return the relative path from one place to another.
580 '''return the relative path from one place to another.
581 root should use os.sep to separate directories
581 root should use os.sep to separate directories
582 n1 should use os.sep to separate directories
582 n1 should use os.sep to separate directories
583 n2 should use "/" to separate directories
583 n2 should use "/" to separate directories
584 returns an os.sep-separated path.
584 returns an os.sep-separated path.
585
585
586 If n1 is a relative path, it's assumed it's
586 If n1 is a relative path, it's assumed it's
587 relative to root.
587 relative to root.
588 n2 should always be relative to root.
588 n2 should always be relative to root.
589 '''
589 '''
590 if not n1:
590 if not n1:
591 return localpath(n2)
591 return localpath(n2)
592 if os.path.isabs(n1):
592 if os.path.isabs(n1):
593 if os.path.splitdrive(root)[0] != os.path.splitdrive(n1)[0]:
593 if os.path.splitdrive(root)[0] != os.path.splitdrive(n1)[0]:
594 return os.path.join(root, localpath(n2))
594 return os.path.join(root, localpath(n2))
595 n2 = '/'.join((pconvert(root), n2))
595 n2 = '/'.join((pconvert(root), n2))
596 a, b = splitpath(n1), n2.split('/')
596 a, b = splitpath(n1), n2.split('/')
597 a.reverse()
597 a.reverse()
598 b.reverse()
598 b.reverse()
599 while a and b and a[-1] == b[-1]:
599 while a and b and a[-1] == b[-1]:
600 a.pop()
600 a.pop()
601 b.pop()
601 b.pop()
602 b.reverse()
602 b.reverse()
603 return os.sep.join((['..'] * len(a)) + b) or '.'
603 return os.sep.join((['..'] * len(a)) + b) or '.'
604
604
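A worked example on a POSIX layout (paths are illustrative): with an absolute n1 the shared prefix is stripped and replaced by '..' components.

    pathto('/repo', '/repo/a/b', 'a/c/x.txt')   # -> '../c/x.txt'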
605 def mainfrozen():
605 def mainfrozen():
606 """return True if we are a frozen executable.
606 """return True if we are a frozen executable.
607
607
608 The code supports py2exe (most common, Windows only) and tools/freeze
608 The code supports py2exe (most common, Windows only) and tools/freeze
609 (portable, not much used).
609 (portable, not much used).
610 """
610 """
611 return (safehasattr(sys, "frozen") or # new py2exe
611 return (safehasattr(sys, "frozen") or # new py2exe
612 safehasattr(sys, "importers") or # old py2exe
612 safehasattr(sys, "importers") or # old py2exe
613 imp.is_frozen("__main__")) # tools/freeze
613 imp.is_frozen("__main__")) # tools/freeze
614
614
615 # the location of data files matching the source code
615 # the location of data files matching the source code
616 if mainfrozen():
616 if mainfrozen():
617 # executable version (py2exe) doesn't support __file__
617 # executable version (py2exe) doesn't support __file__
618 datapath = os.path.dirname(sys.executable)
618 datapath = os.path.dirname(sys.executable)
619 else:
619 else:
620 datapath = os.path.dirname(__file__)
620 datapath = os.path.dirname(__file__)
621
621
622 i18n.setdatapath(datapath)
622 i18n.setdatapath(datapath)
623
623
624 _hgexecutable = None
624 _hgexecutable = None
625
625
626 def hgexecutable():
626 def hgexecutable():
627 """return location of the 'hg' executable.
627 """return location of the 'hg' executable.
628
628
629 Defaults to $HG or 'hg' in the search path.
629 Defaults to $HG or 'hg' in the search path.
630 """
630 """
631 if _hgexecutable is None:
631 if _hgexecutable is None:
632 hg = os.environ.get('HG')
632 hg = os.environ.get('HG')
633 mainmod = sys.modules['__main__']
633 mainmod = sys.modules['__main__']
634 if hg:
634 if hg:
635 _sethgexecutable(hg)
635 _sethgexecutable(hg)
636 elif mainfrozen():
636 elif mainfrozen():
637 _sethgexecutable(sys.executable)
637 _sethgexecutable(sys.executable)
638 elif os.path.basename(getattr(mainmod, '__file__', '')) == 'hg':
638 elif os.path.basename(getattr(mainmod, '__file__', '')) == 'hg':
639 _sethgexecutable(mainmod.__file__)
639 _sethgexecutable(mainmod.__file__)
640 else:
640 else:
641 exe = findexe('hg') or os.path.basename(sys.argv[0])
641 exe = findexe('hg') or os.path.basename(sys.argv[0])
642 _sethgexecutable(exe)
642 _sethgexecutable(exe)
643 return _hgexecutable
643 return _hgexecutable
644
644
645 def _sethgexecutable(path):
645 def _sethgexecutable(path):
646 """set location of the 'hg' executable"""
646 """set location of the 'hg' executable"""
647 global _hgexecutable
647 global _hgexecutable
648 _hgexecutable = path
648 _hgexecutable = path
649
649
650 def system(cmd, environ={}, cwd=None, onerr=None, errprefix=None, out=None):
650 def system(cmd, environ={}, cwd=None, onerr=None, errprefix=None, out=None):
651 '''enhanced shell command execution.
651 '''enhanced shell command execution.
652 run with environment maybe modified, maybe in different dir.
652 run with environment maybe modified, maybe in different dir.
653
653
654 if command fails and onerr is None, return status, else raise onerr
654 if command fails and onerr is None, return status, else raise onerr
655 object as exception.
655 object as exception.
656
656
657 if out is specified, it is assumed to be a file-like object that has a
657 if out is specified, it is assumed to be a file-like object that has a
658 write() method. stdout and stderr will be redirected to out.'''
658 write() method. stdout and stderr will be redirected to out.'''
659 try:
659 try:
660 sys.stdout.flush()
660 sys.stdout.flush()
661 except Exception:
661 except Exception:
662 pass
662 pass
663 def py2shell(val):
663 def py2shell(val):
664 'convert python object into string that is useful to shell'
664 'convert python object into string that is useful to shell'
665 if val is None or val is False:
665 if val is None or val is False:
666 return '0'
666 return '0'
667 if val is True:
667 if val is True:
668 return '1'
668 return '1'
669 return str(val)
669 return str(val)
670 origcmd = cmd
670 origcmd = cmd
671 cmd = quotecommand(cmd)
671 cmd = quotecommand(cmd)
672 if sys.platform == 'plan9' and (sys.version_info[0] == 2
672 if sys.platform == 'plan9' and (sys.version_info[0] == 2
673 and sys.version_info[1] < 7):
673 and sys.version_info[1] < 7):
674 # subprocess kludge to work around issues in half-baked Python
674 # subprocess kludge to work around issues in half-baked Python
675 # ports, notably bichued/python:
675 # ports, notably bichued/python:
676 if not cwd is None:
676 if not cwd is None:
677 os.chdir(cwd)
677 os.chdir(cwd)
678 rc = os.system(cmd)
678 rc = os.system(cmd)
679 else:
679 else:
680 env = dict(os.environ)
680 env = dict(os.environ)
681 env.update((k, py2shell(v)) for k, v in environ.iteritems())
681 env.update((k, py2shell(v)) for k, v in environ.iteritems())
682 env['HG'] = hgexecutable()
682 env['HG'] = hgexecutable()
683 if out is None or out == sys.__stdout__:
683 if out is None or out == sys.__stdout__:
684 rc = subprocess.call(cmd, shell=True, close_fds=closefds,
684 rc = subprocess.call(cmd, shell=True, close_fds=closefds,
685 env=env, cwd=cwd)
685 env=env, cwd=cwd)
686 else:
686 else:
687 proc = subprocess.Popen(cmd, shell=True, close_fds=closefds,
687 proc = subprocess.Popen(cmd, shell=True, close_fds=closefds,
688 env=env, cwd=cwd, stdout=subprocess.PIPE,
688 env=env, cwd=cwd, stdout=subprocess.PIPE,
689 stderr=subprocess.STDOUT)
689 stderr=subprocess.STDOUT)
690 while True:
690 while True:
691 line = proc.stdout.readline()
691 line = proc.stdout.readline()
692 if not line:
692 if not line:
693 break
693 break
694 out.write(line)
694 out.write(line)
695 proc.wait()
695 proc.wait()
696 rc = proc.returncode
696 rc = proc.returncode
697 if sys.platform == 'OpenVMS' and rc & 1:
697 if sys.platform == 'OpenVMS' and rc & 1:
698 rc = 0
698 rc = 0
699 if rc and onerr:
699 if rc and onerr:
700 errmsg = '%s %s' % (os.path.basename(origcmd.split(None, 1)[0]),
700 errmsg = '%s %s' % (os.path.basename(origcmd.split(None, 1)[0]),
701 explainexit(rc)[0])
701 explainexit(rc)[0])
702 if errprefix:
702 if errprefix:
703 errmsg = '%s: %s' % (errprefix, errmsg)
703 errmsg = '%s: %s' % (errprefix, errmsg)
704 raise onerr(errmsg)
704 raise onerr(errmsg)
705 return rc
705 return rc
706
706
707 def checksignature(func):
707 def checksignature(func):
708 '''wrap a function with code to check for calling errors'''
708 '''wrap a function with code to check for calling errors'''
709 def check(*args, **kwargs):
709 def check(*args, **kwargs):
710 try:
710 try:
711 return func(*args, **kwargs)
711 return func(*args, **kwargs)
712 except TypeError:
712 except TypeError:
713 if len(traceback.extract_tb(sys.exc_info()[2])) == 1:
713 if len(traceback.extract_tb(sys.exc_info()[2])) == 1:
714 raise error.SignatureError
714 raise error.SignatureError
715 raise
715 raise
716
716
717 return check
717 return check
718
718
719 def copyfile(src, dest):
719 def copyfile(src, dest, hardlink=False):
720 "copy a file, preserving mode and atime/mtime"
720 "copy a file, preserving mode and atime/mtime"
721 if os.path.lexists(dest):
721 if os.path.lexists(dest):
722 unlink(dest)
722 unlink(dest)
723 if hardlink:
724 try:
725 oslink(src, dest)
726 return
727 except (IOError, OSError):
728 pass # fall back to normal copy
723 if os.path.islink(src):
729 if os.path.islink(src):
724 os.symlink(os.readlink(src), dest)
730 os.symlink(os.readlink(src), dest)
725 else:
731 else:
726 try:
732 try:
727 shutil.copyfile(src, dest)
733 shutil.copyfile(src, dest)
728 shutil.copymode(src, dest)
734 shutil.copymode(src, dest)
729 except shutil.Error, inst:
735 except shutil.Error, inst:
730 raise Abort(str(inst))
736 raise Abort(str(inst))
731
737
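A minimal usage sketch for the new parameter (illustrative paths, not part of the changeset): callers can now ask for a hardlink and transparently fall back to a plain copy when linking fails, e.g. across devices.

    from mercurial import util

    util.copyfile('src', 'dst', hardlink=True)   # try oslink first, copy on failure
    util.copyfile('src', 'dst2')                 # default: plain copy, as before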
732 def copyfiles(src, dst, hardlink=None):
738 def copyfiles(src, dst, hardlink=None):
733 """Copy a directory tree using hardlinks if possible"""
739 """Copy a directory tree using hardlinks if possible"""
734
740
735 if hardlink is None:
741 if hardlink is None:
736 hardlink = (os.stat(src).st_dev ==
742 hardlink = (os.stat(src).st_dev ==
737 os.stat(os.path.dirname(dst)).st_dev)
743 os.stat(os.path.dirname(dst)).st_dev)
738
744
739 num = 0
745 num = 0
740 if os.path.isdir(src):
746 if os.path.isdir(src):
741 os.mkdir(dst)
747 os.mkdir(dst)
742 for name, kind in osutil.listdir(src):
748 for name, kind in osutil.listdir(src):
743 srcname = os.path.join(src, name)
749 srcname = os.path.join(src, name)
744 dstname = os.path.join(dst, name)
750 dstname = os.path.join(dst, name)
745 hardlink, n = copyfiles(srcname, dstname, hardlink)
751 hardlink, n = copyfiles(srcname, dstname, hardlink)
746 num += n
752 num += n
747 else:
753 else:
748 if hardlink:
754 if hardlink:
749 try:
755 try:
750 oslink(src, dst)
756 oslink(src, dst)
751 except (IOError, OSError):
757 except (IOError, OSError):
752 hardlink = False
758 hardlink = False
753 shutil.copy(src, dst)
759 shutil.copy(src, dst)
754 else:
760 else:
755 shutil.copy(src, dst)
761 shutil.copy(src, dst)
756 num += 1
762 num += 1
757
763
758 return hardlink, num
764 return hardlink, num
759
765
760 _winreservednames = '''con prn aux nul
766 _winreservednames = '''con prn aux nul
761 com1 com2 com3 com4 com5 com6 com7 com8 com9
767 com1 com2 com3 com4 com5 com6 com7 com8 com9
762 lpt1 lpt2 lpt3 lpt4 lpt5 lpt6 lpt7 lpt8 lpt9'''.split()
768 lpt1 lpt2 lpt3 lpt4 lpt5 lpt6 lpt7 lpt8 lpt9'''.split()
763 _winreservedchars = ':*?"<>|'
769 _winreservedchars = ':*?"<>|'
764 def checkwinfilename(path):
770 def checkwinfilename(path):
765 r'''Check that the base-relative path is a valid filename on Windows.
771 r'''Check that the base-relative path is a valid filename on Windows.
766 Returns None if the path is ok, or a UI string describing the problem.
772 Returns None if the path is ok, or a UI string describing the problem.
767
773
768 >>> checkwinfilename("just/a/normal/path")
774 >>> checkwinfilename("just/a/normal/path")
769 >>> checkwinfilename("foo/bar/con.xml")
775 >>> checkwinfilename("foo/bar/con.xml")
770 "filename contains 'con', which is reserved on Windows"
776 "filename contains 'con', which is reserved on Windows"
771 >>> checkwinfilename("foo/con.xml/bar")
777 >>> checkwinfilename("foo/con.xml/bar")
772 "filename contains 'con', which is reserved on Windows"
778 "filename contains 'con', which is reserved on Windows"
773 >>> checkwinfilename("foo/bar/xml.con")
779 >>> checkwinfilename("foo/bar/xml.con")
774 >>> checkwinfilename("foo/bar/AUX/bla.txt")
780 >>> checkwinfilename("foo/bar/AUX/bla.txt")
775 "filename contains 'AUX', which is reserved on Windows"
781 "filename contains 'AUX', which is reserved on Windows"
776 >>> checkwinfilename("foo/bar/bla:.txt")
782 >>> checkwinfilename("foo/bar/bla:.txt")
777 "filename contains ':', which is reserved on Windows"
783 "filename contains ':', which is reserved on Windows"
778 >>> checkwinfilename("foo/bar/b\07la.txt")
784 >>> checkwinfilename("foo/bar/b\07la.txt")
779 "filename contains '\\x07', which is invalid on Windows"
785 "filename contains '\\x07', which is invalid on Windows"
780 >>> checkwinfilename("foo/bar/bla ")
786 >>> checkwinfilename("foo/bar/bla ")
781 "filename ends with ' ', which is not allowed on Windows"
787 "filename ends with ' ', which is not allowed on Windows"
782 >>> checkwinfilename("../bar")
788 >>> checkwinfilename("../bar")
783 >>> checkwinfilename("foo\\")
789 >>> checkwinfilename("foo\\")
784 "filename ends with '\\', which is invalid on Windows"
790 "filename ends with '\\', which is invalid on Windows"
785 >>> checkwinfilename("foo\\/bar")
791 >>> checkwinfilename("foo\\/bar")
786 "directory name ends with '\\', which is invalid on Windows"
792 "directory name ends with '\\', which is invalid on Windows"
787 '''
793 '''
788 if path.endswith('\\'):
794 if path.endswith('\\'):
789 return _("filename ends with '\\', which is invalid on Windows")
795 return _("filename ends with '\\', which is invalid on Windows")
790 if '\\/' in path:
796 if '\\/' in path:
791 return _("directory name ends with '\\', which is invalid on Windows")
797 return _("directory name ends with '\\', which is invalid on Windows")
792 for n in path.replace('\\', '/').split('/'):
798 for n in path.replace('\\', '/').split('/'):
793 if not n:
799 if not n:
794 continue
800 continue
795 for c in n:
801 for c in n:
796 if c in _winreservedchars:
802 if c in _winreservedchars:
797 return _("filename contains '%s', which is reserved "
803 return _("filename contains '%s', which is reserved "
798 "on Windows") % c
804 "on Windows") % c
799 if ord(c) <= 31:
805 if ord(c) <= 31:
800 return _("filename contains %r, which is invalid "
806 return _("filename contains %r, which is invalid "
801 "on Windows") % c
807 "on Windows") % c
802 base = n.split('.')[0]
808 base = n.split('.')[0]
803 if base and base.lower() in _winreservednames:
809 if base and base.lower() in _winreservednames:
804 return _("filename contains '%s', which is reserved "
810 return _("filename contains '%s', which is reserved "
805 "on Windows") % base
811 "on Windows") % base
806 t = n[-1]
812 t = n[-1]
807 if t in '. ' and n not in '..':
813 if t in '. ' and n not in '..':
808 return _("filename ends with '%s', which is not allowed "
814 return _("filename ends with '%s', which is not allowed "
809 "on Windows") % t
815 "on Windows") % t
810
816
811 if os.name == 'nt':
817 if os.name == 'nt':
812 checkosfilename = checkwinfilename
818 checkosfilename = checkwinfilename
813 else:
819 else:
814 checkosfilename = platform.checkosfilename
820 checkosfilename = platform.checkosfilename
815
821
816 def makelock(info, pathname):
822 def makelock(info, pathname):
817 try:
823 try:
818 return os.symlink(info, pathname)
824 return os.symlink(info, pathname)
819 except OSError, why:
825 except OSError, why:
820 if why.errno == errno.EEXIST:
826 if why.errno == errno.EEXIST:
821 raise
827 raise
822 except AttributeError: # no symlink in os
828 except AttributeError: # no symlink in os
823 pass
829 pass
824
830
825 ld = os.open(pathname, os.O_CREAT | os.O_WRONLY | os.O_EXCL)
831 ld = os.open(pathname, os.O_CREAT | os.O_WRONLY | os.O_EXCL)
826 os.write(ld, info)
832 os.write(ld, info)
827 os.close(ld)
833 os.close(ld)
828
834
829 def readlock(pathname):
835 def readlock(pathname):
830 try:
836 try:
831 return os.readlink(pathname)
837 return os.readlink(pathname)
832 except OSError, why:
838 except OSError, why:
833 if why.errno not in (errno.EINVAL, errno.ENOSYS):
839 if why.errno not in (errno.EINVAL, errno.ENOSYS):
834 raise
840 raise
835 except AttributeError: # no symlink in os
841 except AttributeError: # no symlink in os
836 pass
842 pass
837 fp = posixfile(pathname)
843 fp = posixfile(pathname)
838 r = fp.read()
844 r = fp.read()
839 fp.close()
845 fp.close()
840 return r
846 return r
841
847
842 def fstat(fp):
848 def fstat(fp):
843 '''stat file object that may not have fileno method.'''
849 '''stat file object that may not have fileno method.'''
844 try:
850 try:
845 return os.fstat(fp.fileno())
851 return os.fstat(fp.fileno())
846 except AttributeError:
852 except AttributeError:
847 return os.stat(fp.name)
853 return os.stat(fp.name)
848
854
849 # File system features
855 # File system features
850
856
851 def checkcase(path):
857 def checkcase(path):
852 """
858 """
853 Return true if the given path is on a case-sensitive filesystem
859 Return true if the given path is on a case-sensitive filesystem
854
860
855 Requires a path (like /foo/.hg) ending with a foldable final
861 Requires a path (like /foo/.hg) ending with a foldable final
856 directory component.
862 directory component.
857 """
863 """
858 s1 = os.stat(path)
864 s1 = os.stat(path)
859 d, b = os.path.split(path)
865 d, b = os.path.split(path)
860 b2 = b.upper()
866 b2 = b.upper()
861 if b == b2:
867 if b == b2:
862 b2 = b.lower()
868 b2 = b.lower()
863 if b == b2:
869 if b == b2:
864 return True # no evidence against case sensitivity
870 return True # no evidence against case sensitivity
865 p2 = os.path.join(d, b2)
871 p2 = os.path.join(d, b2)
866 try:
872 try:
867 s2 = os.stat(p2)
873 s2 = os.stat(p2)
868 if s2 == s1:
874 if s2 == s1:
869 return False
875 return False
870 return True
876 return True
871 except OSError:
877 except OSError:
872 return True
878 return True
873
879
874 try:
880 try:
875 import re2
881 import re2
876 _re2 = None
882 _re2 = None
877 except ImportError:
883 except ImportError:
878 _re2 = False
884 _re2 = False
879
885
880 class _re(object):
886 class _re(object):
881 def _checkre2(self):
887 def _checkre2(self):
882 global _re2
888 global _re2
883 try:
889 try:
884 # check if match works, see issue3964
890 # check if match works, see issue3964
885 _re2 = bool(re2.match(r'\[([^\[]+)\]', '[ui]'))
891 _re2 = bool(re2.match(r'\[([^\[]+)\]', '[ui]'))
886 except ImportError:
892 except ImportError:
887 _re2 = False
893 _re2 = False
888
894
889 def compile(self, pat, flags=0):
895 def compile(self, pat, flags=0):
890 '''Compile a regular expression, using re2 if possible
896 '''Compile a regular expression, using re2 if possible
891
897
892 For best performance, use only re2-compatible regexp features. The
898 For best performance, use only re2-compatible regexp features. The
893 only flags from the re module that are re2-compatible are
899 only flags from the re module that are re2-compatible are
894 IGNORECASE and MULTILINE.'''
900 IGNORECASE and MULTILINE.'''
895 if _re2 is None:
901 if _re2 is None:
896 self._checkre2()
902 self._checkre2()
897 if _re2 and (flags & ~(remod.IGNORECASE | remod.MULTILINE)) == 0:
903 if _re2 and (flags & ~(remod.IGNORECASE | remod.MULTILINE)) == 0:
898 if flags & remod.IGNORECASE:
904 if flags & remod.IGNORECASE:
899 pat = '(?i)' + pat
905 pat = '(?i)' + pat
900 if flags & remod.MULTILINE:
906 if flags & remod.MULTILINE:
901 pat = '(?m)' + pat
907 pat = '(?m)' + pat
902 try:
908 try:
903 return re2.compile(pat)
909 return re2.compile(pat)
904 except re2.error:
910 except re2.error:
905 pass
911 pass
906 return remod.compile(pat, flags)
912 return remod.compile(pat, flags)
907
913
908 @propertycache
914 @propertycache
909 def escape(self):
915 def escape(self):
910 '''Return the version of escape corresponding to self.compile.
916 '''Return the version of escape corresponding to self.compile.
911
917
912 This is imperfect because whether re2 or re is used for a particular
918 This is imperfect because whether re2 or re is used for a particular
913 function depends on the flags, etc, but it's the best we can do.
919 function depends on the flags, etc, but it's the best we can do.
914 '''
920 '''
915 global _re2
921 global _re2
916 if _re2 is None:
922 if _re2 is None:
917 self._checkre2()
923 self._checkre2()
918 if _re2:
924 if _re2:
919 return re2.escape
925 return re2.escape
920 else:
926 else:
921 return remod.escape
927 return remod.escape
922
928
923 re = _re()
929 re = _re()
924
930
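An illustrative call (pattern and input are examples): the wrapper prefers re2 when it is importable and the flags are compatible, and otherwise falls back to the standard re module.

    pat = re.compile(r'^[0-9a-f]{40}$', remod.IGNORECASE)
    bool(pat.match('DA39A3EE5E6B4B0D3255BFEF95601890AFD80709'))   # -> True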
925 _fspathcache = {}
931 _fspathcache = {}
926 def fspath(name, root):
932 def fspath(name, root):
927 '''Get name in the case stored in the filesystem
933 '''Get name in the case stored in the filesystem
928
934
929 The name should be relative to root, and be normcase-ed for efficiency.
935 The name should be relative to root, and be normcase-ed for efficiency.
930
936
931 Note that this function is unnecessary, and should not be
937 Note that this function is unnecessary, and should not be
932 called, for case-sensitive filesystems (simply because it's expensive).
938 called, for case-sensitive filesystems (simply because it's expensive).
933
939
934 The root should be normcase-ed, too.
940 The root should be normcase-ed, too.
935 '''
941 '''
936 def _makefspathcacheentry(dir):
942 def _makefspathcacheentry(dir):
937 return dict((normcase(n), n) for n in os.listdir(dir))
943 return dict((normcase(n), n) for n in os.listdir(dir))
938
944
939 seps = os.sep
945 seps = os.sep
940 if os.altsep:
946 if os.altsep:
941 seps = seps + os.altsep
947 seps = seps + os.altsep
942 # Protect backslashes. This gets silly very quickly.
948 # Protect backslashes. This gets silly very quickly.
943 seps.replace('\\','\\\\')
949 seps.replace('\\','\\\\')
944 pattern = remod.compile(r'([^%s]+)|([%s]+)' % (seps, seps))
950 pattern = remod.compile(r'([^%s]+)|([%s]+)' % (seps, seps))
945 dir = os.path.normpath(root)
951 dir = os.path.normpath(root)
946 result = []
952 result = []
947 for part, sep in pattern.findall(name):
953 for part, sep in pattern.findall(name):
948 if sep:
954 if sep:
949 result.append(sep)
955 result.append(sep)
950 continue
956 continue
951
957
952 if dir not in _fspathcache:
958 if dir not in _fspathcache:
953 _fspathcache[dir] = _makefspathcacheentry(dir)
959 _fspathcache[dir] = _makefspathcacheentry(dir)
954 contents = _fspathcache[dir]
960 contents = _fspathcache[dir]
955
961
956 found = contents.get(part)
962 found = contents.get(part)
957 if not found:
963 if not found:
958 # retry "once per directory" per "dirstate.walk" which
964 # retry "once per directory" per "dirstate.walk" which
959 # may take place for each patches of "hg qpush", for example
965 # may take place for each patches of "hg qpush", for example
960 _fspathcache[dir] = contents = _makefspathcacheentry(dir)
966 _fspathcache[dir] = contents = _makefspathcacheentry(dir)
961 found = contents.get(part)
967 found = contents.get(part)
962
968
963 result.append(found or part)
969 result.append(found or part)
964 dir = os.path.join(dir, part)
970 dir = os.path.join(dir, part)
965
971
966 return ''.join(result)
972 return ''.join(result)
967
973
968 def checknlink(testfile):
974 def checknlink(testfile):
969 '''check whether hardlink count reporting works properly'''
975 '''check whether hardlink count reporting works properly'''
970
976
971 # testfile may be open, so we need a separate file for checking to
977 # testfile may be open, so we need a separate file for checking to
972 # work around issue2543 (or testfile may get lost on Samba shares)
978 # work around issue2543 (or testfile may get lost on Samba shares)
973 f1 = testfile + ".hgtmp1"
979 f1 = testfile + ".hgtmp1"
974 if os.path.lexists(f1):
980 if os.path.lexists(f1):
975 return False
981 return False
976 try:
982 try:
977 posixfile(f1, 'w').close()
983 posixfile(f1, 'w').close()
978 except IOError:
984 except IOError:
979 return False
985 return False
980
986
981 f2 = testfile + ".hgtmp2"
987 f2 = testfile + ".hgtmp2"
982 fd = None
988 fd = None
983 try:
989 try:
984 try:
990 try:
985 oslink(f1, f2)
991 oslink(f1, f2)
986 except OSError:
992 except OSError:
987 return False
993 return False
988
994
989 # nlinks() may behave differently for files on Windows shares if
995 # nlinks() may behave differently for files on Windows shares if
990 # the file is open.
996 # the file is open.
991 fd = posixfile(f2)
997 fd = posixfile(f2)
992 return nlinks(f2) > 1
998 return nlinks(f2) > 1
993 finally:
999 finally:
994 if fd is not None:
1000 if fd is not None:
995 fd.close()
1001 fd.close()
996 for f in (f1, f2):
1002 for f in (f1, f2):
997 try:
1003 try:
998 os.unlink(f)
1004 os.unlink(f)
999 except OSError:
1005 except OSError:
1000 pass
1006 pass
1001
1007
1002 def endswithsep(path):
1008 def endswithsep(path):
1003 '''Check path ends with os.sep or os.altsep.'''
1009 '''Check path ends with os.sep or os.altsep.'''
1004 return path.endswith(os.sep) or os.altsep and path.endswith(os.altsep)
1010 return path.endswith(os.sep) or os.altsep and path.endswith(os.altsep)
1005
1011
1006 def splitpath(path):
1012 def splitpath(path):
1007 '''Split path by os.sep.
1013 '''Split path by os.sep.
1008 Note that this function does not use os.altsep because this is
1014 Note that this function does not use os.altsep because this is
1009 an alternative of simple "xxx.split(os.sep)".
1015 an alternative of simple "xxx.split(os.sep)".
1010 It is recommended to use os.path.normpath() before using this
1016 It is recommended to use os.path.normpath() before using this
1011 function if needed.'''
1017 function if needed.'''
1012 return path.split(os.sep)
1018 return path.split(os.sep)
1013
1019
1014 def gui():
1020 def gui():
1015 '''Are we running in a GUI?'''
1021 '''Are we running in a GUI?'''
1016 if sys.platform == 'darwin':
1022 if sys.platform == 'darwin':
1017 if 'SSH_CONNECTION' in os.environ:
1023 if 'SSH_CONNECTION' in os.environ:
1018 # handle SSH access to a box where the user is logged in
1024 # handle SSH access to a box where the user is logged in
1019 return False
1025 return False
1020 elif getattr(osutil, 'isgui', None):
1026 elif getattr(osutil, 'isgui', None):
1021 # check if a CoreGraphics session is available
1027 # check if a CoreGraphics session is available
1022 return osutil.isgui()
1028 return osutil.isgui()
1023 else:
1029 else:
1024 # pure build; use a safe default
1030 # pure build; use a safe default
1025 return True
1031 return True
1026 else:
1032 else:
1027 return os.name == "nt" or os.environ.get("DISPLAY")
1033 return os.name == "nt" or os.environ.get("DISPLAY")
1028
1034
1029 def mktempcopy(name, emptyok=False, createmode=None):
1035 def mktempcopy(name, emptyok=False, createmode=None):
1030 """Create a temporary file with the same contents as name
1036 """Create a temporary file with the same contents as name
1031
1037
1032 The permission bits are copied from the original file.
1038 The permission bits are copied from the original file.
1033
1039
1034 If the temporary file is going to be truncated immediately, you
1040 If the temporary file is going to be truncated immediately, you
1035 can use emptyok=True as an optimization.
1041 can use emptyok=True as an optimization.
1036
1042
1037 Returns the name of the temporary file.
1043 Returns the name of the temporary file.
1038 """
1044 """
1039 d, fn = os.path.split(name)
1045 d, fn = os.path.split(name)
1040 fd, temp = tempfile.mkstemp(prefix='.%s-' % fn, dir=d)
1046 fd, temp = tempfile.mkstemp(prefix='.%s-' % fn, dir=d)
1041 os.close(fd)
1047 os.close(fd)
1042 # Temporary files are created with mode 0600, which is usually not
1048 # Temporary files are created with mode 0600, which is usually not
1043 # what we want. If the original file already exists, just copy
1049 # what we want. If the original file already exists, just copy
1044 # its mode. Otherwise, manually obey umask.
1050 # its mode. Otherwise, manually obey umask.
1045 copymode(name, temp, createmode)
1051 copymode(name, temp, createmode)
1046 if emptyok:
1052 if emptyok:
1047 return temp
1053 return temp
1048 try:
1054 try:
1049 try:
1055 try:
1050 ifp = posixfile(name, "rb")
1056 ifp = posixfile(name, "rb")
1051 except IOError, inst:
1057 except IOError, inst:
1052 if inst.errno == errno.ENOENT:
1058 if inst.errno == errno.ENOENT:
1053 return temp
1059 return temp
1054 if not getattr(inst, 'filename', None):
1060 if not getattr(inst, 'filename', None):
1055 inst.filename = name
1061 inst.filename = name
1056 raise
1062 raise
1057 ofp = posixfile(temp, "wb")
1063 ofp = posixfile(temp, "wb")
1058 for chunk in filechunkiter(ifp):
1064 for chunk in filechunkiter(ifp):
1059 ofp.write(chunk)
1065 ofp.write(chunk)
1060 ifp.close()
1066 ifp.close()
1061 ofp.close()
1067 ofp.close()
1062 except: # re-raises
1068 except: # re-raises
1063 try: os.unlink(temp)
1069 try: os.unlink(temp)
1064 except OSError: pass
1070 except OSError: pass
1065 raise
1071 raise
1066 return temp
1072 return temp
1067
1073
1068 class atomictempfile(object):
1074 class atomictempfile(object):
1069 '''writable file object that atomically updates a file
1075 '''writable file object that atomically updates a file
1070
1076
1071 All writes will go to a temporary copy of the original file. Call
1077 All writes will go to a temporary copy of the original file. Call
1072 close() when you are done writing, and atomictempfile will rename
1078 close() when you are done writing, and atomictempfile will rename
1073 the temporary copy to the original name, making the changes
1079 the temporary copy to the original name, making the changes
1074 visible. If the object is destroyed without being closed, all your
1080 visible. If the object is destroyed without being closed, all your
1075 writes are discarded.
1081 writes are discarded.
1076 '''
1082 '''
1077 def __init__(self, name, mode='w+b', createmode=None):
1083 def __init__(self, name, mode='w+b', createmode=None):
1078 self.__name = name # permanent name
1084 self.__name = name # permanent name
1079 self._tempname = mktempcopy(name, emptyok=('w' in mode),
1085 self._tempname = mktempcopy(name, emptyok=('w' in mode),
1080 createmode=createmode)
1086 createmode=createmode)
1081 self._fp = posixfile(self._tempname, mode)
1087 self._fp = posixfile(self._tempname, mode)
1082
1088
1083 # delegated methods
1089 # delegated methods
1084 self.write = self._fp.write
1090 self.write = self._fp.write
1085 self.seek = self._fp.seek
1091 self.seek = self._fp.seek
1086 self.tell = self._fp.tell
1092 self.tell = self._fp.tell
1087 self.fileno = self._fp.fileno
1093 self.fileno = self._fp.fileno
1088
1094
1089 def close(self):
1095 def close(self):
1090 if not self._fp.closed:
1096 if not self._fp.closed:
1091 self._fp.close()
1097 self._fp.close()
1092 rename(self._tempname, localpath(self.__name))
1098 rename(self._tempname, localpath(self.__name))
1093
1099
1094 def discard(self):
1100 def discard(self):
1095 if not self._fp.closed:
1101 if not self._fp.closed:
1096 try:
1102 try:
1097 os.unlink(self._tempname)
1103 os.unlink(self._tempname)
1098 except OSError:
1104 except OSError:
1099 pass
1105 pass
1100 self._fp.close()
1106 self._fp.close()
1101
1107
1102 def __del__(self):
1108 def __del__(self):
1103 if safehasattr(self, '_fp'): # constructor actually did something
1109 if safehasattr(self, '_fp'): # constructor actually did something
1104 self.discard()
1110 self.discard()
1105
1111
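# Illustrative usage sketch (added for exposition, not part of the original
# module): write new content through atomictempfile so readers never see a
# partially written file; discard() on failure leaves the target untouched.
def _atomicwrite_example(path, data):
    f = atomictempfile(path, 'wb')
    try:
        f.write(data)
    except: # re-raises
        f.discard()
        raise
    f.close() # renames the temporary file over 'path'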
1106 def makedirs(name, mode=None, notindexed=False):
1112 def makedirs(name, mode=None, notindexed=False):
1107 """recursive directory creation with parent mode inheritance"""
1113 """recursive directory creation with parent mode inheritance"""
1108 try:
1114 try:
1109 makedir(name, notindexed)
1115 makedir(name, notindexed)
1110 except OSError, err:
1116 except OSError, err:
1111 if err.errno == errno.EEXIST:
1117 if err.errno == errno.EEXIST:
1112 return
1118 return
1113 if err.errno != errno.ENOENT or not name:
1119 if err.errno != errno.ENOENT or not name:
1114 raise
1120 raise
1115 parent = os.path.dirname(os.path.abspath(name))
1121 parent = os.path.dirname(os.path.abspath(name))
1116 if parent == name:
1122 if parent == name:
1117 raise
1123 raise
1118 makedirs(parent, mode, notindexed)
1124 makedirs(parent, mode, notindexed)
1119 makedir(name, notindexed)
1125 makedir(name, notindexed)
1120 if mode is not None:
1126 if mode is not None:
1121 os.chmod(name, mode)
1127 os.chmod(name, mode)
1122
1128
1123 def ensuredirs(name, mode=None, notindexed=False):
1129 def ensuredirs(name, mode=None, notindexed=False):
1124 """race-safe recursive directory creation
1130 """race-safe recursive directory creation
1125
1131
1126 Newly created directories are marked as "not to be indexed by
1132 Newly created directories are marked as "not to be indexed by
1127 the content indexing service", if ``notindexed`` is specified
1133 the content indexing service", if ``notindexed`` is specified
1128 for "write" mode access.
1134 for "write" mode access.
1129 """
1135 """
1130 if os.path.isdir(name):
1136 if os.path.isdir(name):
1131 return
1137 return
1132 parent = os.path.dirname(os.path.abspath(name))
1138 parent = os.path.dirname(os.path.abspath(name))
1133 if parent != name:
1139 if parent != name:
1134 ensuredirs(parent, mode, notindexed)
1140 ensuredirs(parent, mode, notindexed)
1135 try:
1141 try:
1136 makedir(name, notindexed)
1142 makedir(name, notindexed)
1137 except OSError, err:
1143 except OSError, err:
1138 if err.errno == errno.EEXIST and os.path.isdir(name):
1144 if err.errno == errno.EEXIST and os.path.isdir(name):
1139 # someone else seems to have won a directory creation race
1145 # someone else seems to have won a directory creation race
1140 return
1146 return
1141 raise
1147 raise
1142 if mode is not None:
1148 if mode is not None:
1143 os.chmod(name, mode)
1149 os.chmod(name, mode)
1144
1150
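# Illustrative usage note (added for exposition, not part of the original
# module): prefer ensuredirs() over makedirs() when several processes may
# create the same tree concurrently; the cache path below is hypothetical.
#
#   ensuredirs('/repo/.hg/cache/tags', mode=0755)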
1145 def readfile(path):
1151 def readfile(path):
1146 fp = open(path, 'rb')
1152 fp = open(path, 'rb')
1147 try:
1153 try:
1148 return fp.read()
1154 return fp.read()
1149 finally:
1155 finally:
1150 fp.close()
1156 fp.close()
1151
1157
1152 def writefile(path, text):
1158 def writefile(path, text):
1153 fp = open(path, 'wb')
1159 fp = open(path, 'wb')
1154 try:
1160 try:
1155 fp.write(text)
1161 fp.write(text)
1156 finally:
1162 finally:
1157 fp.close()
1163 fp.close()
1158
1164
1159 def appendfile(path, text):
1165 def appendfile(path, text):
1160 fp = open(path, 'ab')
1166 fp = open(path, 'ab')
1161 try:
1167 try:
1162 fp.write(text)
1168 fp.write(text)
1163 finally:
1169 finally:
1164 fp.close()
1170 fp.close()
1165
1171
1166 class chunkbuffer(object):
1172 class chunkbuffer(object):
1167 """Allow arbitrary sized chunks of data to be efficiently read from an
1173 """Allow arbitrary sized chunks of data to be efficiently read from an
1168 iterator over chunks of arbitrary size."""
1174 iterator over chunks of arbitrary size."""
1169
1175
1170 def __init__(self, in_iter):
1176 def __init__(self, in_iter):
1171 """in_iter is the iterator that's iterating over the input chunks.
1177 """in_iter is the iterator that's iterating over the input chunks.
1172 Chunks larger than 1 MB are split internally into 256 kB pieces."""
1178 Chunks larger than 1 MB are split internally into 256 kB pieces."""
1173 def splitbig(chunks):
1179 def splitbig(chunks):
1174 for chunk in chunks:
1180 for chunk in chunks:
1175 if len(chunk) > 2**20:
1181 if len(chunk) > 2**20:
1176 pos = 0
1182 pos = 0
1177 while pos < len(chunk):
1183 while pos < len(chunk):
1178 end = pos + 2 ** 18
1184 end = pos + 2 ** 18
1179 yield chunk[pos:end]
1185 yield chunk[pos:end]
1180 pos = end
1186 pos = end
1181 else:
1187 else:
1182 yield chunk
1188 yield chunk
1183 self.iter = splitbig(in_iter)
1189 self.iter = splitbig(in_iter)
1184 self._queue = deque()
1190 self._queue = deque()
1185
1191
1186 def read(self, l=None):
1192 def read(self, l=None):
1187 """Read L bytes of data from the iterator of chunks of data.
1193 """Read L bytes of data from the iterator of chunks of data.
1188 Returns less than L bytes if the iterator runs dry.
1194 Returns less than L bytes if the iterator runs dry.
1189
1195
1190 If the size parameter is omitted, read everything."""
1196 If the size parameter is omitted, read everything."""
1191 left = l
1197 left = l
1192 buf = []
1198 buf = []
1193 queue = self._queue
1199 queue = self._queue
1194 while left is None or left > 0:
1200 while left is None or left > 0:
1195 # refill the queue
1201 # refill the queue
1196 if not queue:
1202 if not queue:
1197 target = 2**18
1203 target = 2**18
1198 for chunk in self.iter:
1204 for chunk in self.iter:
1199 queue.append(chunk)
1205 queue.append(chunk)
1200 target -= len(chunk)
1206 target -= len(chunk)
1201 if target <= 0:
1207 if target <= 0:
1202 break
1208 break
1203 if not queue:
1209 if not queue:
1204 break
1210 break
1205
1211
1206 chunk = queue.popleft()
1212 chunk = queue.popleft()
1207 if left is not None:
1213 if left is not None:
1208 left -= len(chunk)
1214 left -= len(chunk)
1209 if left is not None and left < 0:
1215 if left is not None and left < 0:
1210 queue.appendleft(chunk[left:])
1216 queue.appendleft(chunk[left:])
1211 buf.append(chunk[:left])
1217 buf.append(chunk[:left])
1212 else:
1218 else:
1213 buf.append(chunk)
1219 buf.append(chunk)
1214
1220
1215 return ''.join(buf)
1221 return ''.join(buf)
1216
1222
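# Illustrative usage sketch (added for exposition, not part of the original
# module): normalize an iterator of unevenly sized strings into fixed-size
# pieces by buffering it through chunkbuffer.
def _rechunk_example(chunks, size=4096):
    buf = chunkbuffer(iter(chunks))
    while True:
        data = buf.read(size)
        if not data:
            break
        yield data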
1217 def filechunkiter(f, size=65536, limit=None):
1223 def filechunkiter(f, size=65536, limit=None):
1218 """Create a generator that produces the data in the file, size
1224 """Create a generator that produces the data in the file, size
1219 (default 65536) bytes at a time, up to optional limit (default is
1225 (default 65536) bytes at a time, up to optional limit (default is
1220 to read all data). Chunks may be less than size bytes if the
1226 to read all data). Chunks may be less than size bytes if the
1221 chunk is the last chunk in the file, or the file is a socket or
1227 chunk is the last chunk in the file, or the file is a socket or
1222 some other type of file that sometimes reads less data than is
1228 some other type of file that sometimes reads less data than is
1223 requested."""
1229 requested."""
1224 assert size >= 0
1230 assert size >= 0
1225 assert limit is None or limit >= 0
1231 assert limit is None or limit >= 0
1226 while True:
1232 while True:
1227 if limit is None:
1233 if limit is None:
1228 nbytes = size
1234 nbytes = size
1229 else:
1235 else:
1230 nbytes = min(limit, size)
1236 nbytes = min(limit, size)
1231 s = nbytes and f.read(nbytes)
1237 s = nbytes and f.read(nbytes)
1232 if not s:
1238 if not s:
1233 break
1239 break
1234 if limit:
1240 if limit:
1235 limit -= len(s)
1241 limit -= len(s)
1236 yield s
1242 yield s
1237
1243
1238 def makedate(timestamp=None):
1244 def makedate(timestamp=None):
1239 '''Return a unix timestamp (or the current time) as a (unixtime,
1245 '''Return a unix timestamp (or the current time) as a (unixtime,
1240 offset) tuple based off the local timezone.'''
1246 offset) tuple based off the local timezone.'''
1241 if timestamp is None:
1247 if timestamp is None:
1242 timestamp = time.time()
1248 timestamp = time.time()
1243 if timestamp < 0:
1249 if timestamp < 0:
1244 hint = _("check your clock")
1250 hint = _("check your clock")
1245 raise Abort(_("negative timestamp: %d") % timestamp, hint=hint)
1251 raise Abort(_("negative timestamp: %d") % timestamp, hint=hint)
1246 delta = (datetime.datetime.utcfromtimestamp(timestamp) -
1252 delta = (datetime.datetime.utcfromtimestamp(timestamp) -
1247 datetime.datetime.fromtimestamp(timestamp))
1253 datetime.datetime.fromtimestamp(timestamp))
1248 tz = delta.days * 86400 + delta.seconds
1254 tz = delta.days * 86400 + delta.seconds
1249 return timestamp, tz
1255 return timestamp, tz
1250
1256
1251 def datestr(date=None, format='%a %b %d %H:%M:%S %Y %1%2'):
1257 def datestr(date=None, format='%a %b %d %H:%M:%S %Y %1%2'):
1252 """represent a (unixtime, offset) tuple as a localized time.
1258 """represent a (unixtime, offset) tuple as a localized time.
1253 unixtime is seconds since the epoch, and offset is the time zone's
1259 unixtime is seconds since the epoch, and offset is the time zone's
1254 number of seconds away from UTC. The time zone offset is rendered
1260 number of seconds away from UTC. The time zone offset is rendered
1255 only where the format contains "%1", "%2" or "%z"."""
1261 only where the format contains "%1", "%2" or "%z"."""
1256 t, tz = date or makedate()
1262 t, tz = date or makedate()
1257 if t < 0:
1263 if t < 0:
1258 t = 0 # time.gmtime(lt) fails on Windows for lt < -43200
1264 t = 0 # time.gmtime(lt) fails on Windows for lt < -43200
1259 tz = 0
1265 tz = 0
1260 if "%1" in format or "%2" in format or "%z" in format:
1266 if "%1" in format or "%2" in format or "%z" in format:
1261 sign = (tz > 0) and "-" or "+"
1267 sign = (tz > 0) and "-" or "+"
1262 minutes = abs(tz) // 60
1268 minutes = abs(tz) // 60
1263 format = format.replace("%z", "%1%2")
1269 format = format.replace("%z", "%1%2")
1264 format = format.replace("%1", "%c%02d" % (sign, minutes // 60))
1270 format = format.replace("%1", "%c%02d" % (sign, minutes // 60))
1265 format = format.replace("%2", "%02d" % (minutes % 60))
1271 format = format.replace("%2", "%02d" % (minutes % 60))
1266 try:
1272 try:
1267 t = time.gmtime(float(t) - tz)
1273 t = time.gmtime(float(t) - tz)
1268 except ValueError:
1274 except ValueError:
1269 # time was out of range
1275 # time was out of range
1270 t = time.gmtime(sys.maxint)
1276 t = time.gmtime(sys.maxint)
1271 s = time.strftime(format, t)
1277 s = time.strftime(format, t)
1272 return s
1278 return s
1273
1279
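# Illustrative usage note (added for exposition, not part of the original
# module): a (unixtime, offset) pair renders with the "%1%2" placeholders
# expanded to the signed zone offset, e.g. for the epoch in UTC:
#
#   datestr((0, 0), format='%Y-%m-%d %H:%M %1%2')  -> '1970-01-01 00:00 +0000'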
1274 def shortdate(date=None):
1280 def shortdate(date=None):
1275 """turn (timestamp, tzoff) tuple into an ISO 8601 date."""
1281 """turn (timestamp, tzoff) tuple into an ISO 8601 date."""
1276 return datestr(date, format='%Y-%m-%d')
1282 return datestr(date, format='%Y-%m-%d')
1277
1283
1278 def strdate(string, format, defaults=[]):
1284 def strdate(string, format, defaults=[]):
1279 """parse a localized time string and return a (unixtime, offset) tuple.
1285 """parse a localized time string and return a (unixtime, offset) tuple.
1280 if the string cannot be parsed, ValueError is raised."""
1286 if the string cannot be parsed, ValueError is raised."""
1281 def timezone(string):
1287 def timezone(string):
1282 tz = string.split()[-1]
1288 tz = string.split()[-1]
1283 if tz[0] in "+-" and len(tz) == 5 and tz[1:].isdigit():
1289 if tz[0] in "+-" and len(tz) == 5 and tz[1:].isdigit():
1284 sign = (tz[0] == "+") and 1 or -1
1290 sign = (tz[0] == "+") and 1 or -1
1285 hours = int(tz[1:3])
1291 hours = int(tz[1:3])
1286 minutes = int(tz[3:5])
1292 minutes = int(tz[3:5])
1287 return -sign * (hours * 60 + minutes) * 60
1293 return -sign * (hours * 60 + minutes) * 60
1288 if tz == "GMT" or tz == "UTC":
1294 if tz == "GMT" or tz == "UTC":
1289 return 0
1295 return 0
1290 return None
1296 return None
1291
1297
1292 # NOTE: unixtime = localunixtime + offset
1298 # NOTE: unixtime = localunixtime + offset
1293 offset, date = timezone(string), string
1299 offset, date = timezone(string), string
1294 if offset is not None:
1300 if offset is not None:
1295 date = " ".join(string.split()[:-1])
1301 date = " ".join(string.split()[:-1])
1296
1302
1297 # add missing elements from defaults
1303 # add missing elements from defaults
1298 usenow = False # default to using biased defaults
1304 usenow = False # default to using biased defaults
1299 for part in ("S", "M", "HI", "d", "mb", "yY"): # decreasing specificity
1305 for part in ("S", "M", "HI", "d", "mb", "yY"): # decreasing specificity
1300 found = [True for p in part if ("%"+p) in format]
1306 found = [True for p in part if ("%"+p) in format]
1301 if not found:
1307 if not found:
1302 date += "@" + defaults[part][usenow]
1308 date += "@" + defaults[part][usenow]
1303 format += "@%" + part[0]
1309 format += "@%" + part[0]
1304 else:
1310 else:
1305 # We've found a specific time element, less specific time
1311 # We've found a specific time element, less specific time
1306 # elements are relative to today
1312 # elements are relative to today
1307 usenow = True
1313 usenow = True
1308
1314
1309 timetuple = time.strptime(date, format)
1315 timetuple = time.strptime(date, format)
1310 localunixtime = int(calendar.timegm(timetuple))
1316 localunixtime = int(calendar.timegm(timetuple))
1311 if offset is None:
1317 if offset is None:
1312 # local timezone
1318 # local timezone
1313 unixtime = int(time.mktime(timetuple))
1319 unixtime = int(time.mktime(timetuple))
1314 offset = unixtime - localunixtime
1320 offset = unixtime - localunixtime
1315 else:
1321 else:
1316 unixtime = localunixtime + offset
1322 unixtime = localunixtime + offset
1317 return unixtime, offset
1323 return unixtime, offset
1318
1324
1319 def parsedate(date, formats=None, bias={}):
1325 def parsedate(date, formats=None, bias={}):
1320 """parse a localized date/time and return a (unixtime, offset) tuple.
1326 """parse a localized date/time and return a (unixtime, offset) tuple.
1321
1327
1322 The date may be a "unixtime offset" string or in one of the specified
1328 The date may be a "unixtime offset" string or in one of the specified
1323 formats. If the date already is a (unixtime, offset) tuple, it is returned.
1329 formats. If the date already is a (unixtime, offset) tuple, it is returned.
1324
1330
1325 >>> parsedate(' today ') == parsedate(\
1331 >>> parsedate(' today ') == parsedate(\
1326 datetime.date.today().strftime('%b %d'))
1332 datetime.date.today().strftime('%b %d'))
1327 True
1333 True
1328 >>> parsedate( 'yesterday ') == parsedate((datetime.date.today() -\
1334 >>> parsedate( 'yesterday ') == parsedate((datetime.date.today() -\
1329 datetime.timedelta(days=1)\
1335 datetime.timedelta(days=1)\
1330 ).strftime('%b %d'))
1336 ).strftime('%b %d'))
1331 True
1337 True
1332 >>> now, tz = makedate()
1338 >>> now, tz = makedate()
1333 >>> strnow, strtz = parsedate('now')
1339 >>> strnow, strtz = parsedate('now')
1334 >>> (strnow - now) < 1
1340 >>> (strnow - now) < 1
1335 True
1341 True
1336 >>> tz == strtz
1342 >>> tz == strtz
1337 True
1343 True
1338 """
1344 """
1339 if not date:
1345 if not date:
1340 return 0, 0
1346 return 0, 0
1341 if isinstance(date, tuple) and len(date) == 2:
1347 if isinstance(date, tuple) and len(date) == 2:
1342 return date
1348 return date
1343 if not formats:
1349 if not formats:
1344 formats = defaultdateformats
1350 formats = defaultdateformats
1345 date = date.strip()
1351 date = date.strip()
1346
1352
1347 if date == _('now'):
1353 if date == _('now'):
1348 return makedate()
1354 return makedate()
1349 if date == _('today'):
1355 if date == _('today'):
1350 date = datetime.date.today().strftime('%b %d')
1356 date = datetime.date.today().strftime('%b %d')
1351 elif date == _('yesterday'):
1357 elif date == _('yesterday'):
1352 date = (datetime.date.today() -
1358 date = (datetime.date.today() -
1353 datetime.timedelta(days=1)).strftime('%b %d')
1359 datetime.timedelta(days=1)).strftime('%b %d')
1354
1360
1355 try:
1361 try:
1356 when, offset = map(int, date.split(' '))
1362 when, offset = map(int, date.split(' '))
1357 except ValueError:
1363 except ValueError:
1358 # fill out defaults
1364 # fill out defaults
1359 now = makedate()
1365 now = makedate()
1360 defaults = {}
1366 defaults = {}
1361 for part in ("d", "mb", "yY", "HI", "M", "S"):
1367 for part in ("d", "mb", "yY", "HI", "M", "S"):
1362 # this piece is for rounding the specific end of unknowns
1368 # this piece is for rounding the specific end of unknowns
1363 b = bias.get(part)
1369 b = bias.get(part)
1364 if b is None:
1370 if b is None:
1365 if part[0] in "HMS":
1371 if part[0] in "HMS":
1366 b = "00"
1372 b = "00"
1367 else:
1373 else:
1368 b = "0"
1374 b = "0"
1369
1375
1370 # this piece is for matching the generic end to today's date
1376 # this piece is for matching the generic end to today's date
1371 n = datestr(now, "%" + part[0])
1377 n = datestr(now, "%" + part[0])
1372
1378
1373 defaults[part] = (b, n)
1379 defaults[part] = (b, n)
1374
1380
1375 for format in formats:
1381 for format in formats:
1376 try:
1382 try:
1377 when, offset = strdate(date, format, defaults)
1383 when, offset = strdate(date, format, defaults)
1378 except (ValueError, OverflowError):
1384 except (ValueError, OverflowError):
1379 pass
1385 pass
1380 else:
1386 else:
1381 break
1387 break
1382 else:
1388 else:
1383 raise Abort(_('invalid date: %r') % date)
1389 raise Abort(_('invalid date: %r') % date)
1384 # validate explicit (probably user-specified) date and
1390 # validate explicit (probably user-specified) date and
1385 # time zone offset. values must fit in signed 32 bits for
1391 # time zone offset. values must fit in signed 32 bits for
1386 # current 32-bit linux runtimes. timezones go from UTC-12
1392 # current 32-bit linux runtimes. timezones go from UTC-12
1387 # to UTC+14
1393 # to UTC+14
1388 if abs(when) > 0x7fffffff:
1394 if abs(when) > 0x7fffffff:
1389 raise Abort(_('date exceeds 32 bits: %d') % when)
1395 raise Abort(_('date exceeds 32 bits: %d') % when)
1390 if when < 0:
1396 if when < 0:
1391 raise Abort(_('negative date value: %d') % when)
1397 raise Abort(_('negative date value: %d') % when)
1392 if offset < -50400 or offset > 43200:
1398 if offset < -50400 or offset > 43200:
1393 raise Abort(_('impossible time zone offset: %d') % offset)
1399 raise Abort(_('impossible time zone offset: %d') % offset)
1394 return when, offset
1400 return when, offset
1395
1401
1396 def matchdate(date):
1402 def matchdate(date):
1397 """Return a function that matches a given date match specifier
1403 """Return a function that matches a given date match specifier
1398
1404
1399 Formats include:
1405 Formats include:
1400
1406
1401 '{date}' match a given date to the accuracy provided
1407 '{date}' match a given date to the accuracy provided
1402
1408
1403 '<{date}' on or before a given date
1409 '<{date}' on or before a given date
1404
1410
1405 '>{date}' on or after a given date
1411 '>{date}' on or after a given date
1406
1412
1407 >>> p1 = parsedate("10:29:59")
1413 >>> p1 = parsedate("10:29:59")
1408 >>> p2 = parsedate("10:30:00")
1414 >>> p2 = parsedate("10:30:00")
1409 >>> p3 = parsedate("10:30:59")
1415 >>> p3 = parsedate("10:30:59")
1410 >>> p4 = parsedate("10:31:00")
1416 >>> p4 = parsedate("10:31:00")
1411 >>> p5 = parsedate("Sep 15 10:30:00 1999")
1417 >>> p5 = parsedate("Sep 15 10:30:00 1999")
1412 >>> f = matchdate("10:30")
1418 >>> f = matchdate("10:30")
1413 >>> f(p1[0])
1419 >>> f(p1[0])
1414 False
1420 False
1415 >>> f(p2[0])
1421 >>> f(p2[0])
1416 True
1422 True
1417 >>> f(p3[0])
1423 >>> f(p3[0])
1418 True
1424 True
1419 >>> f(p4[0])
1425 >>> f(p4[0])
1420 False
1426 False
1421 >>> f(p5[0])
1427 >>> f(p5[0])
1422 False
1428 False
1423 """
1429 """
1424
1430
1425 def lower(date):
1431 def lower(date):
1426 d = {'mb': "1", 'd': "1"}
1432 d = {'mb': "1", 'd': "1"}
1427 return parsedate(date, extendeddateformats, d)[0]
1433 return parsedate(date, extendeddateformats, d)[0]
1428
1434
1429 def upper(date):
1435 def upper(date):
1430 d = {'mb': "12", 'HI': "23", 'M': "59", 'S': "59"}
1436 d = {'mb': "12", 'HI': "23", 'M': "59", 'S': "59"}
1431 for days in ("31", "30", "29"):
1437 for days in ("31", "30", "29"):
1432 try:
1438 try:
1433 d["d"] = days
1439 d["d"] = days
1434 return parsedate(date, extendeddateformats, d)[0]
1440 return parsedate(date, extendeddateformats, d)[0]
1435 except Abort:
1441 except Abort:
1436 pass
1442 pass
1437 d["d"] = "28"
1443 d["d"] = "28"
1438 return parsedate(date, extendeddateformats, d)[0]
1444 return parsedate(date, extendeddateformats, d)[0]
1439
1445
1440 date = date.strip()
1446 date = date.strip()
1441
1447
1442 if not date:
1448 if not date:
1443 raise Abort(_("dates cannot consist entirely of whitespace"))
1449 raise Abort(_("dates cannot consist entirely of whitespace"))
1444 elif date[0] == "<":
1450 elif date[0] == "<":
1445 if not date[1:]:
1451 if not date[1:]:
1446 raise Abort(_("invalid day spec, use '<DATE'"))
1452 raise Abort(_("invalid day spec, use '<DATE'"))
1447 when = upper(date[1:])
1453 when = upper(date[1:])
1448 return lambda x: x <= when
1454 return lambda x: x <= when
1449 elif date[0] == ">":
1455 elif date[0] == ">":
1450 if not date[1:]:
1456 if not date[1:]:
1451 raise Abort(_("invalid day spec, use '>DATE'"))
1457 raise Abort(_("invalid day spec, use '>DATE'"))
1452 when = lower(date[1:])
1458 when = lower(date[1:])
1453 return lambda x: x >= when
1459 return lambda x: x >= when
1454 elif date[0] == "-":
1460 elif date[0] == "-":
1455 try:
1461 try:
1456 days = int(date[1:])
1462 days = int(date[1:])
1457 except ValueError:
1463 except ValueError:
1458 raise Abort(_("invalid day spec: %s") % date[1:])
1464 raise Abort(_("invalid day spec: %s") % date[1:])
1459 if days < 0:
1465 if days < 0:
1460 raise Abort(_("%s must be nonnegative (see 'hg help dates')")
1466 raise Abort(_("%s must be nonnegative (see 'hg help dates')")
1461 % date[1:])
1467 % date[1:])
1462 when = makedate()[0] - days * 3600 * 24
1468 when = makedate()[0] - days * 3600 * 24
1463 return lambda x: x >= when
1469 return lambda x: x >= when
1464 elif " to " in date:
1470 elif " to " in date:
1465 a, b = date.split(" to ")
1471 a, b = date.split(" to ")
1466 start, stop = lower(a), upper(b)
1472 start, stop = lower(a), upper(b)
1467 return lambda x: x >= start and x <= stop
1473 return lambda x: x >= start and x <= stop
1468 else:
1474 else:
1469 start, stop = lower(date), upper(date)
1475 start, stop = lower(date), upper(date)
1470 return lambda x: x >= start and x <= stop
1476 return lambda x: x >= start and x <= stop
1471
1477
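# Illustrative usage sketch (added for exposition, not part of the original
# module): build a predicate from a date spec and filter commit timestamps
# with it, the way date-based log filtering does.
def _filterbydate_example(timestamps, spec):
    match = matchdate(spec)
    return [ts for ts in timestamps if match(ts)]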
1472 def shortuser(user):
1478 def shortuser(user):
1473 """Return a short representation of a user name or email address."""
1479 """Return a short representation of a user name or email address."""
1474 f = user.find('@')
1480 f = user.find('@')
1475 if f >= 0:
1481 if f >= 0:
1476 user = user[:f]
1482 user = user[:f]
1477 f = user.find('<')
1483 f = user.find('<')
1478 if f >= 0:
1484 if f >= 0:
1479 user = user[f + 1:]
1485 user = user[f + 1:]
1480 f = user.find(' ')
1486 f = user.find(' ')
1481 if f >= 0:
1487 if f >= 0:
1482 user = user[:f]
1488 user = user[:f]
1483 f = user.find('.')
1489 f = user.find('.')
1484 if f >= 0:
1490 if f >= 0:
1485 user = user[:f]
1491 user = user[:f]
1486 return user
1492 return user
1487
1493
1488 def emailuser(user):
1494 def emailuser(user):
1489 """Return the user portion of an email address."""
1495 """Return the user portion of an email address."""
1490 f = user.find('@')
1496 f = user.find('@')
1491 if f >= 0:
1497 if f >= 0:
1492 user = user[:f]
1498 user = user[:f]
1493 f = user.find('<')
1499 f = user.find('<')
1494 if f >= 0:
1500 if f >= 0:
1495 user = user[f + 1:]
1501 user = user[f + 1:]
1496 return user
1502 return user
1497
1503
1498 def email(author):
1504 def email(author):
1499 '''get email of author.'''
1505 '''get email of author.'''
1500 r = author.find('>')
1506 r = author.find('>')
1501 if r == -1:
1507 if r == -1:
1502 r = None
1508 r = None
1503 return author[author.find('<') + 1:r]
1509 return author[author.find('<') + 1:r]
1504
1510
1505 def ellipsis(text, maxlength=400):
1511 def ellipsis(text, maxlength=400):
1506 """Trim string to at most maxlength (default: 400) columns in display."""
1512 """Trim string to at most maxlength (default: 400) columns in display."""
1507 return encoding.trim(text, maxlength, ellipsis='...')
1513 return encoding.trim(text, maxlength, ellipsis='...')
1508
1514
1509 def unitcountfn(*unittable):
1515 def unitcountfn(*unittable):
1510 '''return a function that renders a readable count of some quantity'''
1516 '''return a function that renders a readable count of some quantity'''
1511
1517
1512 def go(count):
1518 def go(count):
1513 for multiplier, divisor, format in unittable:
1519 for multiplier, divisor, format in unittable:
1514 if count >= divisor * multiplier:
1520 if count >= divisor * multiplier:
1515 return format % (count / float(divisor))
1521 return format % (count / float(divisor))
1516 return unittable[-1][2] % count
1522 return unittable[-1][2] % count
1517
1523
1518 return go
1524 return go
1519
1525
1520 bytecount = unitcountfn(
1526 bytecount = unitcountfn(
1521 (100, 1 << 30, _('%.0f GB')),
1527 (100, 1 << 30, _('%.0f GB')),
1522 (10, 1 << 30, _('%.1f GB')),
1528 (10, 1 << 30, _('%.1f GB')),
1523 (1, 1 << 30, _('%.2f GB')),
1529 (1, 1 << 30, _('%.2f GB')),
1524 (100, 1 << 20, _('%.0f MB')),
1530 (100, 1 << 20, _('%.0f MB')),
1525 (10, 1 << 20, _('%.1f MB')),
1531 (10, 1 << 20, _('%.1f MB')),
1526 (1, 1 << 20, _('%.2f MB')),
1532 (1, 1 << 20, _('%.2f MB')),
1527 (100, 1 << 10, _('%.0f KB')),
1533 (100, 1 << 10, _('%.0f KB')),
1528 (10, 1 << 10, _('%.1f KB')),
1534 (10, 1 << 10, _('%.1f KB')),
1529 (1, 1 << 10, _('%.2f KB')),
1535 (1, 1 << 10, _('%.2f KB')),
1530 (1, 1, _('%.0f bytes')),
1536 (1, 1, _('%.0f bytes')),
1531 )
1537 )
1532
1538
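# Illustrative sketch (added for exposition, not part of the original
# module): unitcountfn() can build counters for other quantities too; a
# hypothetical plain item counter could look like this.
_itemcount_example = unitcountfn(
    (1, 1000000, _('%.1f M')),
    (1, 1000, _('%.1f k')),
    (1, 1, _('%.0f')),
    )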
1533 def uirepr(s):
1539 def uirepr(s):
1534 # Avoid double backslash in Windows path repr()
1540 # Avoid double backslash in Windows path repr()
1535 return repr(s).replace('\\\\', '\\')
1541 return repr(s).replace('\\\\', '\\')
1536
1542
1537 # delay import of textwrap
1543 # delay import of textwrap
1538 def MBTextWrapper(**kwargs):
1544 def MBTextWrapper(**kwargs):
1539 class tw(textwrap.TextWrapper):
1545 class tw(textwrap.TextWrapper):
1540 """
1546 """
1541 Extend TextWrapper for width-awareness.
1547 Extend TextWrapper for width-awareness.
1542
1548
1543 Neither the number of 'bytes' in any encoding nor the number of
1549 Neither the number of 'bytes' in any encoding nor the number of
1544 'characters' is appropriate to calculate terminal columns for a string.
1550 'characters' is appropriate to calculate terminal columns for a string.
1545
1551
1546 Original TextWrapper implementation uses built-in 'len()' directly,
1552 Original TextWrapper implementation uses built-in 'len()' directly,
1547 so overriding is needed to use width information of each character.
1553 so overriding is needed to use width information of each character.
1548
1554
1549 In addition, characters classified into 'ambiguous' width are
1555 In addition, characters classified into 'ambiguous' width are
1550 treated as wide in East Asian locales, but as narrow elsewhere.
1556 treated as wide in East Asian locales, but as narrow elsewhere.
1551
1557
1552 This requires a user decision to determine the width of such characters.
1558 This requires a user decision to determine the width of such characters.
1553 """
1559 """
1554 def __init__(self, **kwargs):
1560 def __init__(self, **kwargs):
1555 textwrap.TextWrapper.__init__(self, **kwargs)
1561 textwrap.TextWrapper.__init__(self, **kwargs)
1556
1562
1557 # for compatibility between 2.4 and 2.6
1563 # for compatibility between 2.4 and 2.6
1558 if getattr(self, 'drop_whitespace', None) is None:
1564 if getattr(self, 'drop_whitespace', None) is None:
1559 self.drop_whitespace = kwargs.get('drop_whitespace', True)
1565 self.drop_whitespace = kwargs.get('drop_whitespace', True)
1560
1566
1561 def _cutdown(self, ucstr, space_left):
1567 def _cutdown(self, ucstr, space_left):
1562 l = 0
1568 l = 0
1563 colwidth = encoding.ucolwidth
1569 colwidth = encoding.ucolwidth
1564 for i in xrange(len(ucstr)):
1570 for i in xrange(len(ucstr)):
1565 l += colwidth(ucstr[i])
1571 l += colwidth(ucstr[i])
1566 if space_left < l:
1572 if space_left < l:
1567 return (ucstr[:i], ucstr[i:])
1573 return (ucstr[:i], ucstr[i:])
1568 return ucstr, ''
1574 return ucstr, ''
1569
1575
1570 # overriding of base class
1576 # overriding of base class
1571 def _handle_long_word(self, reversed_chunks, cur_line, cur_len, width):
1577 def _handle_long_word(self, reversed_chunks, cur_line, cur_len, width):
1572 space_left = max(width - cur_len, 1)
1578 space_left = max(width - cur_len, 1)
1573
1579
1574 if self.break_long_words:
1580 if self.break_long_words:
1575 cut, res = self._cutdown(reversed_chunks[-1], space_left)
1581 cut, res = self._cutdown(reversed_chunks[-1], space_left)
1576 cur_line.append(cut)
1582 cur_line.append(cut)
1577 reversed_chunks[-1] = res
1583 reversed_chunks[-1] = res
1578 elif not cur_line:
1584 elif not cur_line:
1579 cur_line.append(reversed_chunks.pop())
1585 cur_line.append(reversed_chunks.pop())
1580
1586
1581 # this overriding code is imported from TextWrapper of python 2.6
1587 # this overriding code is imported from TextWrapper of python 2.6
1582 # to calculate columns of string by 'encoding.ucolwidth()'
1588 # to calculate columns of string by 'encoding.ucolwidth()'
1583 def _wrap_chunks(self, chunks):
1589 def _wrap_chunks(self, chunks):
1584 colwidth = encoding.ucolwidth
1590 colwidth = encoding.ucolwidth
1585
1591
1586 lines = []
1592 lines = []
1587 if self.width <= 0:
1593 if self.width <= 0:
1588 raise ValueError("invalid width %r (must be > 0)" % self.width)
1594 raise ValueError("invalid width %r (must be > 0)" % self.width)
1589
1595
1590 # Arrange in reverse order so items can be efficiently popped
1596 # Arrange in reverse order so items can be efficiently popped
1591 # from a stack of chunks.
1597 # from a stack of chunks.
1592 chunks.reverse()
1598 chunks.reverse()
1593
1599
1594 while chunks:
1600 while chunks:
1595
1601
1596 # Start the list of chunks that will make up the current line.
1602 # Start the list of chunks that will make up the current line.
1597 # cur_len is just the length of all the chunks in cur_line.
1603 # cur_len is just the length of all the chunks in cur_line.
1598 cur_line = []
1604 cur_line = []
1599 cur_len = 0
1605 cur_len = 0
1600
1606
1601 # Figure out which static string will prefix this line.
1607 # Figure out which static string will prefix this line.
1602 if lines:
1608 if lines:
1603 indent = self.subsequent_indent
1609 indent = self.subsequent_indent
1604 else:
1610 else:
1605 indent = self.initial_indent
1611 indent = self.initial_indent
1606
1612
1607 # Maximum width for this line.
1613 # Maximum width for this line.
1608 width = self.width - len(indent)
1614 width = self.width - len(indent)
1609
1615
1610 # First chunk on line is whitespace -- drop it, unless this
1616 # First chunk on line is whitespace -- drop it, unless this
1611 # is the very beginning of the text (i.e. no lines started yet).
1617 # is the very beginning of the text (i.e. no lines started yet).
1612 if self.drop_whitespace and chunks[-1].strip() == '' and lines:
1618 if self.drop_whitespace and chunks[-1].strip() == '' and lines:
1613 del chunks[-1]
1619 del chunks[-1]
1614
1620
1615 while chunks:
1621 while chunks:
1616 l = colwidth(chunks[-1])
1622 l = colwidth(chunks[-1])
1617
1623
1618 # Can at least squeeze this chunk onto the current line.
1624 # Can at least squeeze this chunk onto the current line.
1619 if cur_len + l <= width:
1625 if cur_len + l <= width:
1620 cur_line.append(chunks.pop())
1626 cur_line.append(chunks.pop())
1621 cur_len += l
1627 cur_len += l
1622
1628
1623 # Nope, this line is full.
1629 # Nope, this line is full.
1624 else:
1630 else:
1625 break
1631 break
1626
1632
1627 # The current line is full, and the next chunk is too big to
1633 # The current line is full, and the next chunk is too big to
1628 # fit on *any* line (not just this one).
1634 # fit on *any* line (not just this one).
1629 if chunks and colwidth(chunks[-1]) > width:
1635 if chunks and colwidth(chunks[-1]) > width:
1630 self._handle_long_word(chunks, cur_line, cur_len, width)
1636 self._handle_long_word(chunks, cur_line, cur_len, width)
1631
1637
1632 # If the last chunk on this line is all whitespace, drop it.
1638 # If the last chunk on this line is all whitespace, drop it.
1633 if (self.drop_whitespace and
1639 if (self.drop_whitespace and
1634 cur_line and cur_line[-1].strip() == ''):
1640 cur_line and cur_line[-1].strip() == ''):
1635 del cur_line[-1]
1641 del cur_line[-1]
1636
1642
1637 # Convert current line back to a string and store it in list
1643 # Convert current line back to a string and store it in list
1638 # of all lines (return value).
1644 # of all lines (return value).
1639 if cur_line:
1645 if cur_line:
1640 lines.append(indent + ''.join(cur_line))
1646 lines.append(indent + ''.join(cur_line))
1641
1647
1642 return lines
1648 return lines
1643
1649
1644 global MBTextWrapper
1650 global MBTextWrapper
1645 MBTextWrapper = tw
1651 MBTextWrapper = tw
1646 return tw(**kwargs)
1652 return tw(**kwargs)
1647
1653
1648 def wrap(line, width, initindent='', hangindent=''):
1654 def wrap(line, width, initindent='', hangindent=''):
1649 maxindent = max(len(hangindent), len(initindent))
1655 maxindent = max(len(hangindent), len(initindent))
1650 if width <= maxindent:
1656 if width <= maxindent:
1651 # adjust for weird terminal size
1657 # adjust for weird terminal size
1652 width = max(78, maxindent + 1)
1658 width = max(78, maxindent + 1)
1653 line = line.decode(encoding.encoding, encoding.encodingmode)
1659 line = line.decode(encoding.encoding, encoding.encodingmode)
1654 initindent = initindent.decode(encoding.encoding, encoding.encodingmode)
1660 initindent = initindent.decode(encoding.encoding, encoding.encodingmode)
1655 hangindent = hangindent.decode(encoding.encoding, encoding.encodingmode)
1661 hangindent = hangindent.decode(encoding.encoding, encoding.encodingmode)
1656 wrapper = MBTextWrapper(width=width,
1662 wrapper = MBTextWrapper(width=width,
1657 initial_indent=initindent,
1663 initial_indent=initindent,
1658 subsequent_indent=hangindent)
1664 subsequent_indent=hangindent)
1659 return wrapper.fill(line).encode(encoding.encoding)
1665 return wrapper.fill(line).encode(encoding.encoding)
1660
1666
1661 def iterlines(iterator):
1667 def iterlines(iterator):
1662 for chunk in iterator:
1668 for chunk in iterator:
1663 for line in chunk.splitlines():
1669 for line in chunk.splitlines():
1664 yield line
1670 yield line
1665
1671
1666 def expandpath(path):
1672 def expandpath(path):
1667 return os.path.expanduser(os.path.expandvars(path))
1673 return os.path.expanduser(os.path.expandvars(path))
1668
1674
1669 def hgcmd():
1675 def hgcmd():
1670 """Return the command used to execute current hg
1676 """Return the command used to execute current hg
1671
1677
1672 This is different from hgexecutable() because on Windows we want
1678 This is different from hgexecutable() because on Windows we want
1673 to avoid things like batch files that open new shell windows, so we
1679 to avoid things like batch files that open new shell windows, so we
1674 get either the python call or the current executable.
1680 get either the python call or the current executable.
1675 """
1681 """
1676 if mainfrozen():
1682 if mainfrozen():
1677 return [sys.executable]
1683 return [sys.executable]
1678 return gethgcmd()
1684 return gethgcmd()
1679
1685
1680 def rundetached(args, condfn):
1686 def rundetached(args, condfn):
1681 """Execute the argument list in a detached process.
1687 """Execute the argument list in a detached process.
1682
1688
1683 condfn is a callable which is called repeatedly and should return
1689 condfn is a callable which is called repeatedly and should return
1684 True once the child process is known to have started successfully.
1690 True once the child process is known to have started successfully.
1685 At this point, the child process PID is returned. If the child
1691 At this point, the child process PID is returned. If the child
1686 process fails to start or finishes before condfn() evaluates to
1692 process fails to start or finishes before condfn() evaluates to
1687 True, return -1.
1693 True, return -1.
1688 """
1694 """
1689 # Windows case is easier because the child process is either
1695 # Windows case is easier because the child process is either
1690 # successfully starting and validating the condition or exiting
1696 # successfully starting and validating the condition or exiting
1691 # on failure. We just poll on its PID. On Unix, if the child
1697 # on failure. We just poll on its PID. On Unix, if the child
1692 # process fails to start, it will be left in a zombie state until
1698 # process fails to start, it will be left in a zombie state until
1693 # the parent waits on it, which we cannot do since we expect a long
1699 # the parent waits on it, which we cannot do since we expect a long
1694 # running process on success. Instead we listen for SIGCHLD telling
1700 # running process on success. Instead we listen for SIGCHLD telling
1695 # us our child process terminated.
1701 # us our child process terminated.
1696 terminated = set()
1702 terminated = set()
1697 def handler(signum, frame):
1703 def handler(signum, frame):
1698 terminated.add(os.wait())
1704 terminated.add(os.wait())
1699 prevhandler = None
1705 prevhandler = None
1700 SIGCHLD = getattr(signal, 'SIGCHLD', None)
1706 SIGCHLD = getattr(signal, 'SIGCHLD', None)
1701 if SIGCHLD is not None:
1707 if SIGCHLD is not None:
1702 prevhandler = signal.signal(SIGCHLD, handler)
1708 prevhandler = signal.signal(SIGCHLD, handler)
1703 try:
1709 try:
1704 pid = spawndetached(args)
1710 pid = spawndetached(args)
1705 while not condfn():
1711 while not condfn():
1706 if ((pid in terminated or not testpid(pid))
1712 if ((pid in terminated or not testpid(pid))
1707 and not condfn()):
1713 and not condfn()):
1708 return -1
1714 return -1
1709 time.sleep(0.1)
1715 time.sleep(0.1)
1710 return pid
1716 return pid
1711 finally:
1717 finally:
1712 if prevhandler is not None:
1718 if prevhandler is not None:
1713 signal.signal(signal.SIGCHLD, prevhandler)
1719 signal.signal(signal.SIGCHLD, prevhandler)
1714
1720
1715 try:
1721 try:
1716 any, all = any, all
1722 any, all = any, all
1717 except NameError:
1723 except NameError:
1718 def any(iterable):
1724 def any(iterable):
1719 for i in iterable:
1725 for i in iterable:
1720 if i:
1726 if i:
1721 return True
1727 return True
1722 return False
1728 return False
1723
1729
1724 def all(iterable):
1730 def all(iterable):
1725 for i in iterable:
1731 for i in iterable:
1726 if not i:
1732 if not i:
1727 return False
1733 return False
1728 return True
1734 return True
1729
1735
1730 def interpolate(prefix, mapping, s, fn=None, escape_prefix=False):
1736 def interpolate(prefix, mapping, s, fn=None, escape_prefix=False):
1731 """Return the result of interpolating items in the mapping into string s.
1737 """Return the result of interpolating items in the mapping into string s.
1732
1738
1733 prefix is a single character string, or a two character string with
1739 prefix is a single character string, or a two character string with
1734 a backslash as the first character if the prefix needs to be escaped in
1740 a backslash as the first character if the prefix needs to be escaped in
1735 a regular expression.
1741 a regular expression.
1736
1742
1737 fn is an optional function that will be applied to the replacement text
1743 fn is an optional function that will be applied to the replacement text
1738 just before replacement.
1744 just before replacement.
1739
1745
1740 escape_prefix is an optional flag that allows using a doubled prefix
1746 escape_prefix is an optional flag that allows using a doubled prefix
1741 for escaping it.
1747 for escaping it.
1742 """
1748 """
1743 fn = fn or (lambda s: s)
1749 fn = fn or (lambda s: s)
1744 patterns = '|'.join(mapping.keys())
1750 patterns = '|'.join(mapping.keys())
1745 if escape_prefix:
1751 if escape_prefix:
1746 patterns += '|' + prefix
1752 patterns += '|' + prefix
1747 if len(prefix) > 1:
1753 if len(prefix) > 1:
1748 prefix_char = prefix[1:]
1754 prefix_char = prefix[1:]
1749 else:
1755 else:
1750 prefix_char = prefix
1756 prefix_char = prefix
1751 mapping[prefix_char] = prefix_char
1757 mapping[prefix_char] = prefix_char
1752 r = remod.compile(r'%s(%s)' % (prefix, patterns))
1758 r = remod.compile(r'%s(%s)' % (prefix, patterns))
1753 return r.sub(lambda x: fn(mapping[x.group()[1:]]), s)
1759 return r.sub(lambda x: fn(mapping[x.group()[1:]]), s)
1754
1760
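# Illustrative usage note (added for exposition, not part of the original
# module): expand '%'-prefixed placeholders from a mapping; the keys below
# are hypothetical.
#
#   interpolate('%', {'user': 'alice', 'rev': '42'}, 'rev %rev by %user')
#   -> 'rev 42 by alice'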
1755 def getport(port):
1761 def getport(port):
1756 """Return the port for a given network service.
1762 """Return the port for a given network service.
1757
1763
1758 If port is an integer, it's returned as is. If it's a string, it's
1764 If port is an integer, it's returned as is. If it's a string, it's
1759 looked up using socket.getservbyname(). If there's no matching
1765 looked up using socket.getservbyname(). If there's no matching
1760 service, util.Abort is raised.
1766 service, util.Abort is raised.
1761 """
1767 """
1762 try:
1768 try:
1763 return int(port)
1769 return int(port)
1764 except ValueError:
1770 except ValueError:
1765 pass
1771 pass
1766
1772
1767 try:
1773 try:
1768 return socket.getservbyname(port)
1774 return socket.getservbyname(port)
1769 except socket.error:
1775 except socket.error:
1770 raise Abort(_("no port number associated with service '%s'") % port)
1776 raise Abort(_("no port number associated with service '%s'") % port)
1771
1777
1772 _booleans = {'1': True, 'yes': True, 'true': True, 'on': True, 'always': True,
1778 _booleans = {'1': True, 'yes': True, 'true': True, 'on': True, 'always': True,
1773 '0': False, 'no': False, 'false': False, 'off': False,
1779 '0': False, 'no': False, 'false': False, 'off': False,
1774 'never': False}
1780 'never': False}
1775
1781
1776 def parsebool(s):
1782 def parsebool(s):
1777 """Parse s into a boolean.
1783 """Parse s into a boolean.
1778
1784
1779 If s is not a valid boolean, returns None.
1785 If s is not a valid boolean, returns None.
1780 """
1786 """
1781 return _booleans.get(s.lower(), None)
1787 return _booleans.get(s.lower(), None)
1782
1788
1783 _hexdig = '0123456789ABCDEFabcdef'
1789 _hexdig = '0123456789ABCDEFabcdef'
1784 _hextochr = dict((a + b, chr(int(a + b, 16)))
1790 _hextochr = dict((a + b, chr(int(a + b, 16)))
1785 for a in _hexdig for b in _hexdig)
1791 for a in _hexdig for b in _hexdig)
1786
1792
1787 def _urlunquote(s):
1793 def _urlunquote(s):
1788 """Decode HTTP/HTML % encoding.
1794 """Decode HTTP/HTML % encoding.
1789
1795
1790 >>> _urlunquote('abc%20def')
1796 >>> _urlunquote('abc%20def')
1791 'abc def'
1797 'abc def'
1792 """
1798 """
1793 res = s.split('%')
1799 res = s.split('%')
1794 # fastpath
1800 # fastpath
1795 if len(res) == 1:
1801 if len(res) == 1:
1796 return s
1802 return s
1797 s = res[0]
1803 s = res[0]
1798 for item in res[1:]:
1804 for item in res[1:]:
1799 try:
1805 try:
1800 s += _hextochr[item[:2]] + item[2:]
1806 s += _hextochr[item[:2]] + item[2:]
1801 except KeyError:
1807 except KeyError:
1802 s += '%' + item
1808 s += '%' + item
1803 except UnicodeDecodeError:
1809 except UnicodeDecodeError:
1804 s += unichr(int(item[:2], 16)) + item[2:]
1810 s += unichr(int(item[:2], 16)) + item[2:]
1805 return s
1811 return s
1806
1812
1807 class url(object):
1813 class url(object):
1808 r"""Reliable URL parser.
1814 r"""Reliable URL parser.
1809
1815
1810 This parses URLs and provides attributes for the following
1816 This parses URLs and provides attributes for the following
1811 components:
1817 components:
1812
1818
1813 <scheme>://<user>:<passwd>@<host>:<port>/<path>?<query>#<fragment>
1819 <scheme>://<user>:<passwd>@<host>:<port>/<path>?<query>#<fragment>
1814
1820
1815 Missing components are set to None. The only exception is
1821 Missing components are set to None. The only exception is
1816 fragment, which is set to '' if present but empty.
1822 fragment, which is set to '' if present but empty.
1817
1823
1818 If parsefragment is False, fragment is included in query. If
1824 If parsefragment is False, fragment is included in query. If
1819 parsequery is False, query is included in path. If both are
1825 parsequery is False, query is included in path. If both are
1820 False, both fragment and query are included in path.
1826 False, both fragment and query are included in path.
1821
1827
1822 See http://www.ietf.org/rfc/rfc2396.txt for more information.
1828 See http://www.ietf.org/rfc/rfc2396.txt for more information.
1823
1829
1824 Note that for backward compatibility reasons, bundle URLs do not
1830 Note that for backward compatibility reasons, bundle URLs do not
1825 take host names. That means 'bundle://../' has a path of '../'.
1831 take host names. That means 'bundle://../' has a path of '../'.
1826
1832
1827 Examples:
1833 Examples:
1828
1834
1829 >>> url('http://www.ietf.org/rfc/rfc2396.txt')
1835 >>> url('http://www.ietf.org/rfc/rfc2396.txt')
1830 <url scheme: 'http', host: 'www.ietf.org', path: 'rfc/rfc2396.txt'>
1836 <url scheme: 'http', host: 'www.ietf.org', path: 'rfc/rfc2396.txt'>
1831 >>> url('ssh://[::1]:2200//home/joe/repo')
1837 >>> url('ssh://[::1]:2200//home/joe/repo')
1832 <url scheme: 'ssh', host: '[::1]', port: '2200', path: '/home/joe/repo'>
1838 <url scheme: 'ssh', host: '[::1]', port: '2200', path: '/home/joe/repo'>
1833 >>> url('file:///home/joe/repo')
1839 >>> url('file:///home/joe/repo')
1834 <url scheme: 'file', path: '/home/joe/repo'>
1840 <url scheme: 'file', path: '/home/joe/repo'>
1835 >>> url('file:///c:/temp/foo/')
1841 >>> url('file:///c:/temp/foo/')
1836 <url scheme: 'file', path: 'c:/temp/foo/'>
1842 <url scheme: 'file', path: 'c:/temp/foo/'>
1837 >>> url('bundle:foo')
1843 >>> url('bundle:foo')
1838 <url scheme: 'bundle', path: 'foo'>
1844 <url scheme: 'bundle', path: 'foo'>
1839 >>> url('bundle://../foo')
1845 >>> url('bundle://../foo')
1840 <url scheme: 'bundle', path: '../foo'>
1846 <url scheme: 'bundle', path: '../foo'>
1841 >>> url(r'c:\foo\bar')
1847 >>> url(r'c:\foo\bar')
1842 <url path: 'c:\\foo\\bar'>
1848 <url path: 'c:\\foo\\bar'>
1843 >>> url(r'\\blah\blah\blah')
1849 >>> url(r'\\blah\blah\blah')
1844 <url path: '\\\\blah\\blah\\blah'>
1850 <url path: '\\\\blah\\blah\\blah'>
1845 >>> url(r'\\blah\blah\blah#baz')
1851 >>> url(r'\\blah\blah\blah#baz')
1846 <url path: '\\\\blah\\blah\\blah', fragment: 'baz'>
1852 <url path: '\\\\blah\\blah\\blah', fragment: 'baz'>
1847 >>> url(r'file:///C:\users\me')
1853 >>> url(r'file:///C:\users\me')
1848 <url scheme: 'file', path: 'C:\\users\\me'>
1854 <url scheme: 'file', path: 'C:\\users\\me'>
1849
1855
1850 Authentication credentials:
1856 Authentication credentials:
1851
1857
1852 >>> url('ssh://joe:xyz@x/repo')
1858 >>> url('ssh://joe:xyz@x/repo')
1853 <url scheme: 'ssh', user: 'joe', passwd: 'xyz', host: 'x', path: 'repo'>
1859 <url scheme: 'ssh', user: 'joe', passwd: 'xyz', host: 'x', path: 'repo'>
1854 >>> url('ssh://joe@x/repo')
1860 >>> url('ssh://joe@x/repo')
1855 <url scheme: 'ssh', user: 'joe', host: 'x', path: 'repo'>
1861 <url scheme: 'ssh', user: 'joe', host: 'x', path: 'repo'>
1856
1862
1857 Query strings and fragments:
1863 Query strings and fragments:
1858
1864
1859 >>> url('http://host/a?b#c')
1865 >>> url('http://host/a?b#c')
1860 <url scheme: 'http', host: 'host', path: 'a', query: 'b', fragment: 'c'>
1866 <url scheme: 'http', host: 'host', path: 'a', query: 'b', fragment: 'c'>
1861 >>> url('http://host/a?b#c', parsequery=False, parsefragment=False)
1867 >>> url('http://host/a?b#c', parsequery=False, parsefragment=False)
1862 <url scheme: 'http', host: 'host', path: 'a?b#c'>
1868 <url scheme: 'http', host: 'host', path: 'a?b#c'>
1863 """
1869 """
1864
1870
1865 _safechars = "!~*'()+"
1871 _safechars = "!~*'()+"
1866 _safepchars = "/!~*'()+:\\"
1872 _safepchars = "/!~*'()+:\\"
1867 _matchscheme = remod.compile(r'^[a-zA-Z0-9+.\-]+:').match
1873 _matchscheme = remod.compile(r'^[a-zA-Z0-9+.\-]+:').match
1868
1874
    def __init__(self, path, parsequery=True, parsefragment=True):
        # We slowly chomp away at path until we have only the path left
        self.scheme = self.user = self.passwd = self.host = None
        self.port = self.path = self.query = self.fragment = None
        self._localpath = True
        self._hostport = ''
        self._origpath = path

        if parsefragment and '#' in path:
            path, self.fragment = path.split('#', 1)
            if not path:
                path = None

        # special case for Windows drive letters and UNC paths
        if hasdriveletter(path) or path.startswith(r'\\'):
            self.path = path
            return

        # For compatibility reasons, we can't handle bundle paths as
        # normal URLs
        if path.startswith('bundle:'):
            self.scheme = 'bundle'
            path = path[7:]
            if path.startswith('//'):
                path = path[2:]
            self.path = path
            return

        if self._matchscheme(path):
            parts = path.split(':', 1)
            if parts[0]:
                self.scheme, path = parts
                self._localpath = False

        if not path:
            path = None
            if self._localpath:
                self.path = ''
                return
        else:
            if self._localpath:
                self.path = path
                return

        if parsequery and '?' in path:
            path, self.query = path.split('?', 1)
            if not path:
                path = None
            if not self.query:
                self.query = None

        # // is required to specify a host/authority
        if path and path.startswith('//'):
            parts = path[2:].split('/', 1)
            if len(parts) > 1:
                self.host, path = parts
            else:
                self.host = parts[0]
                path = None
            if not self.host:
                self.host = None
                # path of file:///d is /d
                # path of file:///d:/ is d:/, not /d:/
                if path and not hasdriveletter(path):
                    path = '/' + path

        if self.host and '@' in self.host:
            self.user, self.host = self.host.rsplit('@', 1)
            if ':' in self.user:
                self.user, self.passwd = self.user.split(':', 1)
            if not self.host:
                self.host = None

        # Don't split on colons in IPv6 addresses without ports
        if (self.host and ':' in self.host and
            not (self.host.startswith('[') and self.host.endswith(']'))):
            self._hostport = self.host
            self.host, self.port = self.host.rsplit(':', 1)
            if not self.host:
                self.host = None

        if (self.host and self.scheme == 'file' and
            self.host not in ('localhost', '127.0.0.1', '[::1]')):
            raise Abort(_('file:// URLs can only refer to localhost'))

        self.path = path

        # leave the query string escaped
        for a in ('user', 'passwd', 'host', 'port',
                  'path', 'fragment'):
            v = getattr(self, a)
            if v is not None:
                setattr(self, a, _urlunquote(v))

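    # Editor's illustration (not part of the original module): how the
    # parsing above decomposes a hypothetical URL into attributes. The
    # host, user and repository names are made up.
    #
    #   >>> u = url('ssh://joe:secret@example.com:2200/repo')
    #   >>> u.scheme, u.user, u.passwd, u.host, u.port, u.path
    #   ('ssh', 'joe', 'secret', 'example.com', '2200', 'repo')
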
    def __repr__(self):
        attrs = []
        for a in ('scheme', 'user', 'passwd', 'host', 'port', 'path',
                  'query', 'fragment'):
            v = getattr(self, a)
            if v is not None:
                attrs.append('%s: %r' % (a, v))
        return '<url %s>' % ', '.join(attrs)

    def __str__(self):
        r"""Join the URL's components back into a URL string.

        Examples:

        >>> str(url('http://user:pw@host:80/c:/bob?fo:oo#ba:ar'))
        'http://user:pw@host:80/c:/bob?fo:oo#ba:ar'
        >>> str(url('http://user:pw@host:80/?foo=bar&baz=42'))
        'http://user:pw@host:80/?foo=bar&baz=42'
        >>> str(url('http://user:pw@host:80/?foo=bar%3dbaz'))
        'http://user:pw@host:80/?foo=bar%3dbaz'
        >>> str(url('ssh://user:pw@[::1]:2200//home/joe#'))
        'ssh://user:pw@[::1]:2200//home/joe#'
        >>> str(url('http://localhost:80//'))
        'http://localhost:80//'
        >>> str(url('http://localhost:80/'))
        'http://localhost:80/'
        >>> str(url('http://localhost:80'))
        'http://localhost:80/'
        >>> str(url('bundle:foo'))
        'bundle:foo'
        >>> str(url('bundle://../foo'))
        'bundle:../foo'
        >>> str(url('path'))
        'path'
        >>> str(url('file:///tmp/foo/bar'))
        'file:///tmp/foo/bar'
        >>> str(url('file:///c:/tmp/foo/bar'))
        'file:///c:/tmp/foo/bar'
        >>> print url(r'bundle:foo\bar')
        bundle:foo\bar
        >>> print url(r'file:///D:\data\hg')
        file:///D:\data\hg
        """
        if self._localpath:
            s = self.path
            if self.scheme == 'bundle':
                s = 'bundle:' + s
            if self.fragment:
                s += '#' + self.fragment
            return s

        s = self.scheme + ':'
        if self.user or self.passwd or self.host:
            s += '//'
        elif self.scheme and (not self.path or self.path.startswith('/')
                              or hasdriveletter(self.path)):
            s += '//'
            if hasdriveletter(self.path):
                s += '/'
        if self.user:
            s += urllib.quote(self.user, safe=self._safechars)
        if self.passwd:
            s += ':' + urllib.quote(self.passwd, safe=self._safechars)
        if self.user or self.passwd:
            s += '@'
        if self.host:
            if not (self.host.startswith('[') and self.host.endswith(']')):
                s += urllib.quote(self.host)
            else:
                s += self.host
        if self.port:
            s += ':' + urllib.quote(self.port)
        if self.host:
            s += '/'
        if self.path:
            # TODO: similar to the query string, we should not unescape the
            # path when we store it, the path might contain '%2f' = '/',
            # which we should *not* escape.
            s += urllib.quote(self.path, safe=self._safepchars)
        if self.query:
            # we store the query in escaped form.
            s += '?' + self.query
        if self.fragment is not None:
            s += '#' + urllib.quote(self.fragment, safe=self._safepchars)
        return s

    def authinfo(self):
        user, passwd = self.user, self.passwd
        try:
            self.user, self.passwd = None, None
            s = str(self)
        finally:
            self.user, self.passwd = user, passwd
        if not self.user:
            return (s, None)
        # authinfo[1] is passed to urllib2 password manager, and its
        # URIs must not contain credentials. The host is passed in the
        # URIs list because Python < 2.4.3 uses only that to search for
        # a password.
        return (s, (None, (s, self.host),
                    self.user, self.passwd or ''))

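    # Editor's illustration (not part of the original module): authinfo()
    # returns the URL with credentials stripped plus a tuple suitable for a
    # urllib2 password manager. The credentials and host are hypothetical.
    #
    #   >>> url('http://joe:secret@example.com/repo').authinfo()
    #   ('http://example.com/repo',
    #    (None, ('http://example.com/repo', 'example.com'), 'joe', 'secret'))
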
    def isabs(self):
        if self.scheme and self.scheme != 'file':
            return True # remote URL
        if hasdriveletter(self.path):
            return True # absolute for our purposes - can't be joined()
        if self.path.startswith(r'\\'):
            return True # Windows UNC path
        if self.path.startswith('/'):
            return True # POSIX-style
        return False

    def localpath(self):
        if self.scheme == 'file' or self.scheme == 'bundle':
            path = self.path or '/'
            # For Windows, we need to promote hosts containing drive
            # letters to paths with drive letters.
            if hasdriveletter(self._hostport):
                path = self._hostport + '/' + self.path
            elif (self.host is not None and self.path
                  and not hasdriveletter(path)):
                path = '/' + path
            return path
        return self._origpath

    def islocal(self):
        '''whether localpath will return something that posixfile can open'''
        return (not self.scheme or self.scheme == 'file'
                or self.scheme == 'bundle')

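# Editor's illustration (not part of the original module): how isabs(),
# islocal() and localpath() classify a few hypothetical paths.
#
#   >>> u = url('https://example.com/repo')
#   >>> u.isabs(), u.islocal()
#   (True, False)
#   >>> u = url('relative/path')
#   >>> u.isabs(), u.islocal()
#   (False, True)
#   >>> url('bundle:../foo').localpath()
#   '../foo'
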
def hasscheme(path):
    return bool(url(path).scheme)

def hasdriveletter(path):
    return path and path[1:2] == ':' and path[0:1].isalpha()

def urllocalpath(path):
    return url(path, parsequery=False, parsefragment=False).localpath()

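# Editor's illustration (not part of the original module); the paths are
# hypothetical.
#
#   >>> bool(hasdriveletter('c:\\foo')), bool(hasdriveletter('/tmp/foo'))
#   (True, False)
#   >>> urllocalpath('file:///tmp/repo')
#   '/tmp/repo'
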
def hidepassword(u):
    '''hide user credential in a url string'''
    u = url(u)
    if u.passwd:
        u.passwd = '***'
    return str(u)

def removeauth(u):
    '''remove all authentication information from a url string'''
    u = url(u)
    u.user = u.passwd = None
    return str(u)

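# Editor's illustration (not part of the original module); the credentials
# and host are hypothetical.
#
#   >>> hidepassword('http://joe:secret@example.com/repo')
#   'http://joe:***@example.com/repo'
#   >>> removeauth('http://joe:secret@example.com/repo')
#   'http://example.com/repo'
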
def isatty(fd):
    try:
        return fd.isatty()
    except AttributeError:
        return False

timecount = unitcountfn(
    (1, 1e3, _('%.0f s')),
    (100, 1, _('%.1f s')),
    (10, 1, _('%.2f s')),
    (1, 1, _('%.3f s')),
    (100, 0.001, _('%.1f ms')),
    (10, 0.001, _('%.2f ms')),
    (1, 0.001, _('%.3f ms')),
    (100, 0.000001, _('%.1f us')),
    (10, 0.000001, _('%.2f us')),
    (1, 0.000001, _('%.3f us')),
    (100, 0.000000001, _('%.1f ns')),
    (10, 0.000000001, _('%.2f ns')),
    (1, 0.000000001, _('%.3f ns')),
    )

_timenesting = [0]

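# Editor's illustration (not part of the original module), assuming
# unitcountfn (defined earlier in this file) formats a value with the first
# unit whose threshold it reaches:
#
#   >>> timecount(2.5)
#   '2.500 s'
#   >>> timecount(0.0042)
#   '4.200 ms'
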
def timed(func):
    '''Report the execution time of a function call to stderr.

    During development, use as a decorator when you need to measure
    the cost of a function, e.g. as follows:

    @util.timed
    def foo(a, b, c):
        pass
    '''

    def wrapper(*args, **kwargs):
        start = time.time()
        indent = 2
        _timenesting[0] += indent
        try:
            return func(*args, **kwargs)
        finally:
            elapsed = time.time() - start
            _timenesting[0] -= indent
            sys.stderr.write('%s%s: %s\n' %
                             (' ' * _timenesting[0], func.__name__,
                              timecount(elapsed)))
    return wrapper

_sizeunits = (('m', 2**20), ('k', 2**10), ('g', 2**30),
              ('kb', 2**10), ('mb', 2**20), ('gb', 2**30), ('b', 1))

def sizetoint(s):
    '''Convert a space specifier to a byte count.

    >>> sizetoint('30')
    30
    >>> sizetoint('2.2kb')
    2252
    >>> sizetoint('6M')
    6291456
    '''
    t = s.strip().lower()
    try:
        for k, u in _sizeunits:
            if t.endswith(k):
                return int(float(t[:-len(k)]) * u)
        return int(t)
    except ValueError:
        raise error.ParseError(_("couldn't parse size: %s") % s)

class hooks(object):
    '''A collection of hook functions that can be used to extend a
    function's behaviour. Hooks are called in lexicographic order,
    based on the names of their sources.'''

    def __init__(self):
        self._hooks = []

    def add(self, source, hook):
        self._hooks.append((source, hook))

    def __call__(self, *args):
        self._hooks.sort(key=lambda x: x[0])
        results = []
        for source, hook in self._hooks:
            results.append(hook(*args))
        return results

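# Editor's illustration (not part of the original module): hooks run in
# lexicographic order of their source names; the sources and functions here
# are hypothetical.
#
#   >>> h = hooks()
#   >>> h.add('ext-b', lambda x: x + 1)
#   >>> h.add('ext-a', lambda x: x * 2)
#   >>> h(10)
#   [20, 11]
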
def debugstacktrace(msg='stacktrace', skip=0, f=sys.stderr, otherf=sys.stdout):
    '''Writes a message to f (stderr) with a nicely formatted stacktrace.
    Skips the 'skip' last entries. By default it will flush stdout first.
    It can be used everywhere and intentionally does not require a ui object.
    It is not meant for production code, but it is very convenient while
    developing.
    '''
    if otherf:
        otherf.flush()
    f.write('%s at:\n' % msg)
    entries = [('%s:%s' % (fn, ln), func)
               for fn, ln, func, _text in traceback.extract_stack()[:-skip - 1]]
    if entries:
        fnmax = max(len(entry[0]) for entry in entries)
        for fnln, func in entries:
            f.write(' %-*s in %s\n' % (fnmax, fnln, func))
    f.flush()

# convenient shortcut
dst = debugstacktrace
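
# Editor's illustration (not part of the original module): the file:line
# entries depend entirely on the caller's stack, so the output shown here is
# hypothetical.
#
#   >>> dst('checking foo')                     # doctest: +SKIP
#   checking foo at:
#    example.py:10 in main
#    example.py:3 in checkfoo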