util: clean up trailing whitespace
Augie Fackler
r12086:dba2db7a default
@@ -1,1437 +1,1437 @@
# util.py - Mercurial utility functions and platform specific implementations
#
# Copyright 2005 K. Thananchayan <thananck@yahoo.com>
# Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
# Copyright 2006 Vadim Gelfer <vadim.gelfer@gmail.com>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.

"""Mercurial utility functions and platform specific implementations.

This contains helper routines that are independent of the SCM core and
hide platform-specific details from the core.
"""

from i18n import _
import error, osutil, encoding
import errno, re, shutil, sys, tempfile, traceback
import os, stat, time, calendar, textwrap, unicodedata, signal
import imp, socket

# Python compatibility

def sha1(s):
    return _fastsha1(s)

def _fastsha1(s):
    # This function will import sha1 from hashlib or sha (whichever is
    # available) and overwrite itself with it on the first call.
    # Subsequent calls will go directly to the imported function.
    if sys.version_info >= (2, 5):
        from hashlib import sha1 as _sha1
    else:
        from sha import sha as _sha1
    global _fastsha1, sha1
    _fastsha1 = sha1 = _sha1
    return _sha1(s)

import __builtin__

if sys.version_info[0] < 3:
    def fakebuffer(sliceable, offset=0):
        return sliceable[offset:]
else:
    def fakebuffer(sliceable, offset=0):
        return memoryview(sliceable)[offset:]
try:
    buffer
except NameError:
    __builtin__.buffer = fakebuffer

import subprocess
closefds = os.name == 'posix'

def popen2(cmd, env=None, newlines=False):
    # Setting bufsize to -1 lets the system decide the buffer size.
    # The default for bufsize is 0, meaning unbuffered. This leads to
    # poor performance on Mac OS X: http://bugs.python.org/issue4194
    p = subprocess.Popen(cmd, shell=True, bufsize=-1,
                         close_fds=closefds,
                         stdin=subprocess.PIPE, stdout=subprocess.PIPE,
                         universal_newlines=newlines,
                         env=env)
    return p.stdin, p.stdout

def popen3(cmd, env=None, newlines=False):
    p = subprocess.Popen(cmd, shell=True, bufsize=-1,
                         close_fds=closefds,
                         stdin=subprocess.PIPE, stdout=subprocess.PIPE,
                         stderr=subprocess.PIPE,
                         universal_newlines=newlines,
                         env=env)
    return p.stdin, p.stdout, p.stderr

def version():
    """Return version information if available."""
    try:
        import __version__
        return __version__.version
    except ImportError:
        return 'unknown'

# used by parsedate
defaultdateformats = (
    '%Y-%m-%d %H:%M:%S',
    '%Y-%m-%d %I:%M:%S%p',
    '%Y-%m-%d %H:%M',
    '%Y-%m-%d %I:%M%p',
    '%Y-%m-%d',
    '%m-%d',
    '%m/%d',
    '%m/%d/%y',
    '%m/%d/%Y',
    '%a %b %d %H:%M:%S %Y',
    '%a %b %d %I:%M:%S%p %Y',
    '%a, %d %b %Y %H:%M:%S', # GNU coreutils "/bin/date --rfc-2822"
    '%b %d %H:%M:%S %Y',
    '%b %d %I:%M:%S%p %Y',
    '%b %d %H:%M:%S',
    '%b %d %I:%M:%S%p',
    '%b %d %H:%M',
    '%b %d %I:%M%p',
    '%b %d %Y',
    '%b %d',
    '%H:%M:%S',
    '%I:%M:%S%p',
    '%H:%M',
    '%I:%M%p',
    )

extendeddateformats = defaultdateformats + (
    "%Y",
    "%Y-%m",
    "%b",
    "%b %Y",
    )

def cachefunc(func):
    '''cache the result of function calls'''
    # XXX doesn't handle keyword args
    cache = {}
    if func.func_code.co_argcount == 1:
        # we gain a small amount of time because
        # we don't need to pack/unpack the list
        def f(arg):
            if arg not in cache:
                cache[arg] = func(arg)
            return cache[arg]
    else:
        def f(*args):
            if args not in cache:
                cache[args] = func(*args)
            return cache[args]

    return f

def lrucachefunc(func):
    '''cache most recent results of function calls'''
    cache = {}
    order = []
    if func.func_code.co_argcount == 1:
        def f(arg):
            if arg not in cache:
                if len(cache) > 20:
                    del cache[order.pop(0)]
                cache[arg] = func(arg)
            else:
                order.remove(arg)
            order.append(arg)
            return cache[arg]
    else:
        def f(*args):
            if args not in cache:
                if len(cache) > 20:
                    del cache[order.pop(0)]
                cache[args] = func(*args)
            else:
                order.remove(args)
            order.append(args)
            return cache[args]

    return f
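
# A minimal usage sketch (illustrative only, not part of this module):
# cachefunc memoizes on its positional arguments forever, while
# lrucachefunc keeps roughly the 20 most recently used entries.
#
#   def double(x):
#       return x * 2           # stand-in for an expensive computation
#   cacheddouble = cachefunc(double)
#   cacheddouble(21)           # computed and stored in the cache dict
#   cacheddouble(21)           # answered from the cache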

class propertycache(object):
    def __init__(self, func):
        self.func = func
        self.name = func.__name__
    def __get__(self, obj, type=None):
        result = self.func(obj)
        setattr(obj, self.name, result)
        return result
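
# A minimal usage sketch (illustrative only, not part of this module):
# propertycache runs the decorated method once, then stores the result as
# a plain instance attribute so later reads bypass the descriptor entirely.
#
#   class example(object):
#       @propertycache
#       def expensive(self):
#           return sum(xrange(10**6))   # runs only on first access
#
#   e = example()
#   e.expensive    # computed here and cached on the instance
#   e.expensive    # plain attribute lookup from now on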

def pipefilter(s, cmd):
    '''filter string S through command CMD, returning its output'''
    p = subprocess.Popen(cmd, shell=True, close_fds=closefds,
                         stdin=subprocess.PIPE, stdout=subprocess.PIPE)
    pout, perr = p.communicate(s)
    return pout

def tempfilter(s, cmd):
    '''filter string S through a pair of temporary files with CMD.
    CMD is used as a template to create the real command to be run,
    with the strings INFILE and OUTFILE replaced by the real names of
    the temporary files generated.'''
    inname, outname = None, None
    try:
        infd, inname = tempfile.mkstemp(prefix='hg-filter-in-')
        fp = os.fdopen(infd, 'wb')
        fp.write(s)
        fp.close()
        outfd, outname = tempfile.mkstemp(prefix='hg-filter-out-')
        os.close(outfd)
        cmd = cmd.replace('INFILE', inname)
        cmd = cmd.replace('OUTFILE', outname)
        code = os.system(cmd)
        if sys.platform == 'OpenVMS' and code & 1:
            code = 0
        if code:
            raise Abort(_("command '%s' failed: %s") %
                        (cmd, explain_exit(code)))
        return open(outname, 'rb').read()
    finally:
        try:
            if inname:
                os.unlink(inname)
        except:
            pass
        try:
            if outname:
                os.unlink(outname)
        except:
            pass

filtertable = {
    'tempfile:': tempfilter,
    'pipe:': pipefilter,
    }

def filter(s, cmd):
    "filter a string through a command that transforms its input to its output"
    for name, fn in filtertable.iteritems():
        if cmd.startswith(name):
            return fn(s, cmd[len(name):].lstrip())
    return pipefilter(s, cmd)
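
# A minimal usage sketch (illustrative only; the commands assume a POSIX
# shell): filter() dispatches on the command prefix. 'pipe:' feeds the
# string to the command on stdin, 'tempfile:' substitutes INFILE/OUTFILE
# in the command, and a bare command defaults to the pipe variant.
#
#   filter('some text\n', 'pipe: tr a-z A-Z')
#   filter('some text\n', 'tempfile: sort INFILE > OUTFILE')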

def binary(s):
    """return true if a string is binary data"""
    return bool(s and '\0' in s)

def increasingchunks(source, min=1024, max=65536):
    '''return no less than min bytes per chunk while data remains,
    doubling min after each chunk until it reaches max'''
    def log2(x):
        if not x:
            return 0
        i = 0
        while x:
            x >>= 1
            i += 1
        return i - 1

    buf = []
    blen = 0
    for chunk in source:
        buf.append(chunk)
        blen += len(chunk)
        if blen >= min:
            if min < max:
                min = min << 1
                nmin = 1 << log2(blen)
                if nmin > min:
                    min = nmin
                if min > max:
                    min = max
            yield ''.join(buf)
            blen = 0
            buf = []
    if buf:
        yield ''.join(buf)
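
# A minimal usage sketch (illustrative only, not part of this module):
# increasingchunks regroups an iterator of small strings into chunks that
# start around 1k and double up to 64k, keeping write() call counts low.
#
#   pieces = ('x' * 100 for i in xrange(10000))
#   for chunk in increasingchunks(pieces):
#       outfile.write(chunk)   # 'outfile' stands in for any writable file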

Abort = error.Abort

def always(fn):
    return True

def never(fn):
    return False

def pathto(root, n1, n2):
    '''return the relative path from one place to another.
    root should use os.sep to separate directories
    n1 should use os.sep to separate directories
    n2 should use "/" to separate directories
    returns an os.sep-separated path.

    If n1 is a relative path, it's assumed it's
    relative to root.
    n2 should always be relative to root.
    '''
    if not n1:
        return localpath(n2)
    if os.path.isabs(n1):
        if os.path.splitdrive(root)[0] != os.path.splitdrive(n1)[0]:
            return os.path.join(root, localpath(n2))
        n2 = '/'.join((pconvert(root), n2))
    a, b = splitpath(n1), n2.split('/')
    a.reverse()
    b.reverse()
    while a and b and a[-1] == b[-1]:
        a.pop()
        b.pop()
    b.reverse()
    return os.sep.join((['..'] * len(a)) + b) or '.'
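
# A worked example (illustrative only, not part of this module): the common
# prefix of n1 and n2 is stripped, then one '..' is emitted for each
# component of n1 that remains.
#
#   pathto('/repo', 'a/b', 'a/c/d')   # -> '../c/d' on POSIX
#   pathto('/repo', '', 'a/c/d')      # -> 'a/c/d' (empty n1, localpath(n2))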

def canonpath(root, cwd, myname, auditor=None):
    """return the canonical path of myname, given cwd and root"""
    if endswithsep(root):
        rootsep = root
    else:
        rootsep = root + os.sep
    name = myname
    if not os.path.isabs(name):
        name = os.path.join(root, cwd, name)
    name = os.path.normpath(name)
    if auditor is None:
        auditor = path_auditor(root)
    if name != rootsep and name.startswith(rootsep):
        name = name[len(rootsep):]
        auditor(name)
        return pconvert(name)
    elif name == root:
        return ''
    else:
        # Determine whether `name' is in the hierarchy at or beneath `root',
        # by iterating name=dirname(name) until that causes no change (can't
        # check name == '/', because that doesn't work on windows). For each
        # `name', compare dev/inode numbers. If they match, the list `rel'
        # holds the reversed list of components making up the relative file
        # name we want.
        root_st = os.stat(root)
        rel = []
        while True:
            try:
                name_st = os.stat(name)
            except OSError:
                break
            if samestat(name_st, root_st):
                if not rel:
                    # name was actually the same as root (maybe a symlink)
                    return ''
                rel.reverse()
                name = os.path.join(*rel)
                auditor(name)
                return pconvert(name)
            dirname, basename = os.path.split(name)
            rel.append(basename)
            if dirname == name:
                break
            name = dirname

        raise Abort('%s not under root' % myname)

_hgexecutable = None

def main_is_frozen():
    """return True if we are a frozen executable.

    The code supports py2exe (most common, Windows only) and tools/freeze
    (portable, not much used).
    """
    return (hasattr(sys, "frozen") or # new py2exe
            hasattr(sys, "importers") or # old py2exe
            imp.is_frozen("__main__")) # tools/freeze

def hgexecutable():
    """return location of the 'hg' executable.

    Defaults to $HG or 'hg' in the search path.
    """
    if _hgexecutable is None:
        hg = os.environ.get('HG')
        if hg:
            set_hgexecutable(hg)
        elif main_is_frozen():
            set_hgexecutable(sys.executable)
        else:
            exe = find_exe('hg') or os.path.basename(sys.argv[0])
            set_hgexecutable(exe)
    return _hgexecutable

def set_hgexecutable(path):
    """set location of the 'hg' executable"""
    global _hgexecutable
    _hgexecutable = path

def system(cmd, environ={}, cwd=None, onerr=None, errprefix=None, out=None):
    '''enhanced shell command execution.
    run with environment maybe modified, maybe in different dir.

    if command fails and onerr is None, return status. if ui object,
    print error message and return status, else raise onerr object as
    exception.

    if out is specified, it is assumed to be a file-like object that has a
    write() method. stdout and stderr will be redirected to out.'''
    def py2shell(val):
        'convert python object into string that is useful to shell'
        if val is None or val is False:
            return '0'
        if val is True:
            return '1'
        return str(val)
    origcmd = cmd
    if os.name == 'nt':
        cmd = '"%s"' % cmd
    env = dict(os.environ)
    env.update((k, py2shell(v)) for k, v in environ.iteritems())
    env['HG'] = hgexecutable()
    if out is None:
        rc = subprocess.call(cmd, shell=True, close_fds=closefds,
                             env=env, cwd=cwd)
    else:
        proc = subprocess.Popen(cmd, shell=True, close_fds=closefds,
                                env=env, cwd=cwd, stdout=subprocess.PIPE,
                                stderr=subprocess.STDOUT)
        for line in proc.stdout:
            out.write(line)
        proc.wait()
        rc = proc.returncode
    if sys.platform == 'OpenVMS' and rc & 1:
        rc = 0
    if rc and onerr:
        errmsg = '%s %s' % (os.path.basename(origcmd.split(None, 1)[0]),
                            explain_exit(rc)[0])
        if errprefix:
            errmsg = '%s: %s' % (errprefix, errmsg)
        try:
            onerr.warn(errmsg + '\n')
        except AttributeError:
            raise onerr(errmsg)
    return rc
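
# A minimal usage sketch (illustrative only, not part of this module):
# system() runs the command through the shell with HG (and any extra
# variables) exported, and reports failure via the return code or onerr.
#
#   rc = system('hg id -i', environ={'HGPLAIN': '1'})
#   if rc:
#       pass   # non-zero exit status; handle it at the call site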

def checksignature(func):
    '''wrap a function with code to check for calling errors'''
    def check(*args, **kwargs):
        try:
            return func(*args, **kwargs)
        except TypeError:
            if len(traceback.extract_tb(sys.exc_info()[2])) == 1:
                raise error.SignatureError
            raise

    return check

def unlink(f):
    """unlink and remove the directory if it is empty"""
    os.unlink(f)
    # try removing directories that might now be empty
    try:
        os.removedirs(os.path.dirname(f))
    except OSError:
        pass

def copyfile(src, dest):
    "copy a file, preserving mode and atime/mtime"
    if os.path.islink(src):
        try:
            os.unlink(dest)
        except:
            pass
        os.symlink(os.readlink(src), dest)
    else:
        try:
            shutil.copyfile(src, dest)
            shutil.copystat(src, dest)
        except shutil.Error, inst:
            raise Abort(str(inst))

def copyfiles(src, dst, hardlink=None):
    """Copy a directory tree using hardlinks if possible"""

    if hardlink is None:
        hardlink = (os.stat(src).st_dev ==
                    os.stat(os.path.dirname(dst)).st_dev)

    num = 0
    if os.path.isdir(src):
        os.mkdir(dst)
        for name, kind in osutil.listdir(src):
            srcname = os.path.join(src, name)
            dstname = os.path.join(dst, name)
            hardlink, n = copyfiles(srcname, dstname, hardlink)
            num += n
    else:
        if hardlink:
            try:
                os_link(src, dst)
            except (IOError, OSError):
                hardlink = False
                shutil.copy(src, dst)
        else:
            shutil.copy(src, dst)
        num += 1

    return hardlink, num

class path_auditor(object):
    '''ensure that a filesystem path contains no banned components.
    the following properties of a path are checked:

    - under top-level .hg
    - starts at the root of a windows drive
    - contains ".."
    - traverses a symlink (e.g. a/symlink_here/b)
    - inside a nested repository (a callback can be used to approve
      some nested repositories, e.g., subrepositories)
    '''

    def __init__(self, root, callback=None):
        self.audited = set()
        self.auditeddir = set()
        self.root = root
        self.callback = callback

    def __call__(self, path):
        if path in self.audited:
            return
        normpath = os.path.normcase(path)
        parts = splitpath(normpath)
        if (os.path.splitdrive(path)[0]
            or parts[0].lower() in ('.hg', '.hg.', '')
            or os.pardir in parts):
            raise Abort(_("path contains illegal component: %s") % path)
        if '.hg' in path.lower():
            lparts = [p.lower() for p in parts]
            for p in '.hg', '.hg.':
                if p in lparts[1:]:
                    pos = lparts.index(p)
                    base = os.path.join(*parts[:pos])
                    raise Abort(_('path %r is inside repo %r') % (path, base))
        def check(prefix):
            curpath = os.path.join(self.root, prefix)
            try:
                st = os.lstat(curpath)
            except OSError, err:
                # EINVAL can be raised as invalid path syntax under win32.
                # They must be ignored so that patterns can be checked too.
                if err.errno not in (errno.ENOENT, errno.ENOTDIR, errno.EINVAL):
                    raise
            else:
                if stat.S_ISLNK(st.st_mode):
                    raise Abort(_('path %r traverses symbolic link %r') %
                                (path, prefix))
                elif (stat.S_ISDIR(st.st_mode) and
                      os.path.isdir(os.path.join(curpath, '.hg'))):
                    if not self.callback or not self.callback(curpath):
                        raise Abort(_('path %r is inside repo %r') %
                                    (path, prefix))
        parts.pop()
        prefixes = []
        while parts:
            prefix = os.sep.join(parts)
            if prefix in self.auditeddir:
                break
            check(prefix)
            prefixes.append(prefix)
            parts.pop()

        self.audited.add(path)
        # only add prefixes to the cache after checking everything: we don't
        # want to add "foo/bar/baz" before checking if there's a "foo/.hg"
        self.auditeddir.update(prefixes)
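
# A minimal usage sketch (illustrative only, not part of this module):
# one path_auditor is built per repository root and called with
# repo-relative paths; bad paths raise Abort, good ones are cached.
#
#   audit = path_auditor('/path/to/repo')   # hypothetical root
#   audit('dir/file.txt')                   # ok, remembered in self.audited
#   audit('../escape')                      # raises Abort: contains '..'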

def nlinks(pathname):
    """Return number of hardlinks for the given file."""
    return os.lstat(pathname).st_nlink

if hasattr(os, 'link'):
    os_link = os.link
else:
    def os_link(src, dst):
        raise OSError(0, _("Hardlinks not supported"))

def lookup_reg(key, name=None, scope=None):
    return None

def hidewindow():
    """Hide current shell window.

    Used to hide the window opened when starting asynchronous
    child process under Windows, unneeded on other systems.
    """
    pass

if os.name == 'nt':
    from windows import *
else:
    from posix import *

def makelock(info, pathname):
    try:
        return os.symlink(info, pathname)
    except OSError, why:
        if why.errno == errno.EEXIST:
            raise
    except AttributeError: # no symlink in os
        pass

    ld = os.open(pathname, os.O_CREAT | os.O_WRONLY | os.O_EXCL)
    os.write(ld, info)
    os.close(ld)

def readlock(pathname):
    try:
        return os.readlink(pathname)
    except OSError, why:
        if why.errno not in (errno.EINVAL, errno.ENOSYS):
            raise
    except AttributeError: # no symlink in os
        pass
    return posixfile(pathname).read()

def fstat(fp):
    '''stat file object that may not have fileno method.'''
    try:
        return os.fstat(fp.fileno())
    except AttributeError:
        return os.stat(fp.name)

# File system features

def checkcase(path):
    """
    Check whether the given path is on a case-sensitive filesystem

    Requires a path (like /foo/.hg) ending with a foldable final
    directory component.
    """
    s1 = os.stat(path)
    d, b = os.path.split(path)
    p2 = os.path.join(d, b.upper())
    if path == p2:
        p2 = os.path.join(d, b.lower())
    try:
        s2 = os.stat(p2)
        if s2 == s1:
            return False
        return True
    except:
        return True

_fspathcache = {}
def fspath(name, root):
    '''Get name in the case stored in the filesystem

    The name is either relative to root, or it is an absolute path starting
    with root. Note that this function is unnecessary, and should not be
    called, for case-sensitive filesystems (simply because it's expensive).
    '''
    # If name is absolute, make it relative
    if name.lower().startswith(root.lower()):
        l = len(root)
        if name[l] == os.sep or name[l] == os.altsep:
            l = l + 1
        name = name[l:]

    if not os.path.exists(os.path.join(root, name)):
        return None

    seps = os.sep
    if os.altsep:
        seps = seps + os.altsep
    # Protect backslashes. This gets silly very quickly.
    seps = seps.replace('\\', '\\\\')
    pattern = re.compile(r'([^%s]+)|([%s]+)' % (seps, seps))
    dir = os.path.normcase(os.path.normpath(root))
    result = []
    for part, sep in pattern.findall(name):
        if sep:
            result.append(sep)
            continue

        if dir not in _fspathcache:
            _fspathcache[dir] = os.listdir(dir)
        contents = _fspathcache[dir]

        lpart = part.lower()
        lenp = len(part)
        for n in contents:
            if lenp == len(n) and n.lower() == lpart:
                result.append(n)
                break
        else:
            # Cannot happen, as the file exists!
            result.append(part)
        dir = os.path.join(dir, lpart)

    return ''.join(result)

def checkexec(path):
    """
    Check whether the given path is on a filesystem with UNIX-like exec flags

    Requires a directory (like /foo/.hg)
    """

    # VFAT on some Linux versions can flip mode but it doesn't persist
    # a FS remount. Frequently we can detect it if files are created
    # with exec bit on.

    try:
        EXECFLAGS = stat.S_IXUSR | stat.S_IXGRP | stat.S_IXOTH
        fh, fn = tempfile.mkstemp(dir=path, prefix='hg-checkexec-')
        try:
            os.close(fh)
            m = os.stat(fn).st_mode & 0777
            new_file_has_exec = m & EXECFLAGS
            os.chmod(fn, m ^ EXECFLAGS)
            exec_flags_cannot_flip = ((os.stat(fn).st_mode & 0777) == m)
        finally:
            os.unlink(fn)
    except (IOError, OSError):
        # we don't care, the user probably won't be able to commit anyway
        return False
    return not (new_file_has_exec or exec_flags_cannot_flip)

def checklink(path):
    """check whether the given path is on a symlink-capable filesystem"""
    # mktemp is not racy because symlink creation will fail if the
    # file already exists
    name = tempfile.mktemp(dir=path, prefix='hg-checklink-')
    try:
        os.symlink(".", name)
        os.unlink(name)
        return True
    except (OSError, AttributeError):
        return False

def needbinarypatch():
    """return True if patches should be applied in binary mode by default."""
    return os.name == 'nt'

def endswithsep(path):
    '''Check path ends with os.sep or os.altsep.'''
    return path.endswith(os.sep) or os.altsep and path.endswith(os.altsep)

def splitpath(path):
    '''Split path by os.sep.
    Note that this function does not use os.altsep because this is
    an alternative of simple "xxx.split(os.sep)".
    It is recommended to use os.path.normpath() before using this
    function if needed.'''
    return path.split(os.sep)

def gui():
    '''Are we running in a GUI?'''
    return os.name == "nt" or os.name == "mac" or os.environ.get("DISPLAY")

def mktempcopy(name, emptyok=False, createmode=None):
    """Create a temporary file with the same contents from name

    The permission bits are copied from the original file.

    If the temporary file is going to be truncated immediately, you
    can use emptyok=True as an optimization.

    Returns the name of the temporary file.
    """
    d, fn = os.path.split(name)
    fd, temp = tempfile.mkstemp(prefix='.%s-' % fn, dir=d)
    os.close(fd)
    # Temporary files are created with mode 0600, which is usually not
    # what we want. If the original file already exists, just copy
    # its mode. Otherwise, manually obey umask.
    try:
        st_mode = os.lstat(name).st_mode & 0777
    except OSError, inst:
        if inst.errno != errno.ENOENT:
            raise
        st_mode = createmode
        if st_mode is None:
            st_mode = ~umask
        st_mode &= 0666
    os.chmod(temp, st_mode)
    if emptyok:
        return temp
    try:
        try:
            ifp = posixfile(name, "rb")
        except IOError, inst:
            if inst.errno == errno.ENOENT:
                return temp
            if not getattr(inst, 'filename', None):
                inst.filename = name
            raise
        ofp = posixfile(temp, "wb")
        for chunk in filechunkiter(ifp):
            ofp.write(chunk)
        ifp.close()
        ofp.close()
    except:
        try: os.unlink(temp)
        except: pass
        raise
    return temp

class atomictempfile(object):
    """file-like object that atomically updates a file

    All writes will be redirected to a temporary copy of the original
    file. When rename is called, the copy is renamed to the original
    name, making the changes visible.
    """
    def __init__(self, name, mode='w+b', createmode=None):
        self.__name = name
        self._fp = None
        self.temp = mktempcopy(name, emptyok=('w' in mode),
                               createmode=createmode)
        self._fp = posixfile(self.temp, mode)

    def __getattr__(self, name):
        return getattr(self._fp, name)

    def rename(self):
        if not self._fp.closed:
            self._fp.close()
            rename(self.temp, localpath(self.__name))

    def __del__(self):
        if not self._fp:
            return
        if not self._fp.closed:
            try:
                os.unlink(self.temp)
            except: pass
            self._fp.close()
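
# A minimal usage sketch (illustrative only, not part of this module):
# writes go to a temporary copy beside the target; rename() publishes the
# new content, and __del__ discards the temporary file on error paths.
#
#   f = atomictempfile('/path/to/repo/.hg/branch', 'w+b')   # hypothetical path
#   f.write('default\n')
#   f.rename()   # the temporary file replaces the original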

def makedirs(name, mode=None):
    """recursive directory creation with parent mode inheritance"""
    try:
        os.mkdir(name)
        if mode is not None:
            os.chmod(name, mode)
        return
    except OSError, err:
        if err.errno == errno.EEXIST:
            return
        if err.errno != errno.ENOENT:
            raise
    parent = os.path.abspath(os.path.dirname(name))
    makedirs(parent, mode)
    makedirs(name, mode)

class opener(object):
    """Open files relative to a base directory

    This class is used to hide the details of COW semantics and
    remote file access from higher level code.
    """
    def __init__(self, base, audit=True):
        self.base = base
        if audit:
            self.auditor = path_auditor(base)
        else:
            self.auditor = always
        self.createmode = None

    @propertycache
    def _can_symlink(self):
        return checklink(self.base)

    def _fixfilemode(self, name):
        if self.createmode is None:
            return
        os.chmod(name, self.createmode & 0666)

    def __call__(self, path, mode="r", text=False, atomictemp=False):
        self.auditor(path)
        f = os.path.join(self.base, path)

        if not text and "b" not in mode:
            mode += "b" # for that other OS

        nlink = -1
        if mode not in ("r", "rb"):
            try:
                nlink = nlinks(f)
            except OSError:
                nlink = 0
                d = os.path.dirname(f)
                if not os.path.isdir(d):
                    makedirs(d, self.createmode)
            if atomictemp:
                return atomictempfile(f, mode, self.createmode)
            if nlink > 1:
                rename(mktempcopy(f), f)
        fp = posixfile(f, mode)
        if nlink == 0:
            self._fixfilemode(f)
        return fp

    def symlink(self, src, dst):
        self.auditor(dst)
        linkname = os.path.join(self.base, dst)
        try:
            os.unlink(linkname)
        except OSError:
            pass

        dirname = os.path.dirname(linkname)
        if not os.path.exists(dirname):
            makedirs(dirname, self.createmode)

        if self._can_symlink:
            try:
                os.symlink(src, linkname)
            except OSError, err:
                raise OSError(err.errno, _('could not symlink to %r: %s') %
                              (src, err.strerror), linkname)
        else:
            f = self(dst, "w")
            f.write(src)
            f.close()
            self._fixfilemode(dst)
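
# A minimal usage sketch (illustrative only, not part of this module):
# an opener confines file access to one base directory and audits every
# relative path before touching the filesystem.
#
#   op = opener('/path/to/repo/.hg')   # hypothetical base directory
#   fp = op('requires', 'r')           # opened as 'rb' for that other OS
#   wp = op('branch', 'w', atomictemp=True)
#   wp.write('default\n')
#   wp.rename()                        # atomictempfile publishes the write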

class chunkbuffer(object):
    """Allow arbitrary sized chunks of data to be efficiently read from an
    iterator over chunks of arbitrary size."""

    def __init__(self, in_iter):
        """in_iter is the iterator that's iterating over the input chunks.
        targetsize is how big a buffer to try to maintain."""
        def splitbig(chunks):
            for chunk in chunks:
                if len(chunk) > 2**20:
                    pos = 0
                    while pos < len(chunk):
                        end = pos + 2 ** 18
                        yield chunk[pos:end]
                        pos = end
                else:
                    yield chunk
        self.iter = splitbig(in_iter)
        self._queue = []

    def read(self, l):
        """Read L bytes of data from the iterator of chunks of data.
        Returns less than L bytes if the iterator runs dry."""
        left = l
        buf = ''
        queue = self._queue
        while left > 0:
            # refill the queue
            if not queue:
                target = 2**18
                for chunk in self.iter:
                    queue.append(chunk)
                    target -= len(chunk)
                    if target <= 0:
                        break
                if not queue:
                    break

            chunk = queue.pop(0)
            left -= len(chunk)
            if left < 0:
                queue.insert(0, chunk[left:])
                buf += chunk[:left]
            else:
                buf += chunk

        return buf

def filechunkiter(f, size=65536, limit=None):
    """Create a generator that produces the data in the file size
    (default 65536) bytes at a time, up to optional limit (default is
    to read all data). Chunks may be less than size bytes if the
    chunk is the last chunk in the file, or the file is a socket or
    some other type of file that sometimes reads less data than is
    requested."""
    assert size >= 0
    assert limit is None or limit >= 0
    while True:
        if limit is None:
            nbytes = size
        else:
            nbytes = min(limit, size)
        s = nbytes and f.read(nbytes)
        if not s:
            break
        if limit:
            limit -= len(s)
        yield s
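
# A minimal usage sketch (illustrative only, not part of this module):
# filechunkiter slices a file into fixed-size chunks, and chunkbuffer lets
# a consumer read exact byte counts back out of any chunk iterator.
#
#   fp = posixfile('/path/to/somefile', 'rb')   # hypothetical file
#   cb = chunkbuffer(filechunkiter(fp, size=4096))
#   header = cb.read(12)   # exactly 12 bytes unless the file is shorter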

def makedate():
    lt = time.localtime()
    if lt[8] == 1 and time.daylight:
        tz = time.altzone
    else:
        tz = time.timezone
    return time.mktime(lt), tz

def datestr(date=None, format='%a %b %d %H:%M:%S %Y %1%2'):
    """represent a (unixtime, offset) tuple as a localized time.
    unixtime is seconds since the epoch, and offset is the time zone's
    number of seconds away from UTC. if timezone is false, do not
    append time zone to string."""
    t, tz = date or makedate()
    if "%1" in format or "%2" in format:
        sign = (tz > 0) and "-" or "+"
        minutes = abs(tz) // 60
        format = format.replace("%1", "%c%02d" % (sign, minutes // 60))
        format = format.replace("%2", "%02d" % (minutes % 60))
    s = time.strftime(format, time.gmtime(float(t) - tz))
    return s

def shortdate(date=None):
    """turn (timestamp, tzoff) tuple into ISO 8601 date."""
    return datestr(date, format='%Y-%m-%d')
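
# A worked example (illustrative only, not part of this module): %1 and %2
# expand to the hour and minute fields of the UTC offset, so the default
# format renders dates the way hg shows them (day names per the C locale).
#
#   datestr((1282000000, -7200))    # -> 'Tue Aug 17 01:06:40 2010 +0200'
#   shortdate((1282000000, -7200))  # -> '2010-08-17'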
1000
1000
def strdate(string, format, defaults=[]):
    """parse a localized time string and return a (unixtime, offset) tuple.
    if the string cannot be parsed, ValueError is raised."""
    def timezone(string):
        tz = string.split()[-1]
        if tz[0] in "+-" and len(tz) == 5 and tz[1:].isdigit():
            sign = (tz[0] == "+") and 1 or -1
            hours = int(tz[1:3])
            minutes = int(tz[3:5])
            return -sign * (hours * 60 + minutes) * 60
        if tz == "GMT" or tz == "UTC":
            return 0
        return None

    # NOTE: unixtime = localunixtime + offset
    offset, date = timezone(string), string
    if offset is not None:
        date = " ".join(string.split()[:-1])

    # add missing elements from defaults
    for part in defaults:
        found = [True for p in part if ("%" + p) in format]
        if not found:
            date += "@" + defaults[part]
            format += "@%" + part[0]

    timetuple = time.strptime(date, format)
    localunixtime = int(calendar.timegm(timetuple))
    if offset is None:
        # local timezone
        unixtime = int(time.mktime(timetuple))
        offset = unixtime - localunixtime
    else:
        unixtime = localunixtime + offset
    return unixtime, offset

def parsedate(date, formats=None, defaults=None):
    """parse a localized date/time string and return a (unixtime, offset) tuple.

    The date may be a "unixtime offset" string or in one of the specified
    formats. If the date already is a (unixtime, offset) tuple, it is returned.
    """
    if not date:
        return 0, 0
    if isinstance(date, tuple) and len(date) == 2:
        return date
    if not formats:
        formats = defaultdateformats
    date = date.strip()
    try:
        when, offset = map(int, date.split(' '))
    except ValueError:
        # fill out defaults
        if not defaults:
            defaults = {}
        now = makedate()
        for part in "d mb yY HI M S".split():
            if part not in defaults:
                if part[0] in "HMS":
                    defaults[part] = "00"
                else:
                    defaults[part] = datestr(now, "%" + part[0])

        for format in formats:
            try:
                when, offset = strdate(date, format, defaults)
            except (ValueError, OverflowError):
                pass
            else:
                break
        else:
            raise Abort(_('invalid date: %r ') % date)
    # validate explicit (probably user-specified) date and
    # time zone offset. values must fit in signed 32 bits for
    # current 32-bit linux runtimes. timezones go from UTC-12
    # to UTC+14
    if abs(when) > 0x7fffffff:
        raise Abort(_('date exceeds 32 bits: %d') % when)
    if offset < -50400 or offset > 43200:
        raise Abort(_('impossible time zone offset: %d') % offset)
    return when, offset

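# Illustrative usage of parsedate() (a sketch): a "unixtime offset" string
# and an already-parsed tuple both come back as a (unixtime, offset) tuple,
# and an empty date means "epoch, UTC".
#
#   >>> parsedate('0 0')
#   (0, 0)
#   >>> parsedate((1187280672, -7200))
#   (1187280672, -7200)
#   >>> parsedate('')
#   (0, 0)
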
def matchdate(date):
    """Return a function that matches a given date match specifier

    Formats include:

    '{date}' match a given date to the accuracy provided

    '<{date}' on or before a given date

    '>{date}' on or after a given date

    """

    def lower(date):
        d = dict(mb="1", d="1")
        return parsedate(date, extendeddateformats, d)[0]

    def upper(date):
        d = dict(mb="12", HI="23", M="59", S="59")
        for days in "31 30 29".split():
            try:
                d["d"] = days
                return parsedate(date, extendeddateformats, d)[0]
            except Exception:
                # parsedate turns an impossible day for this month into an
                # Abort, which must still be caught here; fall through and
                # try the next day-of-month candidate
                pass
        d["d"] = "28"
        return parsedate(date, extendeddateformats, d)[0]

    date = date.strip()
    if date[0] == "<":
        when = upper(date[1:])
        return lambda x: x <= when
    elif date[0] == ">":
        when = lower(date[1:])
        return lambda x: x >= when
    elif date[0] == "-":
        try:
            days = int(date[1:])
        except ValueError:
            raise Abort(_("invalid day spec: %s") % date[1:])
        when = makedate()[0] - days * 3600 * 24
        return lambda x: x >= when
    elif " to " in date:
        a, b = date.split(" to ")
        start, stop = lower(a), upper(b)
        return lambda x: x >= start and x <= stop
    else:
        start, stop = lower(date), upper(date)
        return lambda x: x >= start and x <= stop

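# Illustrative usage of matchdate() (a sketch, assuming '%Y-%m-%d' is among
# the extended date formats): the returned predicate takes a unixtime and
# reports whether it falls in the requested range.  The dates below are more
# than a month apart, so the results do not depend on the local time zone.
#
#   >>> m = matchdate('>2006-02-01')
#   >>> m(parsedate('2006-03-15')[0])
#   True
#   >>> m(parsedate('2005-12-31')[0])
#   False
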
def shortuser(user):
    """Return a short representation of a user name or email address."""
    f = user.find('@')
    if f >= 0:
        user = user[:f]
    f = user.find('<')
    if f >= 0:
        user = user[f + 1:]
    f = user.find(' ')
    if f >= 0:
        user = user[:f]
    f = user.find('.')
    if f >= 0:
        user = user[:f]
    return user

def email(author):
    '''get email of author.'''
    r = author.find('>')
    if r == -1:
        r = None
    return author[author.find('<') + 1:r]

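# Illustrative behaviour of shortuser() and email() (a sketch): both work on
# the conventional "Name <user@host>" author form.
#
#   >>> shortuser('John Doe <john.doe@example.com>')
#   'john'
#   >>> email('John Doe <john.doe@example.com>')
#   'john.doe@example.com'
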
def ellipsis(text, maxlength=400):
    """Trim string to at most maxlength (default: 400) characters."""
    if len(text) <= maxlength:
        return text
    else:
        return "%s..." % (text[:maxlength - 3])

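# For example (illustrative only), a string longer than maxlength is cut so
# that the result, including the "...", stays within the limit:
#
#   >>> ellipsis('abcdefghij', 8)
#   'abcde...'
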
def walkrepos(path, followsym=False, seen_dirs=None, recurse=False):
    '''yield every hg repository under path, recursively.'''
    def errhandler(err):
        if err.filename == path:
            raise err
    if followsym and hasattr(os.path, 'samestat'):
        def _add_dir_if_not_there(dirlst, dirname):
            match = False
            samestat = os.path.samestat
            dirstat = os.stat(dirname)
            for lstdirstat in dirlst:
                if samestat(dirstat, lstdirstat):
                    match = True
                    break
            if not match:
                dirlst.append(dirstat)
            return not match
    else:
        followsym = False

    if (seen_dirs is None) and followsym:
        seen_dirs = []
        _add_dir_if_not_there(seen_dirs, path)
    for root, dirs, files in os.walk(path, topdown=True, onerror=errhandler):
        dirs.sort()
        if '.hg' in dirs:
            yield root # found a repository
            qroot = os.path.join(root, '.hg', 'patches')
            if os.path.isdir(os.path.join(qroot, '.hg')):
                yield qroot # we have a patch queue repo here
            if recurse:
                # avoid recursing inside the .hg directory
                dirs.remove('.hg')
            else:
                dirs[:] = [] # don't descend further
        elif followsym:
            newdirs = []
            for d in dirs:
                fname = os.path.join(root, d)
                if _add_dir_if_not_there(seen_dirs, fname):
                    if os.path.islink(fname):
                        for hgname in walkrepos(fname, True, seen_dirs):
                            yield hgname
                    else:
                        newdirs.append(d)
            dirs[:] = newdirs

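# Illustrative usage of walkrepos() (a sketch; '/srv/hg' is a hypothetical
# directory): print every repository below a path, following symlinks.
#
#   >>> for repo in walkrepos('/srv/hg', followsym=True, recurse=True):
#   ...     print repo
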
_rcpath = None

def os_rcpath():
    '''return default os-specific hgrc search path'''
    path = system_rcpath()
    path.extend(user_rcpath())
    path = [os.path.normpath(f) for f in path]
    return path

def rcpath():
    '''return hgrc search path. if env var HGRCPATH is set, use it.
    for each item in path, if directory, use files ending in .rc,
    else use item.
    make HGRCPATH empty to only look in .hg/hgrc of current repo.
    if no HGRCPATH, use default os-specific path.'''
    global _rcpath
    if _rcpath is None:
        if 'HGRCPATH' in os.environ:
            _rcpath = []
            for p in os.environ['HGRCPATH'].split(os.pathsep):
                if not p:
                    continue
                p = expandpath(p)
                if os.path.isdir(p):
                    for f, kind in osutil.listdir(p):
                        if f.endswith('.rc'):
                            _rcpath.append(os.path.join(p, f))
                else:
                    _rcpath.append(p)
        else:
            _rcpath = os_rcpath()
    return _rcpath

def bytecount(nbytes):
    '''return byte count formatted as readable string, with units'''

    units = (
        (100, 1 << 30, _('%.0f GB')),
        (10, 1 << 30, _('%.1f GB')),
        (1, 1 << 30, _('%.2f GB')),
        (100, 1 << 20, _('%.0f MB')),
        (10, 1 << 20, _('%.1f MB')),
        (1, 1 << 20, _('%.2f MB')),
        (100, 1 << 10, _('%.0f KB')),
        (10, 1 << 10, _('%.1f KB')),
        (1, 1 << 10, _('%.2f KB')),
        (1, 1, _('%.0f bytes')),
        )

    for multiplier, divisor, format in units:
        if nbytes >= divisor * multiplier:
            return format % (nbytes / float(divisor))
    return units[-1][2] % nbytes

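# Illustrative output of bytecount() (a sketch, assuming the untranslated
# format strings): the first unit whose threshold is reached wins, with more
# decimals shown for small values in that unit.
#
#   >>> bytecount(1)
#   '1 bytes'
#   >>> bytecount(64 * 1024)
#   '64.0 KB'
#   >>> bytecount(5 * 1024 * 1024)
#   '5.00 MB'
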
def drop_scheme(scheme, path):
    sc = scheme + ':'
    if path.startswith(sc):
        path = path[len(sc):]
        if path.startswith('//'):
            if scheme == 'file':
                i = path.find('/', 2)
                if i == -1:
                    return ''
                # On Windows, absolute paths are rooted at the current drive
                # root. On POSIX they are rooted at the file system root.
                if os.name == 'nt':
                    droot = os.path.splitdrive(os.getcwd())[0] + '/'
                    path = os.path.join(droot, path[i + 1:])
                else:
                    path = path[i:]
            else:
                path = path[2:]
    return path

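# For example (illustrative only), on a POSIX system:
#
#   >>> drop_scheme('file', 'file:///tmp/repo')
#   '/tmp/repo'
#   >>> drop_scheme('static-http', 'static-http://example.com/repo')
#   'example.com/repo'
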
def uirepr(s):
    # Avoid double backslash in Windows path repr()
    return repr(s).replace('\\\\', '\\')

#### naming convention of below implementation follows 'textwrap' module

class MBTextWrapper(textwrap.TextWrapper):
    def __init__(self, **kwargs):
        textwrap.TextWrapper.__init__(self, **kwargs)

    def _cutdown(self, str, space_left):
        l = 0
        ucstr = unicode(str, encoding.encoding)
        w = unicodedata.east_asian_width
        for i in xrange(len(ucstr)):
            l += w(ucstr[i]) in 'WFA' and 2 or 1
            if space_left < l:
                return (ucstr[:i].encode(encoding.encoding),
                        ucstr[i:].encode(encoding.encoding))
        return str, ''

    # ----------------------------------------
    # overriding of base class

    def _handle_long_word(self, reversed_chunks, cur_line, cur_len, width):
        space_left = max(width - cur_len, 1)

        if self.break_long_words:
            cut, res = self._cutdown(reversed_chunks[-1], space_left)
            cur_line.append(cut)
            reversed_chunks[-1] = res
        elif not cur_line:
            cur_line.append(reversed_chunks.pop())

#### naming convention of above implementation follows 'textwrap' module

def wrap(line, width=None, initindent='', hangindent=''):
    if width is None:
        width = termwidth() - 2
    maxindent = max(len(hangindent), len(initindent))
    if width <= maxindent:
        # adjust for weird terminal size
        width = max(78, maxindent + 1)
    wrapper = MBTextWrapper(width=width,
                            initial_indent=initindent,
                            subsequent_indent=hangindent)
    return wrapper.fill(line)

def iterlines(iterator):
    for chunk in iterator:
        for line in chunk.splitlines():
            yield line

def expandpath(path):
    return os.path.expanduser(os.path.expandvars(path))

def hgcmd():
    """Return the command used to execute current hg

    This is different from hgexecutable() because on Windows we want
    to avoid things opening new shell windows like batch files, so we
    get either the python call or current executable.
    """
    if main_is_frozen():
        return [sys.executable]
    return gethgcmd()

def rundetached(args, condfn):
    """Execute the argument list in a detached process.

    condfn is a callable which is called repeatedly and should return
    True once the child process is known to have started successfully.
    At this point, the child process PID is returned. If the child
    process fails to start or finishes before condfn() evaluates to
    True, return -1.
    """
    # Windows case is easier because the child process is either
    # successfully starting and validating the condition or exiting
    # on failure. We just poll on its PID. On Unix, if the child
    # process fails to start, it will be left in a zombie state until
    # the parent waits on it, which we cannot do since we expect a
    # long-running process on success. Instead we listen for SIGCHLD
    # telling us our child process terminated.
    terminated = set()
    def handler(signum, frame):
        terminated.add(os.wait())
    prevhandler = None
    if hasattr(signal, 'SIGCHLD'):
        prevhandler = signal.signal(signal.SIGCHLD, handler)
    try:
        pid = spawndetached(args)
        while not condfn():
            if ((pid in terminated or not testpid(pid))
                and not condfn()):
                return -1
            time.sleep(0.1)
        return pid
    finally:
        if prevhandler is not None:
            signal.signal(signal.SIGCHLD, prevhandler)

try:
    any, all = any, all
except NameError:
    def any(iterable):
        for i in iterable:
            if i:
                return True
        return False

    def all(iterable):
        for i in iterable:
            if not i:
                return False
        return True

def termwidth():
    if 'COLUMNS' in os.environ:
        try:
            return int(os.environ['COLUMNS'])
        except ValueError:
            pass
    return termwidth_()

def interpolate(prefix, mapping, s, fn=None):
    """Return the result of interpolating items in the mapping into string s.

    prefix is a single character string, or a two character string with
    a backslash as the first character if the prefix needs to be escaped in
    a regular expression.

    fn is an optional function that will be applied to the replacement text
    just before replacement.
    """
    fn = fn or (lambda s: s)
    r = re.compile(r'%s(%s)' % (prefix, '|'.join(mapping.keys())))
    return r.sub(lambda x: fn(mapping[x.group()[1:]]), s)

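# Illustrative usage of interpolate() (a sketch): every occurrence of
# prefix + key is replaced by the mapped value; prefixes that are regex
# metacharacters need the backslash form.
#
#   >>> interpolate('%', {'foo': 'bar', 'baz': 'quux'}, 'say %foo and %baz')
#   'say bar and quux'
#   >>> interpolate(r'\$', {'HOME': '/home/u'}, 'cd $HOME', lambda s: s.upper())
#   'cd /HOME/U'
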
def getport(port):
    """Return the port for a given network service.

    If port is an integer, it's returned as is. If it's a string, it's
    looked up using socket.getservbyname(). If there's no matching
    service, util.Abort is raised.
    """
    try:
        return int(port)
    except ValueError:
        pass

    try:
        return socket.getservbyname(port)
    except socket.error:
        raise Abort(_("no port number associated with service '%s'") % port)
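
# Illustrative usage of getport() (a sketch; the service lookup assumes a
# typical services database such as /etc/services):
#
#   >>> getport(8000)
#   8000
#   >>> getport('8000')
#   8000
#   >>> getport('http')
#   80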