##// END OF EJS Templates
util: remove dead code which used to be for old python2 versions...
Alex Gaynor -
r33549:9a2ee959 default
parent child Browse files
Show More
@@ -1,3702 +1,3696
1 # util.py - Mercurial utility functions and platform specific implementations
1 # util.py - Mercurial utility functions and platform specific implementations
2 #
2 #
3 # Copyright 2005 K. Thananchayan <thananck@yahoo.com>
3 # Copyright 2005 K. Thananchayan <thananck@yahoo.com>
4 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
5 # Copyright 2006 Vadim Gelfer <vadim.gelfer@gmail.com>
5 # Copyright 2006 Vadim Gelfer <vadim.gelfer@gmail.com>
6 #
6 #
7 # This software may be used and distributed according to the terms of the
7 # This software may be used and distributed according to the terms of the
8 # GNU General Public License version 2 or any later version.
8 # GNU General Public License version 2 or any later version.
9
9
10 """Mercurial utility functions and platform specific implementations.
10 """Mercurial utility functions and platform specific implementations.
11
11
12 This contains helper routines that are independent of the SCM core and
12 This contains helper routines that are independent of the SCM core and
13 hide platform-specific details from the core.
13 hide platform-specific details from the core.
14 """
14 """
15
15
16 from __future__ import absolute_import
16 from __future__ import absolute_import
17
17
18 import bz2
18 import bz2
19 import calendar
19 import calendar
20 import codecs
20 import codecs
21 import collections
21 import collections
22 import contextlib
22 import contextlib
23 import datetime
23 import datetime
24 import errno
24 import errno
25 import gc
25 import gc
26 import hashlib
26 import hashlib
27 import imp
27 import imp
28 import os
28 import os
29 import platform as pyplatform
29 import platform as pyplatform
30 import re as remod
30 import re as remod
31 import shutil
31 import shutil
32 import signal
32 import signal
33 import socket
33 import socket
34 import stat
34 import stat
35 import string
35 import string
36 import subprocess
36 import subprocess
37 import sys
37 import sys
38 import tempfile
38 import tempfile
39 import textwrap
39 import textwrap
40 import time
40 import time
41 import traceback
41 import traceback
42 import warnings
42 import warnings
43 import zlib
43 import zlib
44
44
45 from . import (
45 from . import (
46 encoding,
46 encoding,
47 error,
47 error,
48 i18n,
48 i18n,
49 policy,
49 policy,
50 pycompat,
50 pycompat,
51 )
51 )
52
52
# Load the C/pure-python implementations selected by the module policy.
base85 = policy.importmod(r'base85')
osutil = policy.importmod(r'osutil')
parsers = policy.importmod(r'parsers')

b85decode = base85.b85decode
b85encode = base85.b85encode

# Re-export python2/python3 compatibility aliases under stable names.
cookielib = pycompat.cookielib
empty = pycompat.empty
httplib = pycompat.httplib
httpserver = pycompat.httpserver
pickle = pycompat.pickle
queue = pycompat.queue
socketserver = pycompat.socketserver
stderr = pycompat.stderr
stdin = pycompat.stdin
stdout = pycompat.stdout
stringio = pycompat.stringio
urlerr = pycompat.urlerr
urlreq = pycompat.urlreq
xmlrpclib = pycompat.xmlrpclib

# workaround for win32mbcs
_filenamebytestr = pycompat.bytestr
77
77
def isatty(fp):
    """Report whether the file-like object *fp* is attached to a tty.

    Objects lacking an ``isatty`` method are treated as not a tty.
    """
    try:
        result = fp.isatty()
    except AttributeError:
        return False
    return result
83
83
# glibc determines buffering on first write to stdout - if we replace a TTY
# destined stdout with a pipe destined stdout (e.g. pager), we want line
# buffering
if isatty(stdout):
    stdout = os.fdopen(stdout.fileno(), pycompat.sysstr('wb'), 1)

# Pick the platform-specific implementation module once at import time.
if pycompat.osname == 'nt':
    from . import windows as platform
    stdout = platform.winstdout(stdout)
else:
    from . import posix as platform
95
95
_ = i18n._

# Re-export the platform-specific implementations under stable module-level
# names so callers never need to know which platform module was selected.
bindunixsocket = platform.bindunixsocket
cachestat = platform.cachestat
checkexec = platform.checkexec
checklink = platform.checklink
copymode = platform.copymode
executablepath = platform.executablepath
expandglobs = platform.expandglobs
explainexit = platform.explainexit
findexe = platform.findexe
gethgcmd = platform.gethgcmd
getuser = platform.getuser
getpid = os.getpid
groupmembers = platform.groupmembers
groupname = platform.groupname
hidewindow = platform.hidewindow
isexec = platform.isexec
isowner = platform.isowner
listdir = osutil.listdir
localpath = platform.localpath
lookupreg = platform.lookupreg
makedir = platform.makedir
nlinks = platform.nlinks
normpath = platform.normpath
normcase = platform.normcase
normcasespec = platform.normcasespec
normcasefallback = platform.normcasefallback
openhardlinks = platform.openhardlinks
oslink = platform.oslink
parsepatchoutput = platform.parsepatchoutput
pconvert = platform.pconvert
poll = platform.poll
popen = platform.popen
posixfile = platform.posixfile
quotecommand = platform.quotecommand
readpipe = platform.readpipe
rename = platform.rename
removedirs = platform.removedirs
samedevice = platform.samedevice
samefile = platform.samefile
samestat = platform.samestat
setbinary = platform.setbinary
setflags = platform.setflags
setsignalhandler = platform.setsignalhandler
shellquote = platform.shellquote
spawndetached = platform.spawndetached
split = platform.split
sshargs = platform.sshargs
# prefer the C implementation when osutil provides one
statfiles = getattr(osutil, 'statfiles', platform.statfiles)
statisexec = platform.statisexec
statislink = platform.statislink
testpid = platform.testpid
umask = platform.umask
unlink = platform.unlink
username = platform.username
152
152
# These helpers only exist in some osutil builds; export them when present.
try:
    recvfds = osutil.recvfds
except AttributeError:
    pass
try:
    setprocname = osutil.setprocname
except AttributeError:
    pass

# Python compatibility

# sentinel distinguishing "attribute missing" from "attribute is None"
_notset = object()

# disable Python's problematic floating point timestamps (issue4836)
# (Python hypocritically says you shouldn't change this behavior in
# libraries, and sure enough Mercurial is not a library.)
os.stat_float_times(False)
170
170
def safehasattr(thing, attr):
    """hasattr() variant that does not trigger a property's getter side
    effects more than a plain getattr would; True iff *attr* resolves."""
    return getattr(thing, attr, _notset) is not _notset
173
173
def bitsfrom(container):
    """OR together every element of *container* into one bitmask integer."""
    mask = 0
    for flag in container:
        mask |= flag
    return mask
179
179
# python 2.6 still have deprecation warning enabled by default. We do not want
# to display anything to standard user so detect if we are running test and
# only use python deprecation warning in this case.
_dowarn = bool(encoding.environ.get('HGEMITWARNINGS'))
if _dowarn:
    # explicitly unfilter our warning for python 2.7
    #
    # The option of setting PYTHONWARNINGS in the test runner was investigated.
    # However, module name set through PYTHONWARNINGS was exactly matched, so
    # we cannot set 'mercurial' and have it match eg: 'mercurial.scmutil'. This
    # makes the whole PYTHONWARNINGS thing useless for our usecase.
    warnings.filterwarnings(r'default', r'', DeprecationWarning, r'mercurial')
    warnings.filterwarnings(r'default', r'', DeprecationWarning, r'hgext')
    warnings.filterwarnings(r'default', r'', DeprecationWarning, r'hgext3rd')
194
194
def nouideprecwarn(msg, version, stacklevel=1):
    """Issue an python native deprecation warning

    This is a noop outside of tests, use 'ui.deprecwarn' when possible.
    """
    # only emit when HGEMITWARNINGS enabled warning output at import time
    if not _dowarn:
        return
    msg += ("\n(compatibility will be dropped after Mercurial-%s,"
            " update your code.)") % version
    # +1 so the warning points at our caller, not at this helper
    warnings.warn(msg, DeprecationWarning, stacklevel + 1)
204
204
# supported digest algorithms, keyed by their wire name
DIGESTS = {
    'md5': hashlib.md5,
    'sha1': hashlib.sha1,
    'sha512': hashlib.sha512,
}
# List of digest types from strongest to weakest
DIGESTS_BY_STRENGTH = ['sha512', 'sha1', 'md5']

# sanity check: every preference-ordered name must be a known digest
for k in DIGESTS_BY_STRENGTH:
    assert k in DIGESTS
215
215
class digester(object):
    """helper to compute digests.

    This helper can be used to compute one or more digests given their name.

    >>> d = digester(['md5', 'sha1'])
    >>> d.update('foo')
    >>> [k for k in sorted(d)]
    ['md5', 'sha1']
    >>> d['md5']
    'acbd18db4cc2f85cedef654fccc4a4d8'
    >>> d['sha1']
    '0beec7b5ea3f0fdbc95d0dd47f3c5bc275da8a33'
    >>> digester.preferred(['md5', 'sha1'])
    'sha1'
    """

    def __init__(self, digests, s=''):
        # one live hash object per requested digest name
        self._hashes = {}
        for k in digests:
            if k not in DIGESTS:
                raise Abort(_('unknown digest type: %s') % k)
            self._hashes[k] = DIGESTS[k]()
        if s:
            self.update(s)

    def update(self, data):
        """feed *data* into every tracked hash object"""
        for h in self._hashes.values():
            h.update(data)

    def __getitem__(self, key):
        """return the hex digest for *key*; Abort on an unknown digest name"""
        if key not in DIGESTS:
            # BUG FIX: this previously interpolated the undefined name 'k',
            # turning the intended Abort into a NameError.
            raise Abort(_('unknown digest type: %s') % key)
        return self._hashes[key].hexdigest()

    def __iter__(self):
        return iter(self._hashes)

    @staticmethod
    def preferred(supported):
        """returns the strongest digest type in both supported and DIGESTS."""

        for k in DIGESTS_BY_STRENGTH:
            if k in supported:
                return k
        return None
262
262
class digestchecker(object):
    """file handle wrapper that additionally checks content against a given
    size and digests.

    d = digestchecker(fh, size, {'md5': '...'})

    When multiple digests are given, all of them are validated.
    """

    def __init__(self, fh, size, digests):
        self._fh = fh
        self._size = size
        self._got = 0
        self._digests = dict(digests)
        self._digester = digester(self._digests.keys())

    def read(self, length=-1):
        """read from the wrapped handle, feeding the bytes to the digester"""
        data = self._fh.read(length)
        self._digester.update(data)
        self._got += len(data)
        return data

    def validate(self):
        """raise Abort unless the byte count and every digest match"""
        if self._got != self._size:
            raise Abort(_('size mismatch: expected %d, got %d') %
                        (self._size, self._got))
        for name, expected in self._digests.items():
            actual = self._digester[name]
            if expected != actual:
                # i18n: first parameter is a digest name
                raise Abort(_('%s mismatch: expected %s, got %s') %
                            (name, expected, actual))
294
294
try:
    # Python 2: keep the builtin zero-copy 'buffer'.
    buffer = buffer
except NameError:
    # Python 3 has no 'buffer' builtin; a memoryview slice is the
    # equivalent zero-copy view over the underlying bytes.
    def buffer(sliceable, offset=0, length=None):
        if length is None:
            return memoryview(sliceable)[offset:]
        return memoryview(sliceable)[offset:offset + length]
# only POSIX can safely close inherited fds in spawned children
closefds = pycompat.osname == 'posix'

# bytes read per os.read() call when filling bufferedinputpipe buffers
_chunksize = 4096
class bufferedinputpipe(object):
    """a manually buffered input pipe

    Python will not let us use buffered IO and lazy reading with 'polling' at
    the same time. We cannot probe the buffer state and select will not detect
    that data are ready to read if they are already buffered.

    This class let us work around that by implementing its own buffering
    (allowing efficient readline) while offering a way to know if the buffer is
    empty from the output (allowing collaboration of the buffer with polling).

    This class lives in the 'util' module because it makes use of the 'os'
    module from the python stdlib.
    """

    def __init__(self, input):
        self._input = input
        self._buffer = []       # list of buffered chunks, oldest first
        self._eof = False       # set once os.read returns no data
        self._lenbuf = 0        # total number of buffered bytes

    @property
    def hasbuffer(self):
        """True is any data is currently buffered

        This will be used externally a pre-step for polling IO. If there is
        already data then no polling should be set in place."""
        return bool(self._buffer)

    @property
    def closed(self):
        return self._input.closed

    def fileno(self):
        return self._input.fileno()

    def close(self):
        return self._input.close()

    def read(self, size):
        # keep filling until we can satisfy the request or hit EOF
        while not self._eof and self._lenbuf < size:
            self._fillbuffer()
        return self._frombuffer(size)

    def readline(self, *args, **kwargs):
        if len(self._buffer) > 1:
            # this should not happen because both read and readline end with a
            # _frombuffer call that collapse it.
            self._buffer = [''.join(self._buffer)]
            self._lenbuf = len(self._buffer[0])
        nlidx = -1
        if self._buffer:
            nlidx = self._buffer[-1].find('\n')
        # read more chunks until a newline shows up or the pipe is exhausted
        while not self._eof and nlidx < 0:
            self._fillbuffer()
            if self._buffer:
                nlidx = self._buffer[-1].find('\n')
        size = nlidx + 1
        if nlidx < 0:  # end of file
            size = self._lenbuf
        elif len(self._buffer) > 1:
            # we need to take previous chunks into account
            size += self._lenbuf - len(self._buffer[-1])
        return self._frombuffer(size)

    def _frombuffer(self, size):
        """return at most 'size' data from the buffer

        The data are removed from the buffer."""
        if size == 0 or not self._buffer:
            return ''
        joined = self._buffer[0]
        if len(self._buffer) > 1:
            joined = ''.join(self._buffer)

        data = joined[:size]
        rest = joined[len(data):]
        if rest:
            self._buffer = [rest]
            self._lenbuf = len(rest)
        else:
            self._buffer = []
            self._lenbuf = 0
        return data

    def _fillbuffer(self):
        """read data to the buffer"""
        data = os.read(self._input.fileno(), _chunksize)
        if not data:
            self._eof = True
        else:
            self._lenbuf += len(data)
            self._buffer.append(data)
406
400
def popen2(cmd, env=None, newlines=False):
    """Spawn *cmd* through a shell; return its (stdin, stdout) pipe pair."""
    # Setting bufsize to -1 lets the system decide the buffer size.
    # The default for bufsize is 0, meaning unbuffered. This leads to
    # poor performance on Mac OS X: http://bugs.python.org/issue4194
    proc = subprocess.Popen(cmd, shell=True, bufsize=-1,
                            close_fds=closefds,
                            stdin=subprocess.PIPE, stdout=subprocess.PIPE,
                            universal_newlines=newlines,
                            env=env)
    return proc.stdin, proc.stdout
417
411
def popen3(cmd, env=None, newlines=False):
    """Like popen4(), but drop the Popen object and return only the pipes."""
    stdin, stdout, stderr, p = popen4(cmd, env, newlines)
    return stdin, stdout, stderr
421
415
def popen4(cmd, env=None, newlines=False, bufsize=-1):
    """Spawn *cmd* through a shell with all three streams piped.

    Returns (stdin, stdout, stderr, proc).
    """
    proc = subprocess.Popen(cmd, shell=True, bufsize=bufsize,
                            close_fds=closefds,
                            stdin=subprocess.PIPE, stdout=subprocess.PIPE,
                            stderr=subprocess.PIPE,
                            universal_newlines=newlines,
                            env=env)
    return proc.stdin, proc.stdout, proc.stderr, proc
430
424
def version():
    """Return version information if available."""
    try:
        from . import __version__
    except ImportError:
        # no generated version module (e.g. running from a raw checkout)
        return 'unknown'
    return __version__.version
438
432
def versiontuple(v=None, n=4):
    """Parses a Mercurial version string into an N-tuple.

    The version string to be parsed is specified with the ``v`` argument.
    If it isn't defined, the current Mercurial version string will be parsed.

    ``n`` can be 2, 3, or 4. Here is how some version strings map to
    returned values:

    >>> v = '3.6.1+190-df9b73d2d444'
    >>> versiontuple(v, 2)
    (3, 6)
    >>> versiontuple(v, 3)
    (3, 6, 1)
    >>> versiontuple(v, 4)
    (3, 6, 1, '190-df9b73d2d444')

    >>> versiontuple('3.6.1+190-df9b73d2d444+20151118')
    (3, 6, 1, '190-df9b73d2d444+20151118')

    >>> v = '3.6'
    >>> versiontuple(v, 2)
    (3, 6)
    >>> versiontuple(v, 3)
    (3, 6, None)
    >>> versiontuple(v, 4)
    (3, 6, None, None)

    >>> v = '3.9-rc'
    >>> versiontuple(v, 2)
    (3, 9)
    >>> versiontuple(v, 3)
    (3, 9, None)
    >>> versiontuple(v, 4)
    (3, 9, None, 'rc')

    >>> v = '3.9-rc+2-02a8fea4289b'
    >>> versiontuple(v, 2)
    (3, 9)
    >>> versiontuple(v, 3)
    (3, 9, None)
    >>> versiontuple(v, 4)
    (3, 9, None, 'rc+2-02a8fea4289b')
    """
    if not v:
        v = version()
    # split the dotted part from the first '+' or '-' suffix, if any
    chunks = remod.split(r'[\+-]', v, 1)
    if len(chunks) == 1:
        dotted, extra = chunks[0], None
    else:
        dotted, extra = chunks

    nums = []
    for piece in dotted.split('.'):
        try:
            nums.append(int(piece))
        except ValueError:
            # stop at the first non-numeric component
            break
    # (3, 6) -> (3, 6, None)
    while len(nums) < 3:
        nums.append(None)

    if n == 2:
        return (nums[0], nums[1])
    if n == 3:
        return (nums[0], nums[1], nums[2])
    if n == 4:
        return (nums[0], nums[1], nums[2], extra)
507
501
# used by parsedate
defaultdateformats = (
    '%Y-%m-%dT%H:%M:%S', # the 'real' ISO8601
    '%Y-%m-%dT%H:%M',    # without seconds
    '%Y-%m-%dT%H%M%S',   # another awful but legal variant without :
    '%Y-%m-%dT%H%M',     # without seconds
    '%Y-%m-%d %H:%M:%S', # our common legal variant
    '%Y-%m-%d %H:%M',    # without seconds
    '%Y-%m-%d %H%M%S',   # without :
    '%Y-%m-%d %H%M',     # without seconds
    '%Y-%m-%d %I:%M:%S%p',
    '%Y-%m-%d %H:%M',
    '%Y-%m-%d %I:%M%p',
    '%Y-%m-%d',
    '%m-%d',
    '%m/%d',
    '%m/%d/%y',
    '%m/%d/%Y',
    '%a %b %d %H:%M:%S %Y',
    '%a %b %d %I:%M:%S%p %Y',
    '%a, %d %b %Y %H:%M:%S', #  GNU coreutils "/bin/date --rfc-2822"
    '%b %d %H:%M:%S %Y',
    '%b %d %I:%M:%S%p %Y',
    '%b %d %H:%M:%S',
    '%b %d %I:%M:%S%p',
    '%b %d %H:%M',
    '%b %d %I:%M%p',
    '%b %d %Y',
    '%b %d',
    '%H:%M:%S',
    '%I:%M:%S%p',
    '%H:%M',
    '%I:%M%p',
)

# additional, more permissive formats accepted for e.g. date ranges
extendeddateformats = defaultdateformats + (
    "%Y",
    "%Y-%m",
    "%b",
    "%b %Y",
)
549
543
def cachefunc(func):
    '''cache the result of function calls'''
    # XXX doesn't handle keywords args
    argcount = func.__code__.co_argcount
    if argcount == 0:
        # nullary: a one-slot list is enough to memoize the single result
        results = []
        def f():
            if not results:
                results.append(func())
            return results[0]
        return f
    cache = {}
    if argcount == 1:
        # we gain a small amount of time because
        # we don't need to pack/unpack the list
        def f(arg):
            if arg not in cache:
                cache[arg] = func(arg)
            return cache[arg]
    else:
        def f(*args):
            if args not in cache:
                cache[args] = func(*args)
            return cache[args]

    return f
575
569
576 class sortdict(collections.OrderedDict):
570 class sortdict(collections.OrderedDict):
577 '''a simple sorted dictionary
571 '''a simple sorted dictionary
578
572
579 >>> d1 = sortdict([('a', 0), ('b', 1)])
573 >>> d1 = sortdict([('a', 0), ('b', 1)])
580 >>> d2 = d1.copy()
574 >>> d2 = d1.copy()
581 >>> d2
575 >>> d2
582 sortdict([('a', 0), ('b', 1)])
576 sortdict([('a', 0), ('b', 1)])
583 >>> d2.update([('a', 2)])
577 >>> d2.update([('a', 2)])
584 >>> d2.keys() # should still be in last-set order
578 >>> d2.keys() # should still be in last-set order
585 ['b', 'a']
579 ['b', 'a']
586 '''
580 '''
587
581
588 def __setitem__(self, key, value):
582 def __setitem__(self, key, value):
589 if key in self:
583 if key in self:
590 del self[key]
584 del self[key]
591 super(sortdict, self).__setitem__(key, value)
585 super(sortdict, self).__setitem__(key, value)
592
586
593 @contextlib.contextmanager
587 @contextlib.contextmanager
594 def acceptintervention(tr=None):
588 def acceptintervention(tr=None):
595 """A context manager that closes the transaction on InterventionRequired
589 """A context manager that closes the transaction on InterventionRequired
596
590
597 If no transaction was provided, this simply runs the body and returns
591 If no transaction was provided, this simply runs the body and returns
598 """
592 """
599 if not tr:
593 if not tr:
600 yield
594 yield
601 return
595 return
602 try:
596 try:
603 yield
597 yield
604 tr.close()
598 tr.close()
605 except error.InterventionRequired:
599 except error.InterventionRequired:
606 tr.close()
600 tr.close()
607 raise
601 raise
608 finally:
602 finally:
609 tr.release()
603 tr.release()
610
604
611 class _lrucachenode(object):
605 class _lrucachenode(object):
612 """A node in a doubly linked list.
606 """A node in a doubly linked list.
613
607
614 Holds a reference to nodes on either side as well as a key-value
608 Holds a reference to nodes on either side as well as a key-value
615 pair for the dictionary entry.
609 pair for the dictionary entry.
616 """
610 """
617 __slots__ = (u'next', u'prev', u'key', u'value')
611 __slots__ = (u'next', u'prev', u'key', u'value')
618
612
619 def __init__(self):
613 def __init__(self):
620 self.next = None
614 self.next = None
621 self.prev = None
615 self.prev = None
622
616
623 self.key = _notset
617 self.key = _notset
624 self.value = None
618 self.value = None
625
619
626 def markempty(self):
620 def markempty(self):
627 """Mark the node as emptied."""
621 """Mark the node as emptied."""
628 self.key = _notset
622 self.key = _notset
629
623
630 class lrucachedict(object):
624 class lrucachedict(object):
631 """Dict that caches most recent accesses and sets.
625 """Dict that caches most recent accesses and sets.
632
626
633 The dict consists of an actual backing dict - indexed by original
627 The dict consists of an actual backing dict - indexed by original
634 key - and a doubly linked circular list defining the order of entries in
628 key - and a doubly linked circular list defining the order of entries in
635 the cache.
629 the cache.
636
630
637 The head node is the newest entry in the cache. If the cache is full,
631 The head node is the newest entry in the cache. If the cache is full,
638 we recycle head.prev and make it the new head. Cache accesses result in
632 we recycle head.prev and make it the new head. Cache accesses result in
639 the node being moved to before the existing head and being marked as the
633 the node being moved to before the existing head and being marked as the
640 new head node.
634 new head node.
641 """
635 """
642 def __init__(self, max):
636 def __init__(self, max):
643 self._cache = {}
637 self._cache = {}
644
638
645 self._head = head = _lrucachenode()
639 self._head = head = _lrucachenode()
646 head.prev = head
640 head.prev = head
647 head.next = head
641 head.next = head
648 self._size = 1
642 self._size = 1
649 self._capacity = max
643 self._capacity = max
650
644
651 def __len__(self):
645 def __len__(self):
652 return len(self._cache)
646 return len(self._cache)
653
647
654 def __contains__(self, k):
648 def __contains__(self, k):
655 return k in self._cache
649 return k in self._cache
656
650
657 def __iter__(self):
651 def __iter__(self):
658 # We don't have to iterate in cache order, but why not.
652 # We don't have to iterate in cache order, but why not.
659 n = self._head
653 n = self._head
660 for i in range(len(self._cache)):
654 for i in range(len(self._cache)):
661 yield n.key
655 yield n.key
662 n = n.next
656 n = n.next
663
657
664 def __getitem__(self, k):
658 def __getitem__(self, k):
665 node = self._cache[k]
659 node = self._cache[k]
666 self._movetohead(node)
660 self._movetohead(node)
667 return node.value
661 return node.value
668
662
669 def __setitem__(self, k, v):
663 def __setitem__(self, k, v):
670 node = self._cache.get(k)
664 node = self._cache.get(k)
671 # Replace existing value and mark as newest.
665 # Replace existing value and mark as newest.
672 if node is not None:
666 if node is not None:
673 node.value = v
667 node.value = v
674 self._movetohead(node)
668 self._movetohead(node)
675 return
669 return
676
670
677 if self._size < self._capacity:
671 if self._size < self._capacity:
678 node = self._addcapacity()
672 node = self._addcapacity()
679 else:
673 else:
680 # Grab the last/oldest item.
674 # Grab the last/oldest item.
681 node = self._head.prev
675 node = self._head.prev
682
676
683 # At capacity. Kill the old entry.
677 # At capacity. Kill the old entry.
684 if node.key is not _notset:
678 if node.key is not _notset:
685 del self._cache[node.key]
679 del self._cache[node.key]
686
680
687 node.key = k
681 node.key = k
688 node.value = v
682 node.value = v
689 self._cache[k] = node
683 self._cache[k] = node
690 # And mark it as newest entry. No need to adjust order since it
684 # And mark it as newest entry. No need to adjust order since it
691 # is already self._head.prev.
685 # is already self._head.prev.
692 self._head = node
686 self._head = node
693
687
694 def __delitem__(self, k):
688 def __delitem__(self, k):
695 node = self._cache.pop(k)
689 node = self._cache.pop(k)
696 node.markempty()
690 node.markempty()
697
691
698 # Temporarily mark as newest item before re-adjusting head to make
692 # Temporarily mark as newest item before re-adjusting head to make
699 # this node the oldest item.
693 # this node the oldest item.
700 self._movetohead(node)
694 self._movetohead(node)
701 self._head = node.next
695 self._head = node.next
702
696
703 # Additional dict methods.
697 # Additional dict methods.
704
698
705 def get(self, k, default=None):
699 def get(self, k, default=None):
706 try:
700 try:
707 return self._cache[k].value
701 return self._cache[k].value
708 except KeyError:
702 except KeyError:
709 return default
703 return default
710
704
711 def clear(self):
705 def clear(self):
712 n = self._head
706 n = self._head
713 while n.key is not _notset:
707 while n.key is not _notset:
714 n.markempty()
708 n.markempty()
715 n = n.next
709 n = n.next
716
710
717 self._cache.clear()
711 self._cache.clear()
718
712
719 def copy(self):
713 def copy(self):
720 result = lrucachedict(self._capacity)
714 result = lrucachedict(self._capacity)
721 n = self._head.prev
715 n = self._head.prev
722 # Iterate in oldest-to-newest order, so the copy has the right ordering
716 # Iterate in oldest-to-newest order, so the copy has the right ordering
723 for i in range(len(self._cache)):
717 for i in range(len(self._cache)):
724 result[n.key] = n.value
718 result[n.key] = n.value
725 n = n.prev
719 n = n.prev
726 return result
720 return result
727
721
728 def _movetohead(self, node):
722 def _movetohead(self, node):
729 """Mark a node as the newest, making it the new head.
723 """Mark a node as the newest, making it the new head.
730
724
731 When a node is accessed, it becomes the freshest entry in the LRU
725 When a node is accessed, it becomes the freshest entry in the LRU
732 list, which is denoted by self._head.
726 list, which is denoted by self._head.
733
727
734 Visually, let's make ``N`` the new head node (* denotes head):
728 Visually, let's make ``N`` the new head node (* denotes head):
735
729
736 previous/oldest <-> head <-> next/next newest
730 previous/oldest <-> head <-> next/next newest
737
731
738 ----<->--- A* ---<->-----
732 ----<->--- A* ---<->-----
739 | |
733 | |
740 E <-> D <-> N <-> C <-> B
734 E <-> D <-> N <-> C <-> B
741
735
742 To:
736 To:
743
737
744 ----<->--- N* ---<->-----
738 ----<->--- N* ---<->-----
745 | |
739 | |
746 E <-> D <-> C <-> B <-> A
740 E <-> D <-> C <-> B <-> A
747
741
748 This requires the following moves:
742 This requires the following moves:
749
743
750 C.next = D (node.prev.next = node.next)
744 C.next = D (node.prev.next = node.next)
751 D.prev = C (node.next.prev = node.prev)
745 D.prev = C (node.next.prev = node.prev)
752 E.next = N (head.prev.next = node)
746 E.next = N (head.prev.next = node)
753 N.prev = E (node.prev = head.prev)
747 N.prev = E (node.prev = head.prev)
754 N.next = A (node.next = head)
748 N.next = A (node.next = head)
755 A.prev = N (head.prev = node)
749 A.prev = N (head.prev = node)
756 """
750 """
757 head = self._head
751 head = self._head
758 # C.next = D
752 # C.next = D
759 node.prev.next = node.next
753 node.prev.next = node.next
760 # D.prev = C
754 # D.prev = C
761 node.next.prev = node.prev
755 node.next.prev = node.prev
762 # N.prev = E
756 # N.prev = E
763 node.prev = head.prev
757 node.prev = head.prev
764 # N.next = A
758 # N.next = A
765 # It is tempting to do just "head" here, however if node is
759 # It is tempting to do just "head" here, however if node is
766 # adjacent to head, this will do bad things.
760 # adjacent to head, this will do bad things.
767 node.next = head.prev.next
761 node.next = head.prev.next
768 # E.next = N
762 # E.next = N
769 node.next.prev = node
763 node.next.prev = node
770 # A.prev = N
764 # A.prev = N
771 node.prev.next = node
765 node.prev.next = node
772
766
773 self._head = node
767 self._head = node
774
768
775 def _addcapacity(self):
769 def _addcapacity(self):
776 """Add a node to the circular linked list.
770 """Add a node to the circular linked list.
777
771
778 The new node is inserted before the head node.
772 The new node is inserted before the head node.
779 """
773 """
780 head = self._head
774 head = self._head
781 node = _lrucachenode()
775 node = _lrucachenode()
782 head.prev.next = node
776 head.prev.next = node
783 node.prev = head.prev
777 node.prev = head.prev
784 node.next = head
778 node.next = head
785 head.prev = node
779 head.prev = node
786 self._size += 1
780 self._size += 1
787 return node
781 return node
788
782
789 def lrucachefunc(func):
783 def lrucachefunc(func):
790 '''cache most recent results of function calls'''
784 '''cache most recent results of function calls'''
791 cache = {}
785 cache = {}
792 order = collections.deque()
786 order = collections.deque()
793 if func.__code__.co_argcount == 1:
787 if func.__code__.co_argcount == 1:
794 def f(arg):
788 def f(arg):
795 if arg not in cache:
789 if arg not in cache:
796 if len(cache) > 20:
790 if len(cache) > 20:
797 del cache[order.popleft()]
791 del cache[order.popleft()]
798 cache[arg] = func(arg)
792 cache[arg] = func(arg)
799 else:
793 else:
800 order.remove(arg)
794 order.remove(arg)
801 order.append(arg)
795 order.append(arg)
802 return cache[arg]
796 return cache[arg]
803 else:
797 else:
804 def f(*args):
798 def f(*args):
805 if args not in cache:
799 if args not in cache:
806 if len(cache) > 20:
800 if len(cache) > 20:
807 del cache[order.popleft()]
801 del cache[order.popleft()]
808 cache[args] = func(*args)
802 cache[args] = func(*args)
809 else:
803 else:
810 order.remove(args)
804 order.remove(args)
811 order.append(args)
805 order.append(args)
812 return cache[args]
806 return cache[args]
813
807
814 return f
808 return f
815
809
816 class propertycache(object):
810 class propertycache(object):
817 def __init__(self, func):
811 def __init__(self, func):
818 self.func = func
812 self.func = func
819 self.name = func.__name__
813 self.name = func.__name__
820 def __get__(self, obj, type=None):
814 def __get__(self, obj, type=None):
821 result = self.func(obj)
815 result = self.func(obj)
822 self.cachevalue(obj, result)
816 self.cachevalue(obj, result)
823 return result
817 return result
824
818
825 def cachevalue(self, obj, value):
819 def cachevalue(self, obj, value):
826 # __dict__ assignment required to bypass __setattr__ (eg: repoview)
820 # __dict__ assignment required to bypass __setattr__ (eg: repoview)
827 obj.__dict__[self.name] = value
821 obj.__dict__[self.name] = value
828
822
829 def pipefilter(s, cmd):
823 def pipefilter(s, cmd):
830 '''filter string S through command CMD, returning its output'''
824 '''filter string S through command CMD, returning its output'''
831 p = subprocess.Popen(cmd, shell=True, close_fds=closefds,
825 p = subprocess.Popen(cmd, shell=True, close_fds=closefds,
832 stdin=subprocess.PIPE, stdout=subprocess.PIPE)
826 stdin=subprocess.PIPE, stdout=subprocess.PIPE)
833 pout, perr = p.communicate(s)
827 pout, perr = p.communicate(s)
834 return pout
828 return pout
835
829
836 def tempfilter(s, cmd):
830 def tempfilter(s, cmd):
837 '''filter string S through a pair of temporary files with CMD.
831 '''filter string S through a pair of temporary files with CMD.
838 CMD is used as a template to create the real command to be run,
832 CMD is used as a template to create the real command to be run,
839 with the strings INFILE and OUTFILE replaced by the real names of
833 with the strings INFILE and OUTFILE replaced by the real names of
840 the temporary files generated.'''
834 the temporary files generated.'''
841 inname, outname = None, None
835 inname, outname = None, None
842 try:
836 try:
843 infd, inname = tempfile.mkstemp(prefix='hg-filter-in-')
837 infd, inname = tempfile.mkstemp(prefix='hg-filter-in-')
844 fp = os.fdopen(infd, pycompat.sysstr('wb'))
838 fp = os.fdopen(infd, pycompat.sysstr('wb'))
845 fp.write(s)
839 fp.write(s)
846 fp.close()
840 fp.close()
847 outfd, outname = tempfile.mkstemp(prefix='hg-filter-out-')
841 outfd, outname = tempfile.mkstemp(prefix='hg-filter-out-')
848 os.close(outfd)
842 os.close(outfd)
849 cmd = cmd.replace('INFILE', inname)
843 cmd = cmd.replace('INFILE', inname)
850 cmd = cmd.replace('OUTFILE', outname)
844 cmd = cmd.replace('OUTFILE', outname)
851 code = os.system(cmd)
845 code = os.system(cmd)
852 if pycompat.sysplatform == 'OpenVMS' and code & 1:
846 if pycompat.sysplatform == 'OpenVMS' and code & 1:
853 code = 0
847 code = 0
854 if code:
848 if code:
855 raise Abort(_("command '%s' failed: %s") %
849 raise Abort(_("command '%s' failed: %s") %
856 (cmd, explainexit(code)))
850 (cmd, explainexit(code)))
857 return readfile(outname)
851 return readfile(outname)
858 finally:
852 finally:
859 try:
853 try:
860 if inname:
854 if inname:
861 os.unlink(inname)
855 os.unlink(inname)
862 except OSError:
856 except OSError:
863 pass
857 pass
864 try:
858 try:
865 if outname:
859 if outname:
866 os.unlink(outname)
860 os.unlink(outname)
867 except OSError:
861 except OSError:
868 pass
862 pass
869
863
870 filtertable = {
864 filtertable = {
871 'tempfile:': tempfilter,
865 'tempfile:': tempfilter,
872 'pipe:': pipefilter,
866 'pipe:': pipefilter,
873 }
867 }
874
868
875 def filter(s, cmd):
869 def filter(s, cmd):
876 "filter a string through a command that transforms its input to its output"
870 "filter a string through a command that transforms its input to its output"
877 for name, fn in filtertable.iteritems():
871 for name, fn in filtertable.iteritems():
878 if cmd.startswith(name):
872 if cmd.startswith(name):
879 return fn(s, cmd[len(name):].lstrip())
873 return fn(s, cmd[len(name):].lstrip())
880 return pipefilter(s, cmd)
874 return pipefilter(s, cmd)
881
875
882 def binary(s):
876 def binary(s):
883 """return true if a string is binary data"""
877 """return true if a string is binary data"""
884 return bool(s and '\0' in s)
878 return bool(s and '\0' in s)
885
879
886 def increasingchunks(source, min=1024, max=65536):
880 def increasingchunks(source, min=1024, max=65536):
887 '''return no less than min bytes per chunk while data remains,
881 '''return no less than min bytes per chunk while data remains,
888 doubling min after each chunk until it reaches max'''
882 doubling min after each chunk until it reaches max'''
889 def log2(x):
883 def log2(x):
890 if not x:
884 if not x:
891 return 0
885 return 0
892 i = 0
886 i = 0
893 while x:
887 while x:
894 x >>= 1
888 x >>= 1
895 i += 1
889 i += 1
896 return i - 1
890 return i - 1
897
891
898 buf = []
892 buf = []
899 blen = 0
893 blen = 0
900 for chunk in source:
894 for chunk in source:
901 buf.append(chunk)
895 buf.append(chunk)
902 blen += len(chunk)
896 blen += len(chunk)
903 if blen >= min:
897 if blen >= min:
904 if min < max:
898 if min < max:
905 min = min << 1
899 min = min << 1
906 nmin = 1 << log2(blen)
900 nmin = 1 << log2(blen)
907 if nmin > min:
901 if nmin > min:
908 min = nmin
902 min = nmin
909 if min > max:
903 if min > max:
910 min = max
904 min = max
911 yield ''.join(buf)
905 yield ''.join(buf)
912 blen = 0
906 blen = 0
913 buf = []
907 buf = []
914 if buf:
908 if buf:
915 yield ''.join(buf)
909 yield ''.join(buf)
916
910
917 Abort = error.Abort
911 Abort = error.Abort
918
912
919 def always(fn):
913 def always(fn):
920 return True
914 return True
921
915
922 def never(fn):
916 def never(fn):
923 return False
917 return False
924
918
925 def nogc(func):
919 def nogc(func):
926 """disable garbage collector
920 """disable garbage collector
927
921
928 Python's garbage collector triggers a GC each time a certain number of
922 Python's garbage collector triggers a GC each time a certain number of
929 container objects (the number being defined by gc.get_threshold()) are
923 container objects (the number being defined by gc.get_threshold()) are
930 allocated even when marked not to be tracked by the collector. Tracking has
924 allocated even when marked not to be tracked by the collector. Tracking has
931 no effect on when GCs are triggered, only on what objects the GC looks
925 no effect on when GCs are triggered, only on what objects the GC looks
932 into. As a workaround, disable GC while building complex (huge)
926 into. As a workaround, disable GC while building complex (huge)
933 containers.
927 containers.
934
928
935 This garbage collector issue have been fixed in 2.7.
929 This garbage collector issue have been fixed in 2.7.
936 """
930 """
937 if sys.version_info >= (2, 7):
931 if sys.version_info >= (2, 7):
938 return func
932 return func
939 def wrapper(*args, **kwargs):
933 def wrapper(*args, **kwargs):
940 gcenabled = gc.isenabled()
934 gcenabled = gc.isenabled()
941 gc.disable()
935 gc.disable()
942 try:
936 try:
943 return func(*args, **kwargs)
937 return func(*args, **kwargs)
944 finally:
938 finally:
945 if gcenabled:
939 if gcenabled:
946 gc.enable()
940 gc.enable()
947 return wrapper
941 return wrapper
948
942
949 def pathto(root, n1, n2):
943 def pathto(root, n1, n2):
950 '''return the relative path from one place to another.
944 '''return the relative path from one place to another.
951 root should use os.sep to separate directories
945 root should use os.sep to separate directories
952 n1 should use os.sep to separate directories
946 n1 should use os.sep to separate directories
953 n2 should use "/" to separate directories
947 n2 should use "/" to separate directories
954 returns an os.sep-separated path.
948 returns an os.sep-separated path.
955
949
956 If n1 is a relative path, it's assumed it's
950 If n1 is a relative path, it's assumed it's
957 relative to root.
951 relative to root.
958 n2 should always be relative to root.
952 n2 should always be relative to root.
959 '''
953 '''
960 if not n1:
954 if not n1:
961 return localpath(n2)
955 return localpath(n2)
962 if os.path.isabs(n1):
956 if os.path.isabs(n1):
963 if os.path.splitdrive(root)[0] != os.path.splitdrive(n1)[0]:
957 if os.path.splitdrive(root)[0] != os.path.splitdrive(n1)[0]:
964 return os.path.join(root, localpath(n2))
958 return os.path.join(root, localpath(n2))
965 n2 = '/'.join((pconvert(root), n2))
959 n2 = '/'.join((pconvert(root), n2))
966 a, b = splitpath(n1), n2.split('/')
960 a, b = splitpath(n1), n2.split('/')
967 a.reverse()
961 a.reverse()
968 b.reverse()
962 b.reverse()
969 while a and b and a[-1] == b[-1]:
963 while a and b and a[-1] == b[-1]:
970 a.pop()
964 a.pop()
971 b.pop()
965 b.pop()
972 b.reverse()
966 b.reverse()
973 return pycompat.ossep.join((['..'] * len(a)) + b) or '.'
967 return pycompat.ossep.join((['..'] * len(a)) + b) or '.'
974
968
975 def mainfrozen():
969 def mainfrozen():
976 """return True if we are a frozen executable.
970 """return True if we are a frozen executable.
977
971
978 The code supports py2exe (most common, Windows only) and tools/freeze
972 The code supports py2exe (most common, Windows only) and tools/freeze
979 (portable, not much used).
973 (portable, not much used).
980 """
974 """
981 return (safehasattr(sys, "frozen") or # new py2exe
975 return (safehasattr(sys, "frozen") or # new py2exe
982 safehasattr(sys, "importers") or # old py2exe
976 safehasattr(sys, "importers") or # old py2exe
983 imp.is_frozen(u"__main__")) # tools/freeze
977 imp.is_frozen(u"__main__")) # tools/freeze
984
978
985 # the location of data files matching the source code
979 # the location of data files matching the source code
986 if mainfrozen() and getattr(sys, 'frozen', None) != 'macosx_app':
980 if mainfrozen() and getattr(sys, 'frozen', None) != 'macosx_app':
987 # executable version (py2exe) doesn't support __file__
981 # executable version (py2exe) doesn't support __file__
988 datapath = os.path.dirname(pycompat.sysexecutable)
982 datapath = os.path.dirname(pycompat.sysexecutable)
989 else:
983 else:
990 datapath = os.path.dirname(pycompat.fsencode(__file__))
984 datapath = os.path.dirname(pycompat.fsencode(__file__))
991
985
992 i18n.setdatapath(datapath)
986 i18n.setdatapath(datapath)
993
987
994 _hgexecutable = None
988 _hgexecutable = None
995
989
996 def hgexecutable():
990 def hgexecutable():
997 """return location of the 'hg' executable.
991 """return location of the 'hg' executable.
998
992
999 Defaults to $HG or 'hg' in the search path.
993 Defaults to $HG or 'hg' in the search path.
1000 """
994 """
1001 if _hgexecutable is None:
995 if _hgexecutable is None:
1002 hg = encoding.environ.get('HG')
996 hg = encoding.environ.get('HG')
1003 mainmod = sys.modules[pycompat.sysstr('__main__')]
997 mainmod = sys.modules[pycompat.sysstr('__main__')]
1004 if hg:
998 if hg:
1005 _sethgexecutable(hg)
999 _sethgexecutable(hg)
1006 elif mainfrozen():
1000 elif mainfrozen():
1007 if getattr(sys, 'frozen', None) == 'macosx_app':
1001 if getattr(sys, 'frozen', None) == 'macosx_app':
1008 # Env variable set by py2app
1002 # Env variable set by py2app
1009 _sethgexecutable(encoding.environ['EXECUTABLEPATH'])
1003 _sethgexecutable(encoding.environ['EXECUTABLEPATH'])
1010 else:
1004 else:
1011 _sethgexecutable(pycompat.sysexecutable)
1005 _sethgexecutable(pycompat.sysexecutable)
1012 elif (os.path.basename(
1006 elif (os.path.basename(
1013 pycompat.fsencode(getattr(mainmod, '__file__', ''))) == 'hg'):
1007 pycompat.fsencode(getattr(mainmod, '__file__', ''))) == 'hg'):
1014 _sethgexecutable(pycompat.fsencode(mainmod.__file__))
1008 _sethgexecutable(pycompat.fsencode(mainmod.__file__))
1015 else:
1009 else:
1016 exe = findexe('hg') or os.path.basename(sys.argv[0])
1010 exe = findexe('hg') or os.path.basename(sys.argv[0])
1017 _sethgexecutable(exe)
1011 _sethgexecutable(exe)
1018 return _hgexecutable
1012 return _hgexecutable
1019
1013
1020 def _sethgexecutable(path):
1014 def _sethgexecutable(path):
1021 """set location of the 'hg' executable"""
1015 """set location of the 'hg' executable"""
1022 global _hgexecutable
1016 global _hgexecutable
1023 _hgexecutable = path
1017 _hgexecutable = path
1024
1018
1025 def _isstdout(f):
1019 def _isstdout(f):
1026 fileno = getattr(f, 'fileno', None)
1020 fileno = getattr(f, 'fileno', None)
1027 return fileno and fileno() == sys.__stdout__.fileno()
1021 return fileno and fileno() == sys.__stdout__.fileno()
1028
1022
1029 def shellenviron(environ=None):
1023 def shellenviron(environ=None):
1030 """return environ with optional override, useful for shelling out"""
1024 """return environ with optional override, useful for shelling out"""
1031 def py2shell(val):
1025 def py2shell(val):
1032 'convert python object into string that is useful to shell'
1026 'convert python object into string that is useful to shell'
1033 if val is None or val is False:
1027 if val is None or val is False:
1034 return '0'
1028 return '0'
1035 if val is True:
1029 if val is True:
1036 return '1'
1030 return '1'
1037 return str(val)
1031 return str(val)
1038 env = dict(encoding.environ)
1032 env = dict(encoding.environ)
1039 if environ:
1033 if environ:
1040 env.update((k, py2shell(v)) for k, v in environ.iteritems())
1034 env.update((k, py2shell(v)) for k, v in environ.iteritems())
1041 env['HG'] = hgexecutable()
1035 env['HG'] = hgexecutable()
1042 return env
1036 return env
1043
1037
1044 def system(cmd, environ=None, cwd=None, out=None):
1038 def system(cmd, environ=None, cwd=None, out=None):
1045 '''enhanced shell command execution.
1039 '''enhanced shell command execution.
1046 run with environment maybe modified, maybe in different dir.
1040 run with environment maybe modified, maybe in different dir.
1047
1041
1048 if out is specified, it is assumed to be a file-like object that has a
1042 if out is specified, it is assumed to be a file-like object that has a
1049 write() method. stdout and stderr will be redirected to out.'''
1043 write() method. stdout and stderr will be redirected to out.'''
1050 try:
1044 try:
1051 stdout.flush()
1045 stdout.flush()
1052 except Exception:
1046 except Exception:
1053 pass
1047 pass
1054 cmd = quotecommand(cmd)
1048 cmd = quotecommand(cmd)
1055 env = shellenviron(environ)
1049 env = shellenviron(environ)
1056 if out is None or _isstdout(out):
1050 if out is None or _isstdout(out):
1057 rc = subprocess.call(cmd, shell=True, close_fds=closefds,
1051 rc = subprocess.call(cmd, shell=True, close_fds=closefds,
1058 env=env, cwd=cwd)
1052 env=env, cwd=cwd)
1059 else:
1053 else:
1060 proc = subprocess.Popen(cmd, shell=True, close_fds=closefds,
1054 proc = subprocess.Popen(cmd, shell=True, close_fds=closefds,
1061 env=env, cwd=cwd, stdout=subprocess.PIPE,
1055 env=env, cwd=cwd, stdout=subprocess.PIPE,
1062 stderr=subprocess.STDOUT)
1056 stderr=subprocess.STDOUT)
1063 for line in iter(proc.stdout.readline, ''):
1057 for line in iter(proc.stdout.readline, ''):
1064 out.write(line)
1058 out.write(line)
1065 proc.wait()
1059 proc.wait()
1066 rc = proc.returncode
1060 rc = proc.returncode
1067 if pycompat.sysplatform == 'OpenVMS' and rc & 1:
1061 if pycompat.sysplatform == 'OpenVMS' and rc & 1:
1068 rc = 0
1062 rc = 0
1069 return rc
1063 return rc
1070
1064
1071 def checksignature(func):
1065 def checksignature(func):
1072 '''wrap a function with code to check for calling errors'''
1066 '''wrap a function with code to check for calling errors'''
1073 def check(*args, **kwargs):
1067 def check(*args, **kwargs):
1074 try:
1068 try:
1075 return func(*args, **kwargs)
1069 return func(*args, **kwargs)
1076 except TypeError:
1070 except TypeError:
1077 if len(traceback.extract_tb(sys.exc_info()[2])) == 1:
1071 if len(traceback.extract_tb(sys.exc_info()[2])) == 1:
1078 raise error.SignatureError
1072 raise error.SignatureError
1079 raise
1073 raise
1080
1074
1081 return check
1075 return check
1082
1076
1083 # a whilelist of known filesystems where hardlink works reliably
1077 # a whilelist of known filesystems where hardlink works reliably
1084 _hardlinkfswhitelist = {
1078 _hardlinkfswhitelist = {
1085 'btrfs',
1079 'btrfs',
1086 'ext2',
1080 'ext2',
1087 'ext3',
1081 'ext3',
1088 'ext4',
1082 'ext4',
1089 'hfs',
1083 'hfs',
1090 'jfs',
1084 'jfs',
1091 'reiserfs',
1085 'reiserfs',
1092 'tmpfs',
1086 'tmpfs',
1093 'ufs',
1087 'ufs',
1094 'xfs',
1088 'xfs',
1095 'zfs',
1089 'zfs',
1096 }
1090 }
1097
1091
1098 def copyfile(src, dest, hardlink=False, copystat=False, checkambig=False):
1092 def copyfile(src, dest, hardlink=False, copystat=False, checkambig=False):
1099 '''copy a file, preserving mode and optionally other stat info like
1093 '''copy a file, preserving mode and optionally other stat info like
1100 atime/mtime
1094 atime/mtime
1101
1095
1102 checkambig argument is used with filestat, and is useful only if
1096 checkambig argument is used with filestat, and is useful only if
1103 destination file is guarded by any lock (e.g. repo.lock or
1097 destination file is guarded by any lock (e.g. repo.lock or
1104 repo.wlock).
1098 repo.wlock).
1105
1099
1106 copystat and checkambig should be exclusive.
1100 copystat and checkambig should be exclusive.
1107 '''
1101 '''
1108 assert not (copystat and checkambig)
1102 assert not (copystat and checkambig)
1109 oldstat = None
1103 oldstat = None
1110 if os.path.lexists(dest):
1104 if os.path.lexists(dest):
1111 if checkambig:
1105 if checkambig:
1112 oldstat = checkambig and filestat.frompath(dest)
1106 oldstat = checkambig and filestat.frompath(dest)
1113 unlink(dest)
1107 unlink(dest)
1114 if hardlink:
1108 if hardlink:
1115 # Hardlinks are problematic on CIFS (issue4546), do not allow hardlinks
1109 # Hardlinks are problematic on CIFS (issue4546), do not allow hardlinks
1116 # unless we are confident that dest is on a whitelisted filesystem.
1110 # unless we are confident that dest is on a whitelisted filesystem.
1117 try:
1111 try:
1118 fstype = getfstype(os.path.dirname(dest))
1112 fstype = getfstype(os.path.dirname(dest))
1119 except OSError:
1113 except OSError:
1120 fstype = None
1114 fstype = None
1121 if fstype not in _hardlinkfswhitelist:
1115 if fstype not in _hardlinkfswhitelist:
1122 hardlink = False
1116 hardlink = False
1123 if hardlink:
1117 if hardlink:
1124 try:
1118 try:
1125 oslink(src, dest)
1119 oslink(src, dest)
1126 return
1120 return
1127 except (IOError, OSError):
1121 except (IOError, OSError):
1128 pass # fall back to normal copy
1122 pass # fall back to normal copy
1129 if os.path.islink(src):
1123 if os.path.islink(src):
1130 os.symlink(os.readlink(src), dest)
1124 os.symlink(os.readlink(src), dest)
1131 # copytime is ignored for symlinks, but in general copytime isn't needed
1125 # copytime is ignored for symlinks, but in general copytime isn't needed
1132 # for them anyway
1126 # for them anyway
1133 else:
1127 else:
1134 try:
1128 try:
1135 shutil.copyfile(src, dest)
1129 shutil.copyfile(src, dest)
1136 if copystat:
1130 if copystat:
1137 # copystat also copies mode
1131 # copystat also copies mode
1138 shutil.copystat(src, dest)
1132 shutil.copystat(src, dest)
1139 else:
1133 else:
1140 shutil.copymode(src, dest)
1134 shutil.copymode(src, dest)
1141 if oldstat and oldstat.stat:
1135 if oldstat and oldstat.stat:
1142 newstat = filestat.frompath(dest)
1136 newstat = filestat.frompath(dest)
1143 if newstat.isambig(oldstat):
1137 if newstat.isambig(oldstat):
1144 # stat of copied file is ambiguous to original one
1138 # stat of copied file is ambiguous to original one
1145 advanced = (oldstat.stat.st_mtime + 1) & 0x7fffffff
1139 advanced = (oldstat.stat.st_mtime + 1) & 0x7fffffff
1146 os.utime(dest, (advanced, advanced))
1140 os.utime(dest, (advanced, advanced))
1147 except shutil.Error as inst:
1141 except shutil.Error as inst:
1148 raise Abort(str(inst))
1142 raise Abort(str(inst))
1149
1143
def copyfiles(src, dst, hardlink=None, progress=lambda t, pos: None):
    """Copy a directory tree using hardlinks if possible."""
    copied = 0

    def _topic():
        # progress topic depends on whether we ended up hardlinking
        return _('linking') if hardlink else _('copying')

    if os.path.isdir(src):
        if hardlink is None:
            # hardlinks only work when source and destination share a device
            hardlink = (os.stat(src).st_dev ==
                        os.stat(os.path.dirname(dst)).st_dev)
        topic = _topic()
        os.mkdir(dst)
        for name, kind in listdir(src):
            srcpath = os.path.join(src, name)
            dstpath = os.path.join(dst, name)
            def nested(t, pos):
                # offset child progress by what this level already copied
                if pos is not None:
                    return progress(t, pos + copied)
            hardlink, n = copyfiles(srcpath, dstpath, hardlink,
                                    progress=nested)
            copied += n
    else:
        if hardlink is None:
            hardlink = (os.stat(os.path.dirname(src)).st_dev ==
                        os.stat(os.path.dirname(dst)).st_dev)
        topic = _topic()

        if hardlink:
            try:
                oslink(src, dst)
            except (IOError, OSError):
                # once a hardlink fails, stop trying for the rest of the tree
                hardlink = False
                shutil.copy(src, dst)
        else:
            shutil.copy(src, dst)
        copied += 1
        progress(topic, copied)

    progress(topic, None)

    return hardlink, copied
1189
1183
1190 _winreservednames = b'''con prn aux nul
1184 _winreservednames = b'''con prn aux nul
1191 com1 com2 com3 com4 com5 com6 com7 com8 com9
1185 com1 com2 com3 com4 com5 com6 com7 com8 com9
1192 lpt1 lpt2 lpt3 lpt4 lpt5 lpt6 lpt7 lpt8 lpt9'''.split()
1186 lpt1 lpt2 lpt3 lpt4 lpt5 lpt6 lpt7 lpt8 lpt9'''.split()
1193 _winreservedchars = ':*?"<>|'
1187 _winreservedchars = ':*?"<>|'
def checkwinfilename(path):
    r'''Check that the base-relative path is a valid filename on Windows.
    Returns None if the path is ok, or a UI string describing the problem.

    >>> checkwinfilename("just/a/normal/path")
    >>> checkwinfilename("foo/bar/con.xml")
    "filename contains 'con', which is reserved on Windows"
    >>> checkwinfilename("foo/con.xml/bar")
    "filename contains 'con', which is reserved on Windows"
    >>> checkwinfilename("foo/bar/xml.con")
    >>> checkwinfilename("foo/bar/AUX/bla.txt")
    "filename contains 'AUX', which is reserved on Windows"
    >>> checkwinfilename("foo/bar/bla:.txt")
    "filename contains ':', which is reserved on Windows"
    >>> checkwinfilename("foo/bar/b\07la.txt")
    "filename contains '\\x07', which is invalid on Windows"
    >>> checkwinfilename("foo/bar/bla ")
    "filename ends with ' ', which is not allowed on Windows"
    >>> checkwinfilename("../bar")
    >>> checkwinfilename("foo\\")
    "filename ends with '\\', which is invalid on Windows"
    >>> checkwinfilename("foo\\/bar")
    "directory name ends with '\\', which is invalid on Windows"
    '''
    # reject a trailing backslash on the whole path...
    if path.endswith('\\'):
        return _("filename ends with '\\', which is invalid on Windows")
    # ...and a backslash immediately before a separator, i.e. a directory
    # component ending in '\'
    if '\\/' in path:
        return _("directory name ends with '\\', which is invalid on Windows")
    # examine each component, treating '\' and '/' both as separators
    for n in path.replace('\\', '/').split('/'):
        if not n:
            continue
        for c in _filenamebytestr(n):
            if c in _winreservedchars:
                return _("filename contains '%s', which is reserved "
                         "on Windows") % c
            # ASCII control characters (<= 0x1f) are invalid in Windows names
            if ord(c) <= 31:
                return _("filename contains %r, which is invalid "
                         "on Windows") % c
        # the part before the first dot must not be a reserved device name
        # ('con.xml' is still reserved, 'xml.con' is fine — see doctests)
        base = n.split('.')[0]
        if base and base.lower() in _winreservednames:
            return _("filename contains '%s', which is reserved "
                     "on Windows") % base
        t = n[-1]
        # Windows silently strips a trailing dot or space. Note that
        # "n not in '..'" is a substring test, so the components '.' and
        # '..' pass through here — presumably intentional, since the
        # '../bar' doctest above must succeed.
        if t in '. ' and n not in '..':
            return _("filename ends with '%s', which is not allowed "
                     "on Windows") % t
1240
1234
if pycompat.osname == 'nt':
    # on Windows, filenames need the reserved-name validation above
    checkosfilename = checkwinfilename
    timer = time.clock
else:
    checkosfilename = platform.checkosfilename
    timer = time.time

# prefer the high-resolution counter where the interpreter provides one
timer = getattr(time, 'perf_counter', timer)
1250
1244
def makelock(info, pathname):
    """Create a lock at pathname whose payload is info.

    A symlink is preferred: creating one is atomic and its target can
    carry the payload. EEXIST (somebody else holds the lock) is
    propagated; any other symlink failure falls back to an
    exclusively-created regular file containing info.
    """
    try:
        return os.symlink(info, pathname)
    except OSError as why:
        if why.errno == errno.EEXIST:
            raise
    except AttributeError: # no symlink in os
        pass

    fd = os.open(pathname, os.O_CREAT | os.O_WRONLY | os.O_EXCL)
    os.write(fd, info)
    os.close(fd)
1263
1257
def readlock(pathname):
    """Return the payload of the lock at pathname.

    Reads the symlink target when the lock is a symlink; EINVAL (not a
    symlink) and ENOSYS (no symlink support) fall back to reading a
    regular file.
    """
    try:
        return os.readlink(pathname)
    except OSError as why:
        if why.errno not in (errno.EINVAL, errno.ENOSYS):
            raise
    except AttributeError: # no symlink in os
        pass
    fp = posixfile(pathname)
    contents = fp.read()
    fp.close()
    return contents
1276
1270
def fstat(fp):
    '''stat file object that may not have fileno method.'''
    try:
        fd = fp.fileno()
    except AttributeError:
        # no usable descriptor; fall back to stat'ing by name
        return os.stat(fp.name)
    return os.fstat(fd)
1283
1277
1284 # File system features
1278 # File system features
1285
1279
def fscasesensitive(path):
    """
    Return true if the given path is on a case-sensitive filesystem

    Requires a path (like /foo/.hg) ending with a foldable final
    directory component.
    """
    st1 = os.lstat(path)
    dirname, leaf = os.path.split(path)
    folded = leaf.upper()
    if folded == leaf:
        folded = leaf.lower()
    if folded == leaf:
        # name has no case to fold: no evidence against case sensitivity
        return True
    try:
        st2 = os.lstat(os.path.join(dirname, folded))
    except OSError:
        # case-folded variant doesn't exist, so case matters
        return True
    # identical stat means both spellings name the same file (insensitive)
    return st1 != st2
1308
1302
1309 try:
1303 try:
1310 import re2
1304 import re2
1311 _re2 = None
1305 _re2 = None
1312 except ImportError:
1306 except ImportError:
1313 _re2 = False
1307 _re2 = False
1314
1308
class _re(object):
    def _checkre2(self):
        """Decide once (into the module-level _re2 flag) whether the
        re2 module actually works."""
        global _re2
        try:
            # check if match works, see issue3964
            _re2 = bool(re2.match(r'\[([^\[]+)\]', '[ui]'))
        except ImportError:
            _re2 = False

    def compile(self, pat, flags=0):
        '''Compile a regular expression, using re2 if possible

        For best performance, use only re2-compatible regexp features. The
        only flags from the re module that are re2-compatible are
        IGNORECASE and MULTILINE.'''
        if _re2 is None:
            self._checkre2()
        re2flags = remod.IGNORECASE | remod.MULTILINE
        if _re2 and not (flags & ~re2flags):
            # re2 takes flags inline rather than as an argument
            if flags & remod.IGNORECASE:
                pat = '(?i)' + pat
            if flags & remod.MULTILINE:
                pat = '(?m)' + pat
            try:
                return re2.compile(pat)
            except re2.error:
                # pattern uses a feature re2 lacks; fall back to re
                pass
        return remod.compile(pat, flags)

    @propertycache
    def escape(self):
        '''Return the version of escape corresponding to self.compile.

        This is imperfect because whether re2 or re is used for a particular
        function depends on the flags, etc, but it's the best we can do.
        '''
        global _re2
        if _re2 is None:
            self._checkre2()
        return re2.escape if _re2 else remod.escape

# module-level singleton used as the project's 're'
re = _re()
1359
1353
# cache of directory path -> {normcased name: on-disk name}
_fspathcache = {}
def fspath(name, root):
    '''Get name in the case stored in the filesystem

    The name should be relative to root, and be normcase-ed for efficiency.

    Note that this function is unnecessary, and should not be
    called, for case-sensitive filesystems (simply because it's expensive).

    The root should be normcase-ed, too.
    '''
    def _makefspathcacheentry(dir):
        # map normcased entry name -> its real on-disk spelling
        return dict((normcase(n), n) for n in os.listdir(dir))

    seps = pycompat.ossep
    if pycompat.osaltsep:
        seps = seps + pycompat.osaltsep
    # Protect backslashes. This gets silly very quickly.
    # str.replace() returns a new string; previously its result was
    # discarded, so '\' leaked unescaped into the character classes below
    # and the pattern failed to treat '\' as a separator on Windows.
    seps = seps.replace('\\', '\\\\')
    pattern = remod.compile(br'([^%s]+)|([%s]+)' % (seps, seps))
    dir = os.path.normpath(root)
    result = []
    for part, sep in pattern.findall(name):
        if sep:
            result.append(sep)
            continue

        if dir not in _fspathcache:
            _fspathcache[dir] = _makefspathcacheentry(dir)
        contents = _fspathcache[dir]

        found = contents.get(part)
        if not found:
            # retry "once per directory" per "dirstate.walk" which
            # may take place for each patches of "hg qpush", for example
            _fspathcache[dir] = contents = _makefspathcacheentry(dir)
            found = contents.get(part)

        result.append(found or part)
        dir = os.path.join(dir, part)

    return ''.join(result)
1402
1396
def getfstype(dirpath):
    '''Get the filesystem type name from a directory (best-effort)

    Returns None if we are unsure. Raises OSError on ENOENT, EPERM, etc.
    '''
    impl = getattr(osutil, 'getfstype', None)
    if impl is None:
        return None
    return impl(dirpath)
1409
1403
def checknlink(testfile):
    '''check whether hardlink count reporting works properly'''

    # testfile may be open, so we need a separate file for checking to
    # work around issue2543 (or testfile may get lost on Samba shares)
    probe = testfile + ".hgtmp1"
    if os.path.lexists(probe):
        return False

    def _silentunlink(path):
        # best-effort cleanup of the probe files
        try:
            os.unlink(path)
        except OSError:
            pass

    try:
        posixfile(probe, 'w').close()
    except IOError:
        _silentunlink(probe)
        return False

    link = testfile + ".hgtmp2"
    fp = None
    try:
        oslink(probe, link)
        # nlinks() may behave differently for files on Windows shares if
        # the file is open.
        fp = posixfile(link)
        return nlinks(link) > 1
    except OSError:
        return False
    finally:
        if fp is not None:
            fp.close()
        _silentunlink(probe)
        _silentunlink(link)
1445
1439
def endswithsep(path):
    '''Check path ends with os.sep or os.altsep.'''
    if path.endswith(pycompat.ossep):
        return True
    altsep = pycompat.osaltsep
    # mirrors the original's truthiness: falsy altsep is returned as-is
    return altsep and path.endswith(altsep)
1450
1444
def splitpath(path):
    '''Split path by os.sep.
    Note that this function does not use os.altsep because this is
    an alternative of simple "xxx.split(os.sep)".
    It is recommended to use os.path.normpath() before using this
    function if need.'''
    sep = pycompat.ossep
    return path.split(sep)
1458
1452
def gui():
    '''Are we running in a GUI?'''
    if pycompat.sysplatform != 'darwin':
        return pycompat.osname == "nt" or encoding.environ.get("DISPLAY")
    if 'SSH_CONNECTION' in encoding.environ:
        # handle SSH access to a box where the user is logged in
        return False
    if getattr(osutil, 'isgui', None):
        # check if a CoreGraphics session is available
        return osutil.isgui()
    # pure build; use a safe default
    return True
1473
1467
def mktempcopy(name, emptyok=False, createmode=None):
    """Create a temporary file with the same contents from name

    The permission bits are copied from the original file.

    If the temporary file is going to be truncated immediately, you
    can use emptyok=True as an optimization.

    Returns the name of the temporary file.
    """
    dirname, basename = os.path.split(name)
    fd, temp = tempfile.mkstemp(prefix='.%s-' % basename, dir=dirname)
    os.close(fd)
    # Temporary files are created with mode 0600, which is usually not
    # what we want. If the original file already exists, just copy
    # its mode. Otherwise, manually obey umask.
    copymode(name, temp, createmode)
    if emptyok:
        return temp
    try:
        try:
            ifp = posixfile(name, "rb")
        except IOError as inst:
            if inst.errno == errno.ENOENT:
                # nothing to copy from; the fresh empty temp file suffices
                return temp
            if not getattr(inst, 'filename', None):
                inst.filename = name
            raise
        ofp = posixfile(temp, "wb")
        for chunk in filechunkiter(ifp):
            ofp.write(chunk)
        ifp.close()
        ofp.close()
    except: # re-raises
        # don't leave a stray temp file behind on failure
        try:
            os.unlink(temp)
        except OSError:
            pass
        raise
    return temp
1512
1506
class filestat(object):
    """help to exactly detect change of a file

    'stat' attribute is result of 'os.stat()' if specified 'path'
    exists. Otherwise, it is None. This can avoid preparative
    'exists()' examination on client side of this class.
    """
    def __init__(self, stat):
        self.stat = stat

    @classmethod
    def frompath(cls, path):
        """Build a filestat for path; a missing file yields stat=None."""
        try:
            st = os.stat(path)
        except OSError as err:
            if err.errno != errno.ENOENT:
                raise
            st = None
        return cls(st)

    @classmethod
    def fromfp(cls, fp):
        """Build a filestat from an already-open file object."""
        return cls(os.fstat(fp.fileno()))

    # keep identity hashing; __eq__ below is value-based and unhashable
    # semantics are not desired here
    __hash__ = object.__hash__

    def __eq__(self, old):
        try:
            mine = self.stat
            theirs = old.stat
        except AttributeError:
            # 'old' is not filestat-like
            return False
        if mine is None or theirs is None:
            # two missing files compare equal; missing != existing
            return mine is None and theirs is None
        try:
            # if ambiguity between stat of new and old file is
            # avoided, comparison of size, ctime and mtime is enough
            # to exactly detect change of a file regardless of platform
            return (mine.st_size == theirs.st_size
                    and mine.st_ctime == theirs.st_ctime
                    and mine.st_mtime == theirs.st_mtime)
        except AttributeError:
            return False

    def isambig(self, old):
        """Examine whether new (= self) stat is ambiguous against old one

        "S[N]" below means stat of a file at N-th change:

        - S[n-1].ctime < S[n].ctime: can detect change of a file
        - S[n-1].ctime == S[n].ctime
          - S[n-1].ctime < S[n].mtime: means natural advancing (*1)
          - S[n-1].ctime == S[n].mtime: is ambiguous (*2)
          - S[n-1].ctime > S[n].mtime: never occurs naturally (don't care)
        - S[n-1].ctime > S[n].ctime: never occurs naturally (don't care)

        Case (*2) means the file changed twice within one ctime second,
        so timestamps alone cannot distinguish the versions. Because a
        naturally advanced mtime (*1) can collide with a manually
        advanced one, every "S[n-1].ctime == S[n].ctime" case is treated
        as ambiguous regardless of mtime; callers then bump mtime (see
        avoidambig) so "S[n-1].mtime != S[n].mtime" holds even when the
        size is unchanged.
        """
        try:
            return self.stat.st_ctime == old.stat.st_ctime
        except AttributeError:
            # either side has no real stat
            return False

    def avoidambig(self, path, old):
        """Change file stat of specified path to avoid ambiguity

        'old' should be previous filestat of 'path'.

        This skips avoiding ambiguity, if a process doesn't have
        appropriate privileges for 'path'. This returns False in this
        case.

        Otherwise, this returns True, as "ambiguity is avoided".
        """
        # step past old's mtime, staying within 31 bits
        advanced = (old.stat.st_mtime + 1) & 0x7fffffff
        try:
            os.utime(path, (advanced, advanced))
        except OSError as inst:
            if inst.errno != errno.EPERM:
                raise
            # utime() on the file created by another user causes EPERM,
            # if a process doesn't have appropriate privileges
            return False
        return True

    def __ne__(self, other):
        return not self.__eq__(other)
1614
1608
1615 class atomictempfile(object):
1609 class atomictempfile(object):
1616 '''writable file object that atomically updates a file
1610 '''writable file object that atomically updates a file
1617
1611
1618 All writes will go to a temporary copy of the original file. Call
1612 All writes will go to a temporary copy of the original file. Call
1619 close() when you are done writing, and atomictempfile will rename
1613 close() when you are done writing, and atomictempfile will rename
1620 the temporary copy to the original name, making the changes
1614 the temporary copy to the original name, making the changes
1621 visible. If the object is destroyed without being closed, all your
1615 visible. If the object is destroyed without being closed, all your
1622 writes are discarded.
1616 writes are discarded.
1623
1617
1624 checkambig argument of constructor is used with filestat, and is
1618 checkambig argument of constructor is used with filestat, and is
1625 useful only if target file is guarded by any lock (e.g. repo.lock
1619 useful only if target file is guarded by any lock (e.g. repo.lock
1626 or repo.wlock).
1620 or repo.wlock).
1627 '''
1621 '''
1628 def __init__(self, name, mode='w+b', createmode=None, checkambig=False):
1622 def __init__(self, name, mode='w+b', createmode=None, checkambig=False):
1629 self.__name = name # permanent name
1623 self.__name = name # permanent name
1630 self._tempname = mktempcopy(name, emptyok=('w' in mode),
1624 self._tempname = mktempcopy(name, emptyok=('w' in mode),
1631 createmode=createmode)
1625 createmode=createmode)
1632 self._fp = posixfile(self._tempname, mode)
1626 self._fp = posixfile(self._tempname, mode)
1633 self._checkambig = checkambig
1627 self._checkambig = checkambig
1634
1628
1635 # delegated methods
1629 # delegated methods
1636 self.read = self._fp.read
1630 self.read = self._fp.read
1637 self.write = self._fp.write
1631 self.write = self._fp.write
1638 self.seek = self._fp.seek
1632 self.seek = self._fp.seek
1639 self.tell = self._fp.tell
1633 self.tell = self._fp.tell
1640 self.fileno = self._fp.fileno
1634 self.fileno = self._fp.fileno
1641
1635
    def close(self):
        """Flush buffered data and atomically rename the temp file over
        the target, making all writes visible at once.

        With checkambig, if the new file's stat is indistinguishable
        from the old one's (same size/ctime/mtime), bump mtime by one
        second so mtime-based cache validation notices the change.
        """
        if not self._fp.closed:
            self._fp.close()
            filename = localpath(self.__name)
            # oldstat is falsy unless checkambig was requested
            oldstat = self._checkambig and filestat.frompath(filename)
            if oldstat and oldstat.stat:
                rename(self._tempname, filename)
                newstat = filestat.frompath(filename)
                if newstat.isambig(oldstat):
                    # stat of changed file is ambiguous to original one
                    advanced = (oldstat.stat.st_mtime + 1) & 0x7fffffff
                    os.utime(filename, (advanced, advanced))
            else:
                rename(self._tempname, filename)
1656
1650
1657 def discard(self):
1651 def discard(self):
1658 if not self._fp.closed:
1652 if not self._fp.closed:
1659 try:
1653 try:
1660 os.unlink(self._tempname)
1654 os.unlink(self._tempname)
1661 except OSError:
1655 except OSError:
1662 pass
1656 pass
1663 self._fp.close()
1657 self._fp.close()
1664
1658
1665 def __del__(self):
1659 def __del__(self):
1666 if safehasattr(self, '_fp'): # constructor actually did something
1660 if safehasattr(self, '_fp'): # constructor actually did something
1667 self.discard()
1661 self.discard()
1668
1662
    def __enter__(self):
        """Enter a ``with`` block; yields the file-like object itself."""
        return self
1671
1665
1672 def __exit__(self, exctype, excvalue, traceback):
1666 def __exit__(self, exctype, excvalue, traceback):
1673 if exctype is not None:
1667 if exctype is not None:
1674 self.discard()
1668 self.discard()
1675 else:
1669 else:
1676 self.close()
1670 self.close()
1677
1671
def unlinkpath(f, ignoremissing=False):
    """Remove file ``f`` and prune any directories left empty.

    With ignoremissing, a nonexistent ``f`` is silently tolerated;
    otherwise the underlying unlink error propagates.
    """
    remove = tryunlink if ignoremissing else unlink
    remove(f)
    # try removing directories that might now be empty
    try:
        removedirs(os.path.dirname(f))
    except OSError:
        pass
1689
1683
def tryunlink(f):
    """Attempt to remove a file, ignoring ENOENT errors."""
    try:
        unlink(f)
    except OSError as e:
        # a missing file is fine; anything else is a real failure
        if e.errno == errno.ENOENT:
            return
        raise
1697
1691
def makedirs(name, mode=None, notindexed=False):
    """recursive directory creation with parent mode inheritance

    Newly created directories are marked as "not to be indexed by
    the content indexing service", if ``notindexed`` is specified
    for "write" mode access.

    ``mode``, if given, is chmod'ed onto the leaf directory only.
    """
    try:
        makedir(name, notindexed)
    except OSError as err:
        if err.errno == errno.EEXIST:
            # already there; nothing to do (and no chmod, matching the
            # "inherit" behavior for pre-existing directories)
            return
        if err.errno != errno.ENOENT or not name:
            raise
        # parent is missing: create it recursively, then retry the leaf
        parent = os.path.dirname(os.path.abspath(name))
        if parent == name:
            raise
        makedirs(parent, mode, notindexed)
        try:
            makedir(name, notindexed)
        except OSError as err:
            # Catch EEXIST to handle races
            if err.errno == errno.EEXIST:
                return
            raise
    if mode is not None:
        os.chmod(name, mode)
1725
1719
def readfile(path):
    """Return the entire contents of ``path`` as bytes."""
    fp = open(path, 'rb')
    try:
        return fp.read()
    finally:
        fp.close()
1729
1723
def writefile(path, text):
    """Create or truncate ``path`` and write the bytes ``text`` to it."""
    fp = open(path, 'wb')
    try:
        fp.write(text)
    finally:
        fp.close()
1733
1727
def appendfile(path, text):
    """Append the bytes ``text`` to ``path``, creating it if missing."""
    fp = open(path, 'ab')
    try:
        fp.write(text)
    finally:
        fp.close()
1737
1731
class chunkbuffer(object):
    """Allow arbitrary sized chunks of data to be efficiently read from an
    iterator over chunks of arbitrary size."""

    def __init__(self, in_iter):
        """in_iter is the iterator that's iterating over the input chunks."""
        def splitbig(chunks):
            # re-chunk anything over 1MB into 256KB pieces so one huge
            # input chunk cannot dominate memory
            for chunk in chunks:
                if len(chunk) > 2**20:
                    pos = 0
                    while pos < len(chunk):
                        end = pos + 2 ** 18
                        yield chunk[pos:end]
                        pos = end
                else:
                    yield chunk
        self.iter = splitbig(in_iter)
        self._queue = collections.deque()
        # offset of the first unconsumed byte within self._queue[0]
        self._chunkoffset = 0

    def read(self, l=None):
        """Read L bytes of data from the iterator of chunks of data.
        Returns less than L bytes if the iterator runs dry.

        If size parameter is omitted, read everything

        NOTE(review): the ``l is None`` path drains only ``self.iter``
        and appears to bypass any data still buffered in ``_queue`` —
        presumably callers use either sized reads or read-all, not a
        mix; confirm before relying on mixed usage.
        """
        if l is None:
            return ''.join(self.iter)

        left = l
        buf = []
        queue = self._queue
        while left > 0:
            # refill the queue with up to ~256KB of chunks at a time
            if not queue:
                target = 2**18
                for chunk in self.iter:
                    queue.append(chunk)
                    target -= len(chunk)
                    if target <= 0:
                        break
                if not queue:
                    break

            # The easy way to do this would be to queue.popleft(), modify the
            # chunk (if necessary), then queue.appendleft(). However, for cases
            # where we read partial chunk content, this incurs 2 dequeue
            # mutations and creates a new str for the remaining chunk in the
            # queue. Our code below avoids this overhead.

            chunk = queue[0]
            chunkl = len(chunk)
            offset = self._chunkoffset

            # Use full chunk.
            if offset == 0 and left >= chunkl:
                left -= chunkl
                queue.popleft()
                buf.append(chunk)
                # self._chunkoffset remains at 0.
                continue

            chunkremaining = chunkl - offset

            # Use all of unconsumed part of chunk.
            if left >= chunkremaining:
                left -= chunkremaining
                queue.popleft()
                # offset == 0 is enabled by block above, so this won't merely
                # copy via ``chunk[0:]``.
                buf.append(chunk[offset:])
                self._chunkoffset = 0

            # Partial chunk needed.
            else:
                buf.append(chunk[offset:offset + left])
                self._chunkoffset += left
                left -= chunkremaining

        return ''.join(buf)
1817
1811
def filechunkiter(f, size=131072, limit=None):
    """Generate the data in file ``f`` in chunks of at most ``size``
    (default 131072) bytes, up to optional ``limit`` total bytes
    (default: read all data). A chunk may be shorter than ``size`` at
    end of file, or when ``f`` is a socket or other file-like object
    that returns short reads.
    """
    assert size >= 0
    assert limit is None or limit >= 0
    while True:
        nbytes = size if limit is None else min(limit, size)
        # nbytes == 0 short-circuits to a falsy value and ends the loop
        piece = nbytes and f.read(nbytes)
        if not piece:
            break
        if limit:
            limit -= len(piece)
        yield piece
1838
1832
def makedate(timestamp=None):
    """Return ``timestamp`` (default: current time) as a (unixtime,
    offset) tuple, where offset is the local timezone's distance from
    UTC in seconds."""
    if timestamp is None:
        timestamp = time.time()
    if timestamp < 0:
        raise Abort(_("negative timestamp: %d") % timestamp,
                    hint=_("check your clock"))
    # difference between naive-UTC and naive-local renderings of the
    # same instant gives the zone offset
    utc = datetime.datetime.utcfromtimestamp(timestamp)
    local = datetime.datetime.fromtimestamp(timestamp)
    delta = utc - local
    return timestamp, delta.days * 86400 + delta.seconds
1851
1845
def datestr(date=None, format='%a %b %d %H:%M:%S %Y %1%2'):
    """represent a (unixtime, offset) tuple as a localized time.
    unixtime is seconds since the epoch, and offset is the time zone's
    number of seconds away from UTC.

    ``%1``/``%2`` (or ``%z``) in ``format`` expand to the signed
    hour/minute timezone fields.

    >>> datestr((0, 0))
    'Thu Jan 01 00:00:00 1970 +0000'
    >>> datestr((42, 0))
    'Thu Jan 01 00:00:42 1970 +0000'
    >>> datestr((-42, 0))
    'Wed Dec 31 23:59:18 1969 +0000'
    >>> datestr((0x7fffffff, 0))
    'Tue Jan 19 03:14:07 2038 +0000'
    >>> datestr((-0x80000000, 0))
    'Fri Dec 13 20:45:52 1901 +0000'
    """
    t, tz = date or makedate()
    if "%1" in format or "%2" in format or "%z" in format:
        # expand the zone extensions into literal "+hh"/"mm" text
        sign = (tz > 0) and "-" or "+"
        minutes = abs(tz) // 60
        q, r = divmod(minutes, 60)
        format = format.replace("%z", "%1%2")
        format = format.replace("%1", "%c%02d" % (sign, q))
        format = format.replace("%2", "%02d" % r)
    d = t - tz
    # clamp to signed 32-bit seconds (see doctest boundary cases)
    if d > 0x7fffffff:
        d = 0x7fffffff
    elif d < -0x80000000:
        d = -0x80000000
    # Never use time.gmtime() and datetime.datetime.fromtimestamp()
    # because they use the gmtime() system call which is buggy on Windows
    # for negative values.
    t = datetime.datetime(1970, 1, 1) + datetime.timedelta(seconds=d)
    s = encoding.strtolocal(t.strftime(encoding.strfromlocal(format)))
    return s
1887
1881
def shortdate(date=None):
    """turn (timestamp, tzoff) tuple into an ISO 8601 YYYY-MM-DD date."""
    return datestr(date, format='%Y-%m-%d')
1891
1885
def parsetimezone(s):
    """find a trailing timezone, if any, in string, and return a
    (offset, remainder) pair

    offset is the zone's distance from UTC in seconds (None when no
    timezone was recognized); remainder is ``s`` with the timezone
    stripped.
    """
    # named UTC aliases
    if s.endswith("GMT") or s.endswith("UTC"):
        return 0, s[:-3].rstrip()

    # Unix-style timezones [+-]hhmm
    if len(s) >= 5 and s[-5] in "+-" and s[-4:].isdigit():
        sign = 1 if s[-5] == "+" else -1
        hours, minutes = int(s[-4:-2]), int(s[-2:])
        return -sign * (hours * 60 + minutes) * 60, s[:-5].rstrip()

    # ISO8601 trailing Z
    if s.endswith("Z") and s[-2:-1].isdigit():
        return 0, s[:-1]

    # ISO8601-style [+-]hh:mm
    if (len(s) >= 6 and s[-6] in "+-" and s[-3] == ":" and
        s[-5:-3].isdigit() and s[-2:].isdigit()):
        sign = 1 if s[-6] == "+" else -1
        hours, minutes = int(s[-5:-3]), int(s[-2:])
        return -sign * (hours * 60 + minutes) * 60, s[:-6]

    return None, s
1919
1913
def strdate(string, format, defaults=None):
    """parse a localized time string and return a (unixtime, offset) tuple.
    if the string cannot be parsed, ValueError is raised.

    ``defaults`` maps field groups ('S', 'M', 'HI', 'd', 'mb', 'yY') to
    a (biased, now) pair of fallback strings appended for fields absent
    from ``format`` (see parsedate, which builds this mapping).
    """
    if defaults is None:
        defaults = {}

    # NOTE: unixtime = localunixtime + offset
    offset, date = parsetimezone(string)

    # add missing elements from defaults
    usenow = False # default to using biased defaults
    for part in ("S", "M", "HI", "d", "mb", "yY"): # decreasing specificity
        part = pycompat.bytestr(part)
        found = [True for p in part if ("%"+p) in format]
        if not found:
            # append the default value and a matching directive so
            # strptime below can consume it
            date += "@" + defaults[part][usenow]
            format += "@%" + part[0]
        else:
            # We've found a specific time element, less specific time
            # elements are relative to today
            usenow = True

    timetuple = time.strptime(encoding.strfromlocal(date),
                              encoding.strfromlocal(format))
    localunixtime = int(calendar.timegm(timetuple))
    if offset is None:
        # local timezone
        unixtime = int(time.mktime(timetuple))
        offset = unixtime - localunixtime
    else:
        unixtime = localunixtime + offset
    return unixtime, offset
1952
1946
def parsedate(date, formats=None, bias=None):
    """parse a localized date/time and return a (unixtime, offset) tuple.

    The date may be a "unixtime offset" string or in one of the specified
    formats. If the date already is a (unixtime, offset) tuple, it is returned.

    ``bias`` optionally overrides the per-field defaults used for
    fields missing from the matched format (see strdate).

    >>> parsedate(' today ') == parsedate(\
                                  datetime.date.today().strftime('%b %d'))
    True
    >>> parsedate( 'yesterday ') == parsedate((datetime.date.today() -\
                                               datetime.timedelta(days=1)\
                                              ).strftime('%b %d'))
    True
    >>> now, tz = makedate()
    >>> strnow, strtz = parsedate('now')
    >>> (strnow - now) < 1
    True
    >>> tz == strtz
    True
    """
    if bias is None:
        bias = {}
    if not date:
        return 0, 0
    if isinstance(date, tuple) and len(date) == 2:
        # already parsed
        return date
    if not formats:
        formats = defaultdateformats
    date = date.strip()

    # symbolic dates (both untranslated and translated forms)
    if date == 'now' or date == _('now'):
        return makedate()
    if date == 'today' or date == _('today'):
        date = datetime.date.today().strftime('%b %d')
    elif date == 'yesterday' or date == _('yesterday'):
        date = (datetime.date.today() -
                datetime.timedelta(days=1)).strftime('%b %d')

    try:
        # internal "unixtime offset" form
        when, offset = map(int, date.split(' '))
    except ValueError:
        # fill out defaults
        now = makedate()
        defaults = {}
        for part in ("d", "mb", "yY", "HI", "M", "S"):
            # this piece is for rounding the specific end of unknowns
            b = bias.get(part)
            if b is None:
                if part[0:1] in "HMS":
                    b = "00"
                else:
                    b = "0"

            # this piece is for matching the generic end to today's date
            n = datestr(now, "%" + part[0:1])

            defaults[part] = (b, n)

        # try each candidate format until one parses
        for format in formats:
            try:
                when, offset = strdate(date, format, defaults)
            except (ValueError, OverflowError):
                pass
            else:
                break
        else:
            raise error.ParseError(_('invalid date: %r') % date)
    # validate explicit (probably user-specified) date and
    # time zone offset. values must fit in signed 32 bits for
    # current 32-bit linux runtimes. timezones go from UTC-12
    # to UTC+14
    if when < -0x80000000 or when > 0x7fffffff:
        raise error.ParseError(_('date exceeds 32 bits: %d') % when)
    if offset < -50400 or offset > 43200:
        raise error.ParseError(_('impossible time zone offset: %d') % offset)
    return when, offset
2029
2023
def matchdate(date):
    """Return a function that matches a given date match specifier

    Formats include:

    '{date}' match a given date to the accuracy provided

    '<{date}' on or before a given date

    '>{date}' on or after a given date

    '-{days}' within the last {days} days

    '{date} to {date}' inclusive range

    >>> p1 = parsedate("10:29:59")
    >>> p2 = parsedate("10:30:00")
    >>> p3 = parsedate("10:30:59")
    >>> p4 = parsedate("10:31:00")
    >>> p5 = parsedate("Sep 15 10:30:00 1999")
    >>> f = matchdate("10:30")
    >>> f(p1[0])
    False
    >>> f(p2[0])
    True
    >>> f(p3[0])
    True
    >>> f(p4[0])
    False
    >>> f(p5[0])
    False
    """

    def lower(date):
        # earliest timestamp the spec could mean: bias missing month
        # and day fields to 1
        d = {'mb': "1", 'd': "1"}
        return parsedate(date, extendeddateformats, d)[0]

    def upper(date):
        # latest timestamp the spec could mean: bias missing fields to
        # their maxima, trying shrinking month lengths for the day
        d = {'mb': "12", 'HI': "23", 'M': "59", 'S': "59"}
        for days in ("31", "30", "29"):
            try:
                d["d"] = days
                return parsedate(date, extendeddateformats, d)[0]
            except Abort:
                pass
        d["d"] = "28"
        return parsedate(date, extendeddateformats, d)[0]

    date = date.strip()

    if not date:
        raise Abort(_("dates cannot consist entirely of whitespace"))
    elif date[0] == "<":
        if not date[1:]:
            raise Abort(_("invalid day spec, use '<DATE'"))
        when = upper(date[1:])
        return lambda x: x <= when
    elif date[0] == ">":
        if not date[1:]:
            raise Abort(_("invalid day spec, use '>DATE'"))
        when = lower(date[1:])
        return lambda x: x >= when
    elif date[0] == "-":
        try:
            days = int(date[1:])
        except ValueError:
            raise Abort(_("invalid day spec: %s") % date[1:])
        if days < 0:
            raise Abort(_("%s must be nonnegative (see 'hg help dates')")
                        % date[1:])
        when = makedate()[0] - days * 3600 * 24
        return lambda x: x >= when
    elif " to " in date:
        a, b = date.split(" to ")
        start, stop = lower(a), upper(b)
        return lambda x: x >= start and x <= stop
    else:
        # a bare date matches anywhere inside its implied range
        start, stop = lower(date), upper(date)
        return lambda x: x >= start and x <= stop
2105
2099
def stringmatcher(pattern, casesensitive=True):
    """Build a matcher from ``pattern``.

    ``pattern`` may start with a 're:' prefix (regular expression
    search) or a 'literal:' prefix (forced exact match); anything
    else, including unknown prefixes, is matched literally.

    Returns a (kind, pattern, matcher) triple: kind is 're' or
    'literal', pattern has its prefix stripped, and matcher is a
    one-argument function returning a truthy value on match. With
    casesensitive=False both kinds match case-insensitively.
    """
    if pattern.startswith('re:'):
        regexpattern = pattern[3:]
        flags = 0 if casesensitive else remod.I
        try:
            regex = remod.compile(regexpattern, flags)
        except remod.error as e:
            raise error.ParseError(_('invalid regular expression: %s')
                                   % e)
        return 're', regexpattern, regex.search

    if pattern.startswith('literal:'):
        pattern = pattern[8:]

    if casesensitive:
        return 'literal', pattern, pattern.__eq__

    ipat = encoding.lower(pattern)
    return 'literal', pattern, lambda s: ipat == encoding.lower(s)
2164
2158
def shortuser(user):
    """Return a short representation of a user name or email address."""
    # Drop everything from the first '@' (strip the mail domain).
    idx = user.find('@')
    if idx >= 0:
        user = user[:idx]
    # Keep only what follows a '<' ("Real Name <login" -> "login").
    idx = user.find('<')
    if idx >= 0:
        user = user[idx + 1:]
    # Truncate at the first space, then at the first dot.
    for sep in (' ', '.'):
        idx = user.find(sep)
        if idx >= 0:
            user = user[:idx]
    return user
2180
2174
def emailuser(user):
    """Return the user portion of an email address."""
    # Strip the domain: everything from the first '@' onwards.
    user = user.split('@', 1)[0]
    # Drop a leading "Real Name <" prefix if one is present; find()
    # returns -1 when absent, so the slice below is then a no-op.
    lt = user.find('<')
    return user[lt + 1:]
2190
2184
def email(author):
    """Return the email portion of an author string.

    The text between '<' and '>' is returned when both are present;
    with no '>' the slice runs to the end, and with no '<' it starts
    at the beginning of the string.
    """
    end = author.find('>')
    if end == -1:
        end = None
    start = author.find('<') + 1
    return author[start:end]
2197
2191
def ellipsis(text, maxlength=400):
    """Trim string to at most maxlength (default: 400) columns in display."""
    # Delegate to encoding.trim with '...' as the truncation marker.
    return encoding.trim(text, maxlength, ellipsis='...')
2201
2195
def unitcountfn(*unittable):
    """Return a function that renders a readable count of some quantity.

    unittable is a sequence of (multiplier, divisor, format) triples,
    ordered from largest unit to smallest.  The returned function picks
    the first triple whose threshold (divisor * multiplier) the count's
    magnitude reaches and formats count / divisor with it; when nothing
    matches, the last entry's format string is applied to the raw count.
    """
    def render(count):
        magnitude = abs(count)
        for multiplier, divisor, fmt in unittable:
            if magnitude >= divisor * multiplier:
                return fmt % (count / float(divisor))
        # Below every threshold: fall back to the smallest unit.
        return unittable[-1][2] % count

    return render
2212
2206
def processlinerange(fromline, toline):
    """Check that linerange <fromline>:<toline> makes sense and return a
    0-based range.

    >>> processlinerange(10, 20)
    (9, 20)
    >>> processlinerange(2, 1)
    Traceback (most recent call last):
        ...
    ParseError: line range must be positive
    >>> processlinerange(0, 5)
    Traceback (most recent call last):
        ...
    ParseError: fromline must be strictly positive
    """
    # Check order matters: an inverted range is reported before a bad
    # fromline so the error messages stay stable.
    if toline < fromline:
        raise error.ParseError(_("line range must be positive"))
    if fromline < 1:
        raise error.ParseError(_("fromline must be strictly positive"))
    return (fromline - 1, toline)
2233
2227
# bytecount renders a byte quantity as a short human-readable string,
# e.g. "1.23 MB".  Each entry is a (multiplier, divisor, format) triple
# consumed by unitcountfn: entries are ordered largest-first, the first
# threshold (divisor * multiplier) reached wins, and the final
# "%.0f bytes" entry is the fallback for small values.
bytecount = unitcountfn(
    (100, 1 << 30, _('%.0f GB')),
    (10, 1 << 30, _('%.1f GB')),
    (1, 1 << 30, _('%.2f GB')),
    (100, 1 << 20, _('%.0f MB')),
    (10, 1 << 20, _('%.1f MB')),
    (1, 1 << 20, _('%.2f MB')),
    (100, 1 << 10, _('%.0f KB')),
    (10, 1 << 10, _('%.1f KB')),
    (1, 1 << 10, _('%.2f KB')),
    (1, 1, _('%.0f bytes')),
    )
2246
2240
# Matches a single EOL which can either be a CRLF where repeated CR
# are removed or a LF. We do not care about old Macintosh files, so a
# stray CR is an error.
_eolre = remod.compile(br'\r*\n')

def tolf(s):
    """Normalize every line ending in s to a single LF."""
    return _eolre.sub('\n', s)

def tocrlf(s):
    """Normalize every line ending in s to CRLF."""
    return _eolre.sub('\r\n', s)

# Pick converters to/from the platform's native line ending: a real
# conversion when the platform uses CRLF (Windows), and pass-through
# (pycompat.identity) everywhere else.
if pycompat.oslinesep == '\r\n':
    tonativeeol = tocrlf
    fromnativeeol = tolf
else:
    tonativeeol = pycompat.identity
    fromnativeeol = pycompat.identity
2264
2258
def escapestr(s):
    """Return s (a bytestring) with special characters backslash-escaped.

    This is the codec behind ``s.encode('string_escape')`` on Python 2;
    calling ``codecs.escape_encode`` directly also works on Python 3.
    """
    escaped, _consumed = codecs.escape_encode(s)
    return escaped
2269
2263
def unescapestr(s):
    """Invert escapestr(): decode backslash escape sequences in s."""
    decoded, _consumed = codecs.escape_decode(s)
    return decoded
2272
2266
def uirepr(s):
    """repr() variant for user-facing output."""
    rendered = repr(s)
    # repr() doubles every backslash, which makes Windows paths hard to
    # read; collapse the doubled backslashes again.
    return rendered.replace('\\\\', '\\')
2276
2270
# delay import of textwrap
def MBTextWrapper(**kwargs):
    """Build (and cache) a column-width-aware textwrap.TextWrapper subclass.

    On the first call the subclass is created and this factory is then
    replaced by the class itself (see the ``global`` assignment at the
    bottom), so subsequent "calls" construct instances directly without
    re-creating the class.
    """
    class tw(textwrap.TextWrapper):
        """
        Extend TextWrapper for width-awareness.

        Neither number of 'bytes' in any encoding nor 'characters' is
        appropriate to calculate terminal columns for specified string.

        Original TextWrapper implementation uses built-in 'len()' directly,
        so overriding is needed to use width information of each characters.

        In addition, characters classified into 'ambiguous' width are
        treated as wide in East Asian area, but as narrow in other.

        This requires use decision to determine width of such characters.
        """
        def _cutdown(self, ucstr, space_left):
            # Split ucstr in two so the first part occupies at most
            # space_left display columns (per encoding.ucolwidth).
            l = 0
            colwidth = encoding.ucolwidth
            for i in xrange(len(ucstr)):
                l += colwidth(ucstr[i])
                if space_left < l:
                    return (ucstr[:i], ucstr[i:])
            return ucstr, ''

        # overriding of base class
        def _handle_long_word(self, reversed_chunks, cur_line, cur_len, width):
            space_left = max(width - cur_len, 1)

            if self.break_long_words:
                # Break the oversized chunk at the column boundary and
                # push the remainder back for the next line.
                cut, res = self._cutdown(reversed_chunks[-1], space_left)
                cur_line.append(cut)
                reversed_chunks[-1] = res
            elif not cur_line:
                # Cannot break: emit the whole chunk on its own line.
                cur_line.append(reversed_chunks.pop())

        # this overriding code is imported from TextWrapper of Python 2.6
        # to calculate columns of string by 'encoding.ucolwidth()'
        def _wrap_chunks(self, chunks):
            colwidth = encoding.ucolwidth

            lines = []
            if self.width <= 0:
                raise ValueError("invalid width %r (must be > 0)" % self.width)

            # Arrange in reverse order so items can be efficiently popped
            # from a stack of chunks.
            chunks.reverse()

            while chunks:

                # Start the list of chunks that will make up the current line.
                # cur_len is just the length of all the chunks in cur_line.
                cur_line = []
                cur_len = 0

                # Figure out which static string will prefix this line.
                if lines:
                    indent = self.subsequent_indent
                else:
                    indent = self.initial_indent

                # Maximum width for this line.
                width = self.width - len(indent)

                # First chunk on line is whitespace -- drop it, unless this
                # is the very beginning of the text (i.e. no lines started yet).
                if self.drop_whitespace and chunks[-1].strip() == r'' and lines:
                    del chunks[-1]

                while chunks:
                    l = colwidth(chunks[-1])

                    # Can at least squeeze this chunk onto the current line.
                    if cur_len + l <= width:
                        cur_line.append(chunks.pop())
                        cur_len += l

                    # Nope, this line is full.
                    else:
                        break

                # The current line is full, and the next chunk is too big to
                # fit on *any* line (not just this one).
                if chunks and colwidth(chunks[-1]) > width:
                    self._handle_long_word(chunks, cur_line, cur_len, width)

                # If the last chunk on this line is all whitespace, drop it.
                if (self.drop_whitespace and
                    cur_line and cur_line[-1].strip() == r''):
                    del cur_line[-1]

                # Convert current line back to a string and store it in list
                # of all lines (return value).
                if cur_line:
                    lines.append(indent + r''.join(cur_line))

            return lines

    # Replace this factory with the class itself so the construction
    # above only ever happens once per process.
    global MBTextWrapper
    MBTextWrapper = tw
    return tw(**kwargs)
2380
2374
def wrap(line, width, initindent='', hangindent=''):
    """Wrap byte string ``line`` to at most ``width`` display columns.

    initindent prefixes the first output line, hangindent all subsequent
    ones.  The inputs are decoded with the local encoding, wrapped with
    the width-aware MBTextWrapper, and re-encoded before returning.
    """
    maxindent = max(len(hangindent), len(initindent))
    if width <= maxindent:
        # adjust for weird terminal size
        width = max(78, maxindent + 1)
    line = line.decode(pycompat.sysstr(encoding.encoding),
                       pycompat.sysstr(encoding.encodingmode))
    initindent = initindent.decode(pycompat.sysstr(encoding.encoding),
                                   pycompat.sysstr(encoding.encodingmode))
    hangindent = hangindent.decode(pycompat.sysstr(encoding.encoding),
                                   pycompat.sysstr(encoding.encodingmode))
    wrapper = MBTextWrapper(width=width,
                            initial_indent=initindent,
                            subsequent_indent=hangindent)
    return wrapper.fill(line).encode(pycompat.sysstr(encoding.encoding))
2396
2390
if (pyplatform.python_implementation() == 'CPython' and
    sys.version_info < (3, 0)):
    # There is an issue in CPython that some IO methods do not handle EINTR
    # correctly. The following table shows what CPython version (and functions)
    # are affected (buggy: has the EINTR bug, okay: otherwise):
    #
    #                | < 2.7.4 | 2.7.4 to 2.7.12 | >= 3.0
    #   --------------------------------------------------
    #    fp.__iter__ | buggy   | buggy           | okay
    #    fp.read*    | buggy   | okay [1]        | okay
    #
    # [1]: fixed by changeset 67dc99a989cd in the cpython hg repo.
    #
    # Here we workaround the EINTR issue for fileobj.__iter__. Other methods
    # like "read*" are ignored for now, as Python < 2.7.4 is a minority.
    #
    # Although we can workaround the EINTR issue for fp.__iter__, it is slower:
    # "for x in fp" is 4x faster than "for x in iter(fp.readline, '')" in
    # CPython 2, because CPython 2 maintains an internal readahead buffer for
    # fp.__iter__ but not other fp.read* methods.
    #
    # On modern systems like Linux, the "read" syscall cannot be interrupted
    # when reading "fast" files like on-disk files. So the EINTR issue only
    # affects things like pipes, sockets, ttys etc. We treat "normal" (S_ISREG)
    # files approximately as "fast" files and use the fast (unsafe) code path,
    # to minimize the performance impact.
    if sys.version_info >= (2, 7, 4):
        # fp.readline deals with EINTR correctly, use it as a workaround.
        def _safeiterfile(fp):
            return iter(fp.readline, '')
    else:
        # fp.read* are broken too, manually deal with EINTR in a stupid way.
        # note: this may block longer than necessary because of bufsize.
        def _safeiterfile(fp, bufsize=4096):
            # Read raw bytes with os.read (retrying on EINTR) and re-split
            # them into lines, buffering any trailing partial line.
            fd = fp.fileno()
            line = ''
            while True:
                try:
                    buf = os.read(fd, bufsize)
                except OSError as ex:
                    # os.read only raises EINTR before any data is read
                    if ex.errno == errno.EINTR:
                        continue
                    else:
                        raise
                line += buf
                if '\n' in buf:
                    splitted = line.splitlines(True)
                    line = ''
                    for l in splitted:
                        if l[-1] == '\n':
                            yield l
                        else:
                            # Last piece has no EOL yet: keep buffering it.
                            line = l
                if not buf:
                    break
            if line:
                yield line

    def iterfile(fp):
        """Iterate over lines of fp, avoiding the CPython 2 EINTR bug."""
        fastpath = True
        if type(fp) is file:
            # Per the table above, regular on-disk files are treated as
            # "fast" and keep the (quicker) builtin iterator.
            fastpath = stat.S_ISREG(os.fstat(fp.fileno()).st_mode)
        if fastpath:
            return fp
        else:
            return _safeiterfile(fp)
else:
    # PyPy and CPython 3 do not have the EINTR issue thus no workaround needed.
    def iterfile(fp):
        return fp
2468
2462
def iterlines(iterator):
    """Yield every line from each text chunk produced by ``iterator``."""
    for chunk in iterator:
        # splitlines() discards the line terminators themselves.
        lines = chunk.splitlines()
        for ln in lines:
            yield ln
2473
2467
def expandpath(path):
    """Expand environment variables, then ``~user``, in ``path``."""
    expanded = os.path.expandvars(path)
    return os.path.expanduser(expanded)
2476
2470
def hgcmd():
    """Return the command used to execute current hg

    This is different from hgexecutable() because on Windows we want
    to avoid things opening new shell windows like batch files, so we
    get either the python call or current executable.
    """
    if mainfrozen():
        # Frozen (standalone) build: point at the bundled executable.
        if getattr(sys, 'frozen', None) == 'macosx_app':
            # Env variable set by py2app
            return [encoding.environ['EXECUTABLEPATH']]
        else:
            return [pycompat.sysexecutable]
    # Normal installs: delegate to the platform-specific helper.
    return gethgcmd()
2491
2485
def rundetached(args, condfn):
    """Execute the argument list in a detached process.

    condfn is a callable which is called repeatedly and should return
    True once the child process is known to have started successfully.
    At this point, the child process PID is returned. If the child
    process fails to start or finishes before condfn() evaluates to
    True, return -1.
    """
    # Windows case is easier because the child process is either
    # successfully starting and validating the condition or exiting
    # on failure. We just poll on its PID. On Unix, if the child
    # process fails to start, it will be left in a zombie state until
    # the parent wait on it, which we cannot do since we expect a long
    # running process on success. Instead we listen for SIGCHLD telling
    # us our child process terminated.
    terminated = set()
    def handler(signum, frame):
        # Reap the child and record its (pid, status) so the polling
        # loop below can detect early termination.
        terminated.add(os.wait())
    prevhandler = None
    # SIGCHLD may be absent on some platforms, hence the guard.
    SIGCHLD = getattr(signal, 'SIGCHLD', None)
    if SIGCHLD is not None:
        prevhandler = signal.signal(SIGCHLD, handler)
    try:
        pid = spawndetached(args)
        while not condfn():
            # Re-check condfn() after the child is seen dead: it may
            # have met the condition just before exiting.
            if ((pid in terminated or not testpid(pid))
                and not condfn()):
                return -1
            time.sleep(0.1)
        return pid
    finally:
        # Restore the previous SIGCHLD handler (only set if SIGCHLD
        # existed, per the guard above).
        if prevhandler is not None:
            signal.signal(signal.SIGCHLD, prevhandler)
2526
2520
def interpolate(prefix, mapping, s, fn=None, escape_prefix=False):
    """Return the result of interpolating items in the mapping into string s.

    prefix is a single character string, or a two character string with
    a backslash as the first character if the prefix needs to be escaped in
    a regular expression.

    fn is an optional function that will be applied to the replacement text
    just before replacement.

    escape_prefix is an optional flag that allows using doubled prefix for
    its escaping.
    """
    if not fn:
        fn = lambda s: s
    patterns = '|'.join(mapping.keys())
    if escape_prefix:
        # Let the prefix escape itself (e.g. '%%' -> '%'): add it to the
        # alternatives and map its bare character to itself.  Note this
        # mutates the caller's mapping, as the original code did.
        patterns += '|' + prefix
        prefix_char = prefix[1:] if len(prefix) > 1 else prefix
        mapping[prefix_char] = prefix_char
    matcher = remod.compile(r'%s(%s)' % (prefix, patterns))

    def _replace(match):
        # group() includes the matched prefix character; strip it before
        # the mapping lookup.
        return fn(mapping[match.group()[1:]])

    return matcher.sub(_replace, s)
2551
2545
def getport(port):
    """Return the port for a given network service.

    If port is an integer, it's returned as is. If it's a string, it's
    looked up using socket.getservbyname(). If there's no matching
    service, error.Abort is raised.
    """
    try:
        # Integers and numeric strings are used directly.
        return int(port)
    except ValueError:
        # Not numeric: treat it as a service name.
        try:
            return socket.getservbyname(port)
        except socket.error:
            raise Abort(_("no port number associated with service '%s'")
                        % port)
2568
2562
# Canonical spellings of boolean config values; lookup is done on the
# lowercased input, so matching is case-insensitive.
_booleans = dict(
    [(k, True) for k in ('1', 'yes', 'true', 'on', 'always')] +
    [(k, False) for k in ('0', 'no', 'false', 'off', 'never')])

def parsebool(s):
    """Parse s into a boolean.

    If s is not a valid boolean, returns None.
    """
    return _booleans.get(s.lower())
2579
2573
2580 _hextochr = dict((a + b, chr(int(a + b, 16)))
2574 _hextochr = dict((a + b, chr(int(a + b, 16)))
2581 for a in string.hexdigits for b in string.hexdigits)
2575 for a in string.hexdigits for b in string.hexdigits)
2582
2576
2583 class url(object):
2577 class url(object):
2584 r"""Reliable URL parser.
2578 r"""Reliable URL parser.
2585
2579
2586 This parses URLs and provides attributes for the following
2580 This parses URLs and provides attributes for the following
2587 components:
2581 components:
2588
2582
2589 <scheme>://<user>:<passwd>@<host>:<port>/<path>?<query>#<fragment>
2583 <scheme>://<user>:<passwd>@<host>:<port>/<path>?<query>#<fragment>
2590
2584
2591 Missing components are set to None. The only exception is
2585 Missing components are set to None. The only exception is
2592 fragment, which is set to '' if present but empty.
2586 fragment, which is set to '' if present but empty.
2593
2587
2594 If parsefragment is False, fragment is included in query. If
2588 If parsefragment is False, fragment is included in query. If
2595 parsequery is False, query is included in path. If both are
2589 parsequery is False, query is included in path. If both are
2596 False, both fragment and query are included in path.
2590 False, both fragment and query are included in path.
2597
2591
2598 See http://www.ietf.org/rfc/rfc2396.txt for more information.
2592 See http://www.ietf.org/rfc/rfc2396.txt for more information.
2599
2593
2600 Note that for backward compatibility reasons, bundle URLs do not
2594 Note that for backward compatibility reasons, bundle URLs do not
2601 take host names. That means 'bundle://../' has a path of '../'.
2595 take host names. That means 'bundle://../' has a path of '../'.
2602
2596
2603 Examples:
2597 Examples:
2604
2598
2605 >>> url('http://www.ietf.org/rfc/rfc2396.txt')
2599 >>> url('http://www.ietf.org/rfc/rfc2396.txt')
2606 <url scheme: 'http', host: 'www.ietf.org', path: 'rfc/rfc2396.txt'>
2600 <url scheme: 'http', host: 'www.ietf.org', path: 'rfc/rfc2396.txt'>
2607 >>> url('ssh://[::1]:2200//home/joe/repo')
2601 >>> url('ssh://[::1]:2200//home/joe/repo')
2608 <url scheme: 'ssh', host: '[::1]', port: '2200', path: '/home/joe/repo'>
2602 <url scheme: 'ssh', host: '[::1]', port: '2200', path: '/home/joe/repo'>
2609 >>> url('file:///home/joe/repo')
2603 >>> url('file:///home/joe/repo')
2610 <url scheme: 'file', path: '/home/joe/repo'>
2604 <url scheme: 'file', path: '/home/joe/repo'>
2611 >>> url('file:///c:/temp/foo/')
2605 >>> url('file:///c:/temp/foo/')
2612 <url scheme: 'file', path: 'c:/temp/foo/'>
2606 <url scheme: 'file', path: 'c:/temp/foo/'>
2613 >>> url('bundle:foo')
2607 >>> url('bundle:foo')
2614 <url scheme: 'bundle', path: 'foo'>
2608 <url scheme: 'bundle', path: 'foo'>
2615 >>> url('bundle://../foo')
2609 >>> url('bundle://../foo')
2616 <url scheme: 'bundle', path: '../foo'>
2610 <url scheme: 'bundle', path: '../foo'>
2617 >>> url(r'c:\foo\bar')
2611 >>> url(r'c:\foo\bar')
2618 <url path: 'c:\\foo\\bar'>
2612 <url path: 'c:\\foo\\bar'>
2619 >>> url(r'\\blah\blah\blah')
2613 >>> url(r'\\blah\blah\blah')
2620 <url path: '\\\\blah\\blah\\blah'>
2614 <url path: '\\\\blah\\blah\\blah'>
2621 >>> url(r'\\blah\blah\blah#baz')
2615 >>> url(r'\\blah\blah\blah#baz')
2622 <url path: '\\\\blah\\blah\\blah', fragment: 'baz'>
2616 <url path: '\\\\blah\\blah\\blah', fragment: 'baz'>
2623 >>> url(r'file:///C:\users\me')
2617 >>> url(r'file:///C:\users\me')
2624 <url scheme: 'file', path: 'C:\\users\\me'>
2618 <url scheme: 'file', path: 'C:\\users\\me'>
2625
2619
2626 Authentication credentials:
2620 Authentication credentials:
2627
2621
2628 >>> url('ssh://joe:xyz@x/repo')
2622 >>> url('ssh://joe:xyz@x/repo')
2629 <url scheme: 'ssh', user: 'joe', passwd: 'xyz', host: 'x', path: 'repo'>
2623 <url scheme: 'ssh', user: 'joe', passwd: 'xyz', host: 'x', path: 'repo'>
2630 >>> url('ssh://joe@x/repo')
2624 >>> url('ssh://joe@x/repo')
2631 <url scheme: 'ssh', user: 'joe', host: 'x', path: 'repo'>
2625 <url scheme: 'ssh', user: 'joe', host: 'x', path: 'repo'>
2632
2626
2633 Query strings and fragments:
2627 Query strings and fragments:
2634
2628
2635 >>> url('http://host/a?b#c')
2629 >>> url('http://host/a?b#c')
2636 <url scheme: 'http', host: 'host', path: 'a', query: 'b', fragment: 'c'>
2630 <url scheme: 'http', host: 'host', path: 'a', query: 'b', fragment: 'c'>
2637 >>> url('http://host/a?b#c', parsequery=False, parsefragment=False)
2631 >>> url('http://host/a?b#c', parsequery=False, parsefragment=False)
2638 <url scheme: 'http', host: 'host', path: 'a?b#c'>
2632 <url scheme: 'http', host: 'host', path: 'a?b#c'>
2639
2633
2640 Empty path:
2634 Empty path:
2641
2635
2642 >>> url('')
2636 >>> url('')
2643 <url path: ''>
2637 <url path: ''>
2644 >>> url('#a')
2638 >>> url('#a')
2645 <url path: '', fragment: 'a'>
2639 <url path: '', fragment: 'a'>
2646 >>> url('http://host/')
2640 >>> url('http://host/')
2647 <url scheme: 'http', host: 'host', path: ''>
2641 <url scheme: 'http', host: 'host', path: ''>
2648 >>> url('http://host/#a')
2642 >>> url('http://host/#a')
2649 <url scheme: 'http', host: 'host', path: '', fragment: 'a'>
2643 <url scheme: 'http', host: 'host', path: '', fragment: 'a'>
2650
2644
2651 Only scheme:
2645 Only scheme:
2652
2646
2653 >>> url('http:')
2647 >>> url('http:')
2654 <url scheme: 'http'>
2648 <url scheme: 'http'>
2655 """
2649 """
2656
2650
2657 _safechars = "!~*'()+"
2651 _safechars = "!~*'()+"
2658 _safepchars = "/!~*'()+:\\"
2652 _safepchars = "/!~*'()+:\\"
2659 _matchscheme = remod.compile('^[a-zA-Z0-9+.\\-]+:').match
2653 _matchscheme = remod.compile('^[a-zA-Z0-9+.\\-]+:').match
2660
2654
2661 def __init__(self, path, parsequery=True, parsefragment=True):
2655 def __init__(self, path, parsequery=True, parsefragment=True):
2662 # We slowly chomp away at path until we have only the path left
2656 # We slowly chomp away at path until we have only the path left
2663 self.scheme = self.user = self.passwd = self.host = None
2657 self.scheme = self.user = self.passwd = self.host = None
2664 self.port = self.path = self.query = self.fragment = None
2658 self.port = self.path = self.query = self.fragment = None
2665 self._localpath = True
2659 self._localpath = True
2666 self._hostport = ''
2660 self._hostport = ''
2667 self._origpath = path
2661 self._origpath = path
2668
2662
2669 if parsefragment and '#' in path:
2663 if parsefragment and '#' in path:
2670 path, self.fragment = path.split('#', 1)
2664 path, self.fragment = path.split('#', 1)
2671
2665
2672 # special case for Windows drive letters and UNC paths
2666 # special case for Windows drive letters and UNC paths
2673 if hasdriveletter(path) or path.startswith('\\\\'):
2667 if hasdriveletter(path) or path.startswith('\\\\'):
2674 self.path = path
2668 self.path = path
2675 return
2669 return
2676
2670
2677 # For compatibility reasons, we can't handle bundle paths as
2671 # For compatibility reasons, we can't handle bundle paths as
2678 # normal URLS
2672 # normal URLS
2679 if path.startswith('bundle:'):
2673 if path.startswith('bundle:'):
2680 self.scheme = 'bundle'
2674 self.scheme = 'bundle'
2681 path = path[7:]
2675 path = path[7:]
2682 if path.startswith('//'):
2676 if path.startswith('//'):
2683 path = path[2:]
2677 path = path[2:]
2684 self.path = path
2678 self.path = path
2685 return
2679 return
2686
2680
2687 if self._matchscheme(path):
2681 if self._matchscheme(path):
2688 parts = path.split(':', 1)
2682 parts = path.split(':', 1)
2689 if parts[0]:
2683 if parts[0]:
2690 self.scheme, path = parts
2684 self.scheme, path = parts
2691 self._localpath = False
2685 self._localpath = False
2692
2686
2693 if not path:
2687 if not path:
2694 path = None
2688 path = None
2695 if self._localpath:
2689 if self._localpath:
2696 self.path = ''
2690 self.path = ''
2697 return
2691 return
2698 else:
2692 else:
2699 if self._localpath:
2693 if self._localpath:
2700 self.path = path
2694 self.path = path
2701 return
2695 return
2702
2696
2703 if parsequery and '?' in path:
2697 if parsequery and '?' in path:
2704 path, self.query = path.split('?', 1)
2698 path, self.query = path.split('?', 1)
2705 if not path:
2699 if not path:
2706 path = None
2700 path = None
2707 if not self.query:
2701 if not self.query:
2708 self.query = None
2702 self.query = None
2709
2703
2710 # // is required to specify a host/authority
2704 # // is required to specify a host/authority
2711 if path and path.startswith('//'):
2705 if path and path.startswith('//'):
2712 parts = path[2:].split('/', 1)
2706 parts = path[2:].split('/', 1)
2713 if len(parts) > 1:
2707 if len(parts) > 1:
2714 self.host, path = parts
2708 self.host, path = parts
2715 else:
2709 else:
2716 self.host = parts[0]
2710 self.host = parts[0]
2717 path = None
2711 path = None
2718 if not self.host:
2712 if not self.host:
2719 self.host = None
2713 self.host = None
2720 # path of file:///d is /d
2714 # path of file:///d is /d
2721 # path of file:///d:/ is d:/, not /d:/
2715 # path of file:///d:/ is d:/, not /d:/
2722 if path and not hasdriveletter(path):
2716 if path and not hasdriveletter(path):
2723 path = '/' + path
2717 path = '/' + path
2724
2718
2725 if self.host and '@' in self.host:
2719 if self.host and '@' in self.host:
2726 self.user, self.host = self.host.rsplit('@', 1)
2720 self.user, self.host = self.host.rsplit('@', 1)
2727 if ':' in self.user:
2721 if ':' in self.user:
2728 self.user, self.passwd = self.user.split(':', 1)
2722 self.user, self.passwd = self.user.split(':', 1)
2729 if not self.host:
2723 if not self.host:
2730 self.host = None
2724 self.host = None
2731
2725
2732 # Don't split on colons in IPv6 addresses without ports
2726 # Don't split on colons in IPv6 addresses without ports
2733 if (self.host and ':' in self.host and
2727 if (self.host and ':' in self.host and
2734 not (self.host.startswith('[') and self.host.endswith(']'))):
2728 not (self.host.startswith('[') and self.host.endswith(']'))):
2735 self._hostport = self.host
2729 self._hostport = self.host
2736 self.host, self.port = self.host.rsplit(':', 1)
2730 self.host, self.port = self.host.rsplit(':', 1)
2737 if not self.host:
2731 if not self.host:
2738 self.host = None
2732 self.host = None
2739
2733
2740 if (self.host and self.scheme == 'file' and
2734 if (self.host and self.scheme == 'file' and
2741 self.host not in ('localhost', '127.0.0.1', '[::1]')):
2735 self.host not in ('localhost', '127.0.0.1', '[::1]')):
2742 raise Abort(_('file:// URLs can only refer to localhost'))
2736 raise Abort(_('file:// URLs can only refer to localhost'))
2743
2737
2744 self.path = path
2738 self.path = path
2745
2739
2746 # leave the query string escaped
2740 # leave the query string escaped
2747 for a in ('user', 'passwd', 'host', 'port',
2741 for a in ('user', 'passwd', 'host', 'port',
2748 'path', 'fragment'):
2742 'path', 'fragment'):
2749 v = getattr(self, a)
2743 v = getattr(self, a)
2750 if v is not None:
2744 if v is not None:
2751 setattr(self, a, urlreq.unquote(v))
2745 setattr(self, a, urlreq.unquote(v))
2752
2746
2753 def __repr__(self):
2747 def __repr__(self):
2754 attrs = []
2748 attrs = []
2755 for a in ('scheme', 'user', 'passwd', 'host', 'port', 'path',
2749 for a in ('scheme', 'user', 'passwd', 'host', 'port', 'path',
2756 'query', 'fragment'):
2750 'query', 'fragment'):
2757 v = getattr(self, a)
2751 v = getattr(self, a)
2758 if v is not None:
2752 if v is not None:
2759 attrs.append('%s: %r' % (a, v))
2753 attrs.append('%s: %r' % (a, v))
2760 return '<url %s>' % ', '.join(attrs)
2754 return '<url %s>' % ', '.join(attrs)
2761
2755
2762 def __bytes__(self):
2756 def __bytes__(self):
2763 r"""Join the URL's components back into a URL string.
2757 r"""Join the URL's components back into a URL string.
2764
2758
2765 Examples:
2759 Examples:
2766
2760
2767 >>> str(url('http://user:pw@host:80/c:/bob?fo:oo#ba:ar'))
2761 >>> str(url('http://user:pw@host:80/c:/bob?fo:oo#ba:ar'))
2768 'http://user:pw@host:80/c:/bob?fo:oo#ba:ar'
2762 'http://user:pw@host:80/c:/bob?fo:oo#ba:ar'
2769 >>> str(url('http://user:pw@host:80/?foo=bar&baz=42'))
2763 >>> str(url('http://user:pw@host:80/?foo=bar&baz=42'))
2770 'http://user:pw@host:80/?foo=bar&baz=42'
2764 'http://user:pw@host:80/?foo=bar&baz=42'
2771 >>> str(url('http://user:pw@host:80/?foo=bar%3dbaz'))
2765 >>> str(url('http://user:pw@host:80/?foo=bar%3dbaz'))
2772 'http://user:pw@host:80/?foo=bar%3dbaz'
2766 'http://user:pw@host:80/?foo=bar%3dbaz'
2773 >>> str(url('ssh://user:pw@[::1]:2200//home/joe#'))
2767 >>> str(url('ssh://user:pw@[::1]:2200//home/joe#'))
2774 'ssh://user:pw@[::1]:2200//home/joe#'
2768 'ssh://user:pw@[::1]:2200//home/joe#'
2775 >>> str(url('http://localhost:80//'))
2769 >>> str(url('http://localhost:80//'))
2776 'http://localhost:80//'
2770 'http://localhost:80//'
2777 >>> str(url('http://localhost:80/'))
2771 >>> str(url('http://localhost:80/'))
2778 'http://localhost:80/'
2772 'http://localhost:80/'
2779 >>> str(url('http://localhost:80'))
2773 >>> str(url('http://localhost:80'))
2780 'http://localhost:80/'
2774 'http://localhost:80/'
2781 >>> str(url('bundle:foo'))
2775 >>> str(url('bundle:foo'))
2782 'bundle:foo'
2776 'bundle:foo'
2783 >>> str(url('bundle://../foo'))
2777 >>> str(url('bundle://../foo'))
2784 'bundle:../foo'
2778 'bundle:../foo'
2785 >>> str(url('path'))
2779 >>> str(url('path'))
2786 'path'
2780 'path'
2787 >>> str(url('file:///tmp/foo/bar'))
2781 >>> str(url('file:///tmp/foo/bar'))
2788 'file:///tmp/foo/bar'
2782 'file:///tmp/foo/bar'
2789 >>> str(url('file:///c:/tmp/foo/bar'))
2783 >>> str(url('file:///c:/tmp/foo/bar'))
2790 'file:///c:/tmp/foo/bar'
2784 'file:///c:/tmp/foo/bar'
2791 >>> print url(r'bundle:foo\bar')
2785 >>> print url(r'bundle:foo\bar')
2792 bundle:foo\bar
2786 bundle:foo\bar
2793 >>> print url(r'file:///D:\data\hg')
2787 >>> print url(r'file:///D:\data\hg')
2794 file:///D:\data\hg
2788 file:///D:\data\hg
2795 """
2789 """
2796 if self._localpath:
2790 if self._localpath:
2797 s = self.path
2791 s = self.path
2798 if self.scheme == 'bundle':
2792 if self.scheme == 'bundle':
2799 s = 'bundle:' + s
2793 s = 'bundle:' + s
2800 if self.fragment:
2794 if self.fragment:
2801 s += '#' + self.fragment
2795 s += '#' + self.fragment
2802 return s
2796 return s
2803
2797
2804 s = self.scheme + ':'
2798 s = self.scheme + ':'
2805 if self.user or self.passwd or self.host:
2799 if self.user or self.passwd or self.host:
2806 s += '//'
2800 s += '//'
2807 elif self.scheme and (not self.path or self.path.startswith('/')
2801 elif self.scheme and (not self.path or self.path.startswith('/')
2808 or hasdriveletter(self.path)):
2802 or hasdriveletter(self.path)):
2809 s += '//'
2803 s += '//'
2810 if hasdriveletter(self.path):
2804 if hasdriveletter(self.path):
2811 s += '/'
2805 s += '/'
2812 if self.user:
2806 if self.user:
2813 s += urlreq.quote(self.user, safe=self._safechars)
2807 s += urlreq.quote(self.user, safe=self._safechars)
2814 if self.passwd:
2808 if self.passwd:
2815 s += ':' + urlreq.quote(self.passwd, safe=self._safechars)
2809 s += ':' + urlreq.quote(self.passwd, safe=self._safechars)
2816 if self.user or self.passwd:
2810 if self.user or self.passwd:
2817 s += '@'
2811 s += '@'
2818 if self.host:
2812 if self.host:
2819 if not (self.host.startswith('[') and self.host.endswith(']')):
2813 if not (self.host.startswith('[') and self.host.endswith(']')):
2820 s += urlreq.quote(self.host)
2814 s += urlreq.quote(self.host)
2821 else:
2815 else:
2822 s += self.host
2816 s += self.host
2823 if self.port:
2817 if self.port:
2824 s += ':' + urlreq.quote(self.port)
2818 s += ':' + urlreq.quote(self.port)
2825 if self.host:
2819 if self.host:
2826 s += '/'
2820 s += '/'
2827 if self.path:
2821 if self.path:
2828 # TODO: similar to the query string, we should not unescape the
2822 # TODO: similar to the query string, we should not unescape the
2829 # path when we store it, the path might contain '%2f' = '/',
2823 # path when we store it, the path might contain '%2f' = '/',
2830 # which we should *not* escape.
2824 # which we should *not* escape.
2831 s += urlreq.quote(self.path, safe=self._safepchars)
2825 s += urlreq.quote(self.path, safe=self._safepchars)
2832 if self.query:
2826 if self.query:
2833 # we store the query in escaped form.
2827 # we store the query in escaped form.
2834 s += '?' + self.query
2828 s += '?' + self.query
2835 if self.fragment is not None:
2829 if self.fragment is not None:
2836 s += '#' + urlreq.quote(self.fragment, safe=self._safepchars)
2830 s += '#' + urlreq.quote(self.fragment, safe=self._safepchars)
2837 return s
2831 return s
2838
2832
2839 __str__ = encoding.strmethod(__bytes__)
2833 __str__ = encoding.strmethod(__bytes__)
2840
2834
2841 def authinfo(self):
2835 def authinfo(self):
2842 user, passwd = self.user, self.passwd
2836 user, passwd = self.user, self.passwd
2843 try:
2837 try:
2844 self.user, self.passwd = None, None
2838 self.user, self.passwd = None, None
2845 s = bytes(self)
2839 s = bytes(self)
2846 finally:
2840 finally:
2847 self.user, self.passwd = user, passwd
2841 self.user, self.passwd = user, passwd
2848 if not self.user:
2842 if not self.user:
2849 return (s, None)
2843 return (s, None)
2850 # authinfo[1] is passed to urllib2 password manager, and its
2844 # authinfo[1] is passed to urllib2 password manager, and its
2851 # URIs must not contain credentials. The host is passed in the
2845 # URIs must not contain credentials. The host is passed in the
2852 # URIs list because Python < 2.4.3 uses only that to search for
2846 # URIs list because Python < 2.4.3 uses only that to search for
2853 # a password.
2847 # a password.
2854 return (s, (None, (s, self.host),
2848 return (s, (None, (s, self.host),
2855 self.user, self.passwd or ''))
2849 self.user, self.passwd or ''))
2856
2850
2857 def isabs(self):
2851 def isabs(self):
2858 if self.scheme and self.scheme != 'file':
2852 if self.scheme and self.scheme != 'file':
2859 return True # remote URL
2853 return True # remote URL
2860 if hasdriveletter(self.path):
2854 if hasdriveletter(self.path):
2861 return True # absolute for our purposes - can't be joined()
2855 return True # absolute for our purposes - can't be joined()
2862 if self.path.startswith(br'\\'):
2856 if self.path.startswith(br'\\'):
2863 return True # Windows UNC path
2857 return True # Windows UNC path
2864 if self.path.startswith('/'):
2858 if self.path.startswith('/'):
2865 return True # POSIX-style
2859 return True # POSIX-style
2866 return False
2860 return False
2867
2861
2868 def localpath(self):
2862 def localpath(self):
2869 if self.scheme == 'file' or self.scheme == 'bundle':
2863 if self.scheme == 'file' or self.scheme == 'bundle':
2870 path = self.path or '/'
2864 path = self.path or '/'
2871 # For Windows, we need to promote hosts containing drive
2865 # For Windows, we need to promote hosts containing drive
2872 # letters to paths with drive letters.
2866 # letters to paths with drive letters.
2873 if hasdriveletter(self._hostport):
2867 if hasdriveletter(self._hostport):
2874 path = self._hostport + '/' + self.path
2868 path = self._hostport + '/' + self.path
2875 elif (self.host is not None and self.path
2869 elif (self.host is not None and self.path
2876 and not hasdriveletter(path)):
2870 and not hasdriveletter(path)):
2877 path = '/' + path
2871 path = '/' + path
2878 return path
2872 return path
2879 return self._origpath
2873 return self._origpath
2880
2874
2881 def islocal(self):
2875 def islocal(self):
2882 '''whether localpath will return something that posixfile can open'''
2876 '''whether localpath will return something that posixfile can open'''
2883 return (not self.scheme or self.scheme == 'file'
2877 return (not self.scheme or self.scheme == 'file'
2884 or self.scheme == 'bundle')
2878 or self.scheme == 'bundle')
2885
2879
def hasscheme(path):
    """True if *path* carries an explicit URL scheme (e.g. ``http:``)."""
    u = url(path)
    return bool(u.scheme)
2888
2882
def hasdriveletter(path):
    """True if *path* begins with a Windows drive letter like ``c:``."""
    return path and path[0:1].isalpha() and path[1:2] == ':'
2891
2885
def urllocalpath(path):
    """Parse *path* as a URL (no query/fragment) and return its local path."""
    u = url(path, parsequery=False, parsefragment=False)
    return u.localpath()
2894
2888
def hidepassword(u):
    '''hide user credential in a url string'''
    parsed = url(u)
    if parsed.passwd:
        # mask rather than drop, so the URL still shows auth was present
        parsed.passwd = '***'
    return bytes(parsed)
2901
2895
def removeauth(u):
    '''remove all authentication information from a url string'''
    u = url(u)
    u.user = u.passwd = None
    # Use bytes() like hidepassword() above: these URL objects are
    # byte-string oriented, and str() would misbehave on Python 3.
    return bytes(u)
2907
2901
# Format a duration in seconds with the finest unit that keeps the
# value readable; each entry is (threshold, divisor, format).
timecount = unitcountfn(
    (1, 1e3, _('%.0f s')),
    (100, 1, _('%.1f s')),
    (10, 1, _('%.2f s')),
    (1, 1, _('%.3f s')),
    (100, 0.001, _('%.1f ms')),
    (10, 0.001, _('%.2f ms')),
    (1, 0.001, _('%.3f ms')),
    (100, 0.000001, _('%.1f us')),
    (10, 0.000001, _('%.2f us')),
    (1, 0.000001, _('%.3f us')),
    (100, 0.000000001, _('%.1f ns')),
    (10, 0.000000001, _('%.2f ns')),
    (1, 0.000000001, _('%.3f ns')),
    )

# Current indentation depth of nested @timed calls (shared mutable cell).
_timenesting = [0]
2925
2919
def timed(func):
    '''Report the execution time of a function call to stderr.

    During development, use as a decorator when you need to measure
    the cost of a function, e.g. as follows:

    @util.timed
    def foo(a, b, c):
        pass
    '''

    def wrapper(*args, **kwargs):
        begin = timer()
        step = 2
        _timenesting[0] += step
        try:
            return func(*args, **kwargs)
        finally:
            # Report even when func raises; nested calls indent further.
            elapsed = timer() - begin
            _timenesting[0] -= step
            stderr.write('%s%s: %s\n' %
                         (' ' * _timenesting[0], func.__name__,
                          timecount(elapsed)))
    return wrapper
2950
2944
# Suffix -> multiplier; order matters: single letters are tried first.
_sizeunits = (('m', 2**20), ('k', 2**10), ('g', 2**30),
              ('kb', 2**10), ('mb', 2**20), ('gb', 2**30), ('b', 1))

def sizetoint(s):
    '''Convert a space specifier to a byte count.

    >>> sizetoint('30')
    30
    >>> sizetoint('2.2kb')
    2252
    >>> sizetoint('6M')
    6291456
    '''
    spec = s.strip().lower()
    try:
        for suffix, multiplier in _sizeunits:
            if spec.endswith(suffix):
                return int(float(spec[:-len(suffix)]) * multiplier)
        # No known suffix: treat as a plain byte count.
        return int(spec)
    except ValueError:
        raise error.ParseError(_("couldn't parse size: %s") % s)
2972
2966
class hooks(object):
    '''A collection of hook functions that can be used to extend a
    function's behavior. Hooks are called in lexicographic order,
    based on the names of their sources.'''

    def __init__(self):
        # list of (source-name, callable) pairs
        self._hooks = []

    def add(self, source, hook):
        """Register *hook*, attributed to *source* (used for ordering)."""
        self._hooks.append((source, hook))

    def __call__(self, *args):
        """Invoke every hook with *args*; return their results in order."""
        self._hooks.sort(key=lambda pair: pair[0])
        return [fn(*args) for _source, fn in self._hooks]
2990
2984
def getstackframes(skip=0, line=' %-*s in %s\n', fileline='%s:%s', depth=0):
    '''Yields lines for a nicely formatted stacktrace.
    Skips the 'skip' last entries, then return the last 'depth' entries.
    Each file+linenumber is formatted according to fileline.
    Each line is formatted according to line.
    If line is None, it yields:
        length of longest filepath+line number,
        filepath+linenumber,
        function

    Not be used in production code but very convenient while developing.
    '''
    frames = traceback.extract_stack()[:-skip - 1]
    entries = [(fileline % (fn, ln), func)
               for fn, ln, func, _text in frames][-depth:]
    if not entries:
        return
    # Width of the widest location, for column alignment.
    fnmax = max(len(fnln) for fnln, _func in entries)
    for fnln, func in entries:
        if line is None:
            yield (fnmax, fnln, func)
        else:
            yield line % (fnmax, fnln, func)
3013
3007
def debugstacktrace(msg='stacktrace', skip=0,
                    f=stderr, otherf=stdout, depth=0):
    '''Writes a message to f (stderr) with a nicely formatted stacktrace.
    Skips the 'skip' entries closest to the call, then show 'depth' entries.
    By default it will flush stdout first.
    It can be used everywhere and intentionally does not require an ui object.
    Not be used in production code but very convenient while developing.
    '''
    if otherf:
        # Flush the other stream first so interleaved output stays ordered.
        otherf.flush()
    f.write('%s at:\n' % msg.rstrip())
    # skip + 1 also hides this function's own frame.
    for line in getstackframes(skip + 1, depth=depth):
        f.write(line)
    f.flush()
class dirs(object):
    '''a multiset of directory names from a dirstate or manifest'''

    def __init__(self, map, skip=None):
        self._dirs = {}
        addpath = self.addpath
        if skip is not None and safehasattr(map, 'iteritems'):
            # dirstate-style map: each value's first element is the state
            # character; entries whose state equals ``skip`` are excluded.
            for f, s in map.iteritems():
                if s[0] != skip:
                    addpath(f)
        else:
            for f in map:
                addpath(f)

    def addpath(self, path):
        counts = self._dirs
        for base in finddirs(path):
            if base in counts:
                # All shallower ancestors are already counted; stop early.
                counts[base] += 1
                return
            counts[base] = 1

    def delpath(self, path):
        counts = self._dirs
        for base in finddirs(path):
            if counts[base] > 1:
                # Ancestors still referenced by other paths; stop early.
                counts[base] -= 1
                return
            del counts[base]

    def __iter__(self):
        return iter(self._dirs)

    def __contains__(self, d):
        return d in self._dirs

# Prefer the C implementation from parsers when it is available.
if safehasattr(parsers, 'dirs'):
    dirs = parsers.dirs
3067
3061
def finddirs(path):
    """Yield each ancestor directory of slash-separated *path*, deepest
    first (the path itself is not yielded)."""
    cut = path.rfind('/')
    while cut != -1:
        yield path[:cut]
        cut = path.rfind('/', 0, cut)
3073
3067
# compression code

# Roles a peer can play when negotiating wire-protocol compression.
SERVERROLE = 'server'
CLIENTROLE = 'client'

# Describes an engine's wire-protocol support: its wire name and its
# priority when acting as server or client.
compewireprotosupport = collections.namedtuple(u'compenginewireprotosupport',
                                               (u'name', u'serverpriority',
                                                u'clientpriority'))
3082
3076
3083 class compressormanager(object):
3077 class compressormanager(object):
3084 """Holds registrations of various compression engines.
3078 """Holds registrations of various compression engines.
3085
3079
3086 This class essentially abstracts the differences between compression
3080 This class essentially abstracts the differences between compression
3087 engines to allow new compression formats to be added easily, possibly from
3081 engines to allow new compression formats to be added easily, possibly from
3088 extensions.
3082 extensions.
3089
3083
3090 Compressors are registered against the global instance by calling its
3084 Compressors are registered against the global instance by calling its
3091 ``register()`` method.
3085 ``register()`` method.
3092 """
3086 """
3093 def __init__(self):
3087 def __init__(self):
3094 self._engines = {}
3088 self._engines = {}
3095 # Bundle spec human name to engine name.
3089 # Bundle spec human name to engine name.
3096 self._bundlenames = {}
3090 self._bundlenames = {}
3097 # Internal bundle identifier to engine name.
3091 # Internal bundle identifier to engine name.
3098 self._bundletypes = {}
3092 self._bundletypes = {}
3099 # Revlog header to engine name.
3093 # Revlog header to engine name.
3100 self._revlogheaders = {}
3094 self._revlogheaders = {}
3101 # Wire proto identifier to engine name.
3095 # Wire proto identifier to engine name.
3102 self._wiretypes = {}
3096 self._wiretypes = {}
3103
3097
3104 def __getitem__(self, key):
3098 def __getitem__(self, key):
3105 return self._engines[key]
3099 return self._engines[key]
3106
3100
3107 def __contains__(self, key):
3101 def __contains__(self, key):
3108 return key in self._engines
3102 return key in self._engines
3109
3103
3110 def __iter__(self):
3104 def __iter__(self):
3111 return iter(self._engines.keys())
3105 return iter(self._engines.keys())
3112
3106
3113 def register(self, engine):
3107 def register(self, engine):
3114 """Register a compression engine with the manager.
3108 """Register a compression engine with the manager.
3115
3109
3116 The argument must be a ``compressionengine`` instance.
3110 The argument must be a ``compressionengine`` instance.
3117 """
3111 """
3118 if not isinstance(engine, compressionengine):
3112 if not isinstance(engine, compressionengine):
3119 raise ValueError(_('argument must be a compressionengine'))
3113 raise ValueError(_('argument must be a compressionengine'))
3120
3114
3121 name = engine.name()
3115 name = engine.name()
3122
3116
3123 if name in self._engines:
3117 if name in self._engines:
3124 raise error.Abort(_('compression engine %s already registered') %
3118 raise error.Abort(_('compression engine %s already registered') %
3125 name)
3119 name)
3126
3120
3127 bundleinfo = engine.bundletype()
3121 bundleinfo = engine.bundletype()
3128 if bundleinfo:
3122 if bundleinfo:
3129 bundlename, bundletype = bundleinfo
3123 bundlename, bundletype = bundleinfo
3130
3124
3131 if bundlename in self._bundlenames:
3125 if bundlename in self._bundlenames:
3132 raise error.Abort(_('bundle name %s already registered') %
3126 raise error.Abort(_('bundle name %s already registered') %
3133 bundlename)
3127 bundlename)
3134 if bundletype in self._bundletypes:
3128 if bundletype in self._bundletypes:
3135 raise error.Abort(_('bundle type %s already registered by %s') %
3129 raise error.Abort(_('bundle type %s already registered by %s') %
3136 (bundletype, self._bundletypes[bundletype]))
3130 (bundletype, self._bundletypes[bundletype]))
3137
3131
3138 # No external facing name declared.
3132 # No external facing name declared.
3139 if bundlename:
3133 if bundlename:
3140 self._bundlenames[bundlename] = name
3134 self._bundlenames[bundlename] = name
3141
3135
3142 self._bundletypes[bundletype] = name
3136 self._bundletypes[bundletype] = name
3143
3137
3144 wiresupport = engine.wireprotosupport()
3138 wiresupport = engine.wireprotosupport()
3145 if wiresupport:
3139 if wiresupport:
3146 wiretype = wiresupport.name
3140 wiretype = wiresupport.name
3147 if wiretype in self._wiretypes:
3141 if wiretype in self._wiretypes:
3148 raise error.Abort(_('wire protocol compression %s already '
3142 raise error.Abort(_('wire protocol compression %s already '
3149 'registered by %s') %
3143 'registered by %s') %
3150 (wiretype, self._wiretypes[wiretype]))
3144 (wiretype, self._wiretypes[wiretype]))
3151
3145
3152 self._wiretypes[wiretype] = name
3146 self._wiretypes[wiretype] = name
3153
3147
3154 revlogheader = engine.revlogheader()
3148 revlogheader = engine.revlogheader()
3155 if revlogheader and revlogheader in self._revlogheaders:
3149 if revlogheader and revlogheader in self._revlogheaders:
3156 raise error.Abort(_('revlog header %s already registered by %s') %
3150 raise error.Abort(_('revlog header %s already registered by %s') %
3157 (revlogheader, self._revlogheaders[revlogheader]))
3151 (revlogheader, self._revlogheaders[revlogheader]))
3158
3152
3159 if revlogheader:
3153 if revlogheader:
3160 self._revlogheaders[revlogheader] = name
3154 self._revlogheaders[revlogheader] = name
3161
3155
3162 self._engines[name] = engine
3156 self._engines[name] = engine
3163
3157
3164 @property
3158 @property
3165 def supportedbundlenames(self):
3159 def supportedbundlenames(self):
3166 return set(self._bundlenames.keys())
3160 return set(self._bundlenames.keys())
3167
3161
3168 @property
3162 @property
3169 def supportedbundletypes(self):
3163 def supportedbundletypes(self):
3170 return set(self._bundletypes.keys())
3164 return set(self._bundletypes.keys())
3171
3165
3172 def forbundlename(self, bundlename):
3166 def forbundlename(self, bundlename):
3173 """Obtain a compression engine registered to a bundle name.
3167 """Obtain a compression engine registered to a bundle name.
3174
3168
3175 Will raise KeyError if the bundle type isn't registered.
3169 Will raise KeyError if the bundle type isn't registered.
3176
3170
3177 Will abort if the engine is known but not available.
3171 Will abort if the engine is known but not available.
3178 """
3172 """
3179 engine = self._engines[self._bundlenames[bundlename]]
3173 engine = self._engines[self._bundlenames[bundlename]]
3180 if not engine.available():
3174 if not engine.available():
3181 raise error.Abort(_('compression engine %s could not be loaded') %
3175 raise error.Abort(_('compression engine %s could not be loaded') %
3182 engine.name())
3176 engine.name())
3183 return engine
3177 return engine
3184
3178
3185 def forbundletype(self, bundletype):
3179 def forbundletype(self, bundletype):
3186 """Obtain a compression engine registered to a bundle type.
3180 """Obtain a compression engine registered to a bundle type.
3187
3181
3188 Will raise KeyError if the bundle type isn't registered.
3182 Will raise KeyError if the bundle type isn't registered.
3189
3183
3190 Will abort if the engine is known but not available.
3184 Will abort if the engine is known but not available.
3191 """
3185 """
3192 engine = self._engines[self._bundletypes[bundletype]]
3186 engine = self._engines[self._bundletypes[bundletype]]
3193 if not engine.available():
3187 if not engine.available():
3194 raise error.Abort(_('compression engine %s could not be loaded') %
3188 raise error.Abort(_('compression engine %s could not be loaded') %
3195 engine.name())
3189 engine.name())
3196 return engine
3190 return engine
3197
3191
3198 def supportedwireengines(self, role, onlyavailable=True):
3192 def supportedwireengines(self, role, onlyavailable=True):
3199 """Obtain compression engines that support the wire protocol.
3193 """Obtain compression engines that support the wire protocol.
3200
3194
3201 Returns a list of engines in prioritized order, most desired first.
3195 Returns a list of engines in prioritized order, most desired first.
3202
3196
3203 If ``onlyavailable`` is set, filter out engines that can't be
3197 If ``onlyavailable`` is set, filter out engines that can't be
3204 loaded.
3198 loaded.
3205 """
3199 """
3206 assert role in (SERVERROLE, CLIENTROLE)
3200 assert role in (SERVERROLE, CLIENTROLE)
3207
3201
3208 attr = 'serverpriority' if role == SERVERROLE else 'clientpriority'
3202 attr = 'serverpriority' if role == SERVERROLE else 'clientpriority'
3209
3203
3210 engines = [self._engines[e] for e in self._wiretypes.values()]
3204 engines = [self._engines[e] for e in self._wiretypes.values()]
3211 if onlyavailable:
3205 if onlyavailable:
3212 engines = [e for e in engines if e.available()]
3206 engines = [e for e in engines if e.available()]
3213
3207
3214 def getkey(e):
3208 def getkey(e):
3215 # Sort first by priority, highest first. In case of tie, sort
3209 # Sort first by priority, highest first. In case of tie, sort
3216 # alphabetically. This is arbitrary, but ensures output is
3210 # alphabetically. This is arbitrary, but ensures output is
3217 # stable.
3211 # stable.
3218 w = e.wireprotosupport()
3212 w = e.wireprotosupport()
3219 return -1 * getattr(w, attr), w.name
3213 return -1 * getattr(w, attr), w.name
3220
3214
3221 return list(sorted(engines, key=getkey))
3215 return list(sorted(engines, key=getkey))
3222
3216
3223 def forwiretype(self, wiretype):
3217 def forwiretype(self, wiretype):
3224 engine = self._engines[self._wiretypes[wiretype]]
3218 engine = self._engines[self._wiretypes[wiretype]]
3225 if not engine.available():
3219 if not engine.available():
3226 raise error.Abort(_('compression engine %s could not be loaded') %
3220 raise error.Abort(_('compression engine %s could not be loaded') %
3227 engine.name())
3221 engine.name())
3228 return engine
3222 return engine
3229
3223
3230 def forrevlogheader(self, header):
3224 def forrevlogheader(self, header):
3231 """Obtain a compression engine registered to a revlog header.
3225 """Obtain a compression engine registered to a revlog header.
3232
3226
3233 Will raise KeyError if the revlog header value isn't registered.
3227 Will raise KeyError if the revlog header value isn't registered.
3234 """
3228 """
3235 return self._engines[self._revlogheaders[header]]
3229 return self._engines[self._revlogheaders[header]]
3236
3230
3237 compengines = compressormanager()
3231 compengines = compressormanager()
3238
3232
3239 class compressionengine(object):
3233 class compressionengine(object):
3240 """Base class for compression engines.
3234 """Base class for compression engines.
3241
3235
3242 Compression engines must implement the interface defined by this class.
3236 Compression engines must implement the interface defined by this class.
3243 """
3237 """
3244 def name(self):
3238 def name(self):
3245 """Returns the name of the compression engine.
3239 """Returns the name of the compression engine.
3246
3240
3247 This is the key the engine is registered under.
3241 This is the key the engine is registered under.
3248
3242
3249 This method must be implemented.
3243 This method must be implemented.
3250 """
3244 """
3251 raise NotImplementedError()
3245 raise NotImplementedError()
3252
3246
3253 def available(self):
3247 def available(self):
3254 """Whether the compression engine is available.
3248 """Whether the compression engine is available.
3255
3249
3256 The intent of this method is to allow optional compression engines
3250 The intent of this method is to allow optional compression engines
3257 that may not be available in all installations (such as engines relying
3251 that may not be available in all installations (such as engines relying
3258 on C extensions that may not be present).
3252 on C extensions that may not be present).
3259 """
3253 """
3260 return True
3254 return True
3261
3255
3262 def bundletype(self):
3256 def bundletype(self):
3263 """Describes bundle identifiers for this engine.
3257 """Describes bundle identifiers for this engine.
3264
3258
3265 If this compression engine isn't supported for bundles, returns None.
3259 If this compression engine isn't supported for bundles, returns None.
3266
3260
3267 If this engine can be used for bundles, returns a 2-tuple of strings of
3261 If this engine can be used for bundles, returns a 2-tuple of strings of
3268 the user-facing "bundle spec" compression name and an internal
3262 the user-facing "bundle spec" compression name and an internal
3269 identifier used to denote the compression format within bundles. To
3263 identifier used to denote the compression format within bundles. To
3270 exclude the name from external usage, set the first element to ``None``.
3264 exclude the name from external usage, set the first element to ``None``.
3271
3265
3272 If bundle compression is supported, the class must also implement
3266 If bundle compression is supported, the class must also implement
3273 ``compressstream`` and `decompressorreader``.
3267 ``compressstream`` and `decompressorreader``.
3274
3268
3275 The docstring of this method is used in the help system to tell users
3269 The docstring of this method is used in the help system to tell users
3276 about this engine.
3270 about this engine.
3277 """
3271 """
3278 return None
3272 return None
3279
3273
3280 def wireprotosupport(self):
3274 def wireprotosupport(self):
3281 """Declare support for this compression format on the wire protocol.
3275 """Declare support for this compression format on the wire protocol.
3282
3276
3283 If this compression engine isn't supported for compressing wire
3277 If this compression engine isn't supported for compressing wire
3284 protocol payloads, returns None.
3278 protocol payloads, returns None.
3285
3279
3286 Otherwise, returns ``compenginewireprotosupport`` with the following
3280 Otherwise, returns ``compenginewireprotosupport`` with the following
3287 fields:
3281 fields:
3288
3282
3289 * String format identifier
3283 * String format identifier
3290 * Integer priority for the server
3284 * Integer priority for the server
3291 * Integer priority for the client
3285 * Integer priority for the client
3292
3286
3293 The integer priorities are used to order the advertisement of format
3287 The integer priorities are used to order the advertisement of format
3294 support by server and client. The highest integer is advertised
3288 support by server and client. The highest integer is advertised
3295 first. Integers with non-positive values aren't advertised.
3289 first. Integers with non-positive values aren't advertised.
3296
3290
3297 The priority values are somewhat arbitrary and only used for default
3291 The priority values are somewhat arbitrary and only used for default
3298 ordering. The relative order can be changed via config options.
3292 ordering. The relative order can be changed via config options.
3299
3293
3300 If wire protocol compression is supported, the class must also implement
3294 If wire protocol compression is supported, the class must also implement
3301 ``compressstream`` and ``decompressorreader``.
3295 ``compressstream`` and ``decompressorreader``.
3302 """
3296 """
3303 return None
3297 return None
3304
3298
3305 def revlogheader(self):
3299 def revlogheader(self):
3306 """Header added to revlog chunks that identifies this engine.
3300 """Header added to revlog chunks that identifies this engine.
3307
3301
3308 If this engine can be used to compress revlogs, this method should
3302 If this engine can be used to compress revlogs, this method should
3309 return the bytes used to identify chunks compressed with this engine.
3303 return the bytes used to identify chunks compressed with this engine.
3310 Else, the method should return ``None`` to indicate it does not
3304 Else, the method should return ``None`` to indicate it does not
3311 participate in revlog compression.
3305 participate in revlog compression.
3312 """
3306 """
3313 return None
3307 return None
3314
3308
3315 def compressstream(self, it, opts=None):
3309 def compressstream(self, it, opts=None):
3316 """Compress an iterator of chunks.
3310 """Compress an iterator of chunks.
3317
3311
3318 The method receives an iterator (ideally a generator) of chunks of
3312 The method receives an iterator (ideally a generator) of chunks of
3319 bytes to be compressed. It returns an iterator (ideally a generator)
3313 bytes to be compressed. It returns an iterator (ideally a generator)
3320 of bytes of chunks representing the compressed output.
3314 of bytes of chunks representing the compressed output.
3321
3315
3322 Optionally accepts an argument defining how to perform compression.
3316 Optionally accepts an argument defining how to perform compression.
3323 Each engine treats this argument differently.
3317 Each engine treats this argument differently.
3324 """
3318 """
3325 raise NotImplementedError()
3319 raise NotImplementedError()
3326
3320
3327 def decompressorreader(self, fh):
3321 def decompressorreader(self, fh):
3328 """Perform decompression on a file object.
3322 """Perform decompression on a file object.
3329
3323
3330 Argument is an object with a ``read(size)`` method that returns
3324 Argument is an object with a ``read(size)`` method that returns
3331 compressed data. Return value is an object with a ``read(size)`` that
3325 compressed data. Return value is an object with a ``read(size)`` that
3332 returns uncompressed data.
3326 returns uncompressed data.
3333 """
3327 """
3334 raise NotImplementedError()
3328 raise NotImplementedError()
3335
3329
3336 def revlogcompressor(self, opts=None):
3330 def revlogcompressor(self, opts=None):
3337 """Obtain an object that can be used to compress revlog entries.
3331 """Obtain an object that can be used to compress revlog entries.
3338
3332
3339 The object has a ``compress(data)`` method that compresses binary
3333 The object has a ``compress(data)`` method that compresses binary
3340 data. This method returns compressed binary data or ``None`` if
3334 data. This method returns compressed binary data or ``None`` if
3341 the data could not be compressed (too small, not compressible, etc).
3335 the data could not be compressed (too small, not compressible, etc).
3342 The returned data should have a header uniquely identifying this
3336 The returned data should have a header uniquely identifying this
3343 compression format so decompression can be routed to this engine.
3337 compression format so decompression can be routed to this engine.
3344 This header should be identified by the ``revlogheader()`` return
3338 This header should be identified by the ``revlogheader()`` return
3345 value.
3339 value.
3346
3340
3347 The object has a ``decompress(data)`` method that decompresses
3341 The object has a ``decompress(data)`` method that decompresses
3348 data. The method will only be called if ``data`` begins with
3342 data. The method will only be called if ``data`` begins with
3349 ``revlogheader()``. The method should return the raw, uncompressed
3343 ``revlogheader()``. The method should return the raw, uncompressed
3350 data or raise a ``RevlogError``.
3344 data or raise a ``RevlogError``.
3351
3345
3352 The object is reusable but is not thread safe.
3346 The object is reusable but is not thread safe.
3353 """
3347 """
3354 raise NotImplementedError()
3348 raise NotImplementedError()
3355
3349
3356 class _zlibengine(compressionengine):
3350 class _zlibengine(compressionengine):
3357 def name(self):
3351 def name(self):
3358 return 'zlib'
3352 return 'zlib'
3359
3353
3360 def bundletype(self):
3354 def bundletype(self):
3361 """zlib compression using the DEFLATE algorithm.
3355 """zlib compression using the DEFLATE algorithm.
3362
3356
3363 All Mercurial clients should support this format. The compression
3357 All Mercurial clients should support this format. The compression
3364 algorithm strikes a reasonable balance between compression ratio
3358 algorithm strikes a reasonable balance between compression ratio
3365 and size.
3359 and size.
3366 """
3360 """
3367 return 'gzip', 'GZ'
3361 return 'gzip', 'GZ'
3368
3362
3369 def wireprotosupport(self):
3363 def wireprotosupport(self):
3370 return compewireprotosupport('zlib', 20, 20)
3364 return compewireprotosupport('zlib', 20, 20)
3371
3365
3372 def revlogheader(self):
3366 def revlogheader(self):
3373 return 'x'
3367 return 'x'
3374
3368
3375 def compressstream(self, it, opts=None):
3369 def compressstream(self, it, opts=None):
3376 opts = opts or {}
3370 opts = opts or {}
3377
3371
3378 z = zlib.compressobj(opts.get('level', -1))
3372 z = zlib.compressobj(opts.get('level', -1))
3379 for chunk in it:
3373 for chunk in it:
3380 data = z.compress(chunk)
3374 data = z.compress(chunk)
3381 # Not all calls to compress emit data. It is cheaper to inspect
3375 # Not all calls to compress emit data. It is cheaper to inspect
3382 # here than to feed empty chunks through generator.
3376 # here than to feed empty chunks through generator.
3383 if data:
3377 if data:
3384 yield data
3378 yield data
3385
3379
3386 yield z.flush()
3380 yield z.flush()
3387
3381
3388 def decompressorreader(self, fh):
3382 def decompressorreader(self, fh):
3389 def gen():
3383 def gen():
3390 d = zlib.decompressobj()
3384 d = zlib.decompressobj()
3391 for chunk in filechunkiter(fh):
3385 for chunk in filechunkiter(fh):
3392 while chunk:
3386 while chunk:
3393 # Limit output size to limit memory.
3387 # Limit output size to limit memory.
3394 yield d.decompress(chunk, 2 ** 18)
3388 yield d.decompress(chunk, 2 ** 18)
3395 chunk = d.unconsumed_tail
3389 chunk = d.unconsumed_tail
3396
3390
3397 return chunkbuffer(gen())
3391 return chunkbuffer(gen())
3398
3392
3399 class zlibrevlogcompressor(object):
3393 class zlibrevlogcompressor(object):
3400 def compress(self, data):
3394 def compress(self, data):
3401 insize = len(data)
3395 insize = len(data)
3402 # Caller handles empty input case.
3396 # Caller handles empty input case.
3403 assert insize > 0
3397 assert insize > 0
3404
3398
3405 if insize < 44:
3399 if insize < 44:
3406 return None
3400 return None
3407
3401
3408 elif insize <= 1000000:
3402 elif insize <= 1000000:
3409 compressed = zlib.compress(data)
3403 compressed = zlib.compress(data)
3410 if len(compressed) < insize:
3404 if len(compressed) < insize:
3411 return compressed
3405 return compressed
3412 return None
3406 return None
3413
3407
3414 # zlib makes an internal copy of the input buffer, doubling
3408 # zlib makes an internal copy of the input buffer, doubling
3415 # memory usage for large inputs. So do streaming compression
3409 # memory usage for large inputs. So do streaming compression
3416 # on large inputs.
3410 # on large inputs.
3417 else:
3411 else:
3418 z = zlib.compressobj()
3412 z = zlib.compressobj()
3419 parts = []
3413 parts = []
3420 pos = 0
3414 pos = 0
3421 while pos < insize:
3415 while pos < insize:
3422 pos2 = pos + 2**20
3416 pos2 = pos + 2**20
3423 parts.append(z.compress(data[pos:pos2]))
3417 parts.append(z.compress(data[pos:pos2]))
3424 pos = pos2
3418 pos = pos2
3425 parts.append(z.flush())
3419 parts.append(z.flush())
3426
3420
3427 if sum(map(len, parts)) < insize:
3421 if sum(map(len, parts)) < insize:
3428 return ''.join(parts)
3422 return ''.join(parts)
3429 return None
3423 return None
3430
3424
3431 def decompress(self, data):
3425 def decompress(self, data):
3432 try:
3426 try:
3433 return zlib.decompress(data)
3427 return zlib.decompress(data)
3434 except zlib.error as e:
3428 except zlib.error as e:
3435 raise error.RevlogError(_('revlog decompress error: %s') %
3429 raise error.RevlogError(_('revlog decompress error: %s') %
3436 str(e))
3430 str(e))
3437
3431
3438 def revlogcompressor(self, opts=None):
3432 def revlogcompressor(self, opts=None):
3439 return self.zlibrevlogcompressor()
3433 return self.zlibrevlogcompressor()
3440
3434
3441 compengines.register(_zlibengine())
3435 compengines.register(_zlibengine())
3442
3436
3443 class _bz2engine(compressionengine):
3437 class _bz2engine(compressionengine):
3444 def name(self):
3438 def name(self):
3445 return 'bz2'
3439 return 'bz2'
3446
3440
3447 def bundletype(self):
3441 def bundletype(self):
3448 """An algorithm that produces smaller bundles than ``gzip``.
3442 """An algorithm that produces smaller bundles than ``gzip``.
3449
3443
3450 All Mercurial clients should support this format.
3444 All Mercurial clients should support this format.
3451
3445
3452 This engine will likely produce smaller bundles than ``gzip`` but
3446 This engine will likely produce smaller bundles than ``gzip`` but
3453 will be significantly slower, both during compression and
3447 will be significantly slower, both during compression and
3454 decompression.
3448 decompression.
3455
3449
3456 If available, the ``zstd`` engine can yield similar or better
3450 If available, the ``zstd`` engine can yield similar or better
3457 compression at much higher speeds.
3451 compression at much higher speeds.
3458 """
3452 """
3459 return 'bzip2', 'BZ'
3453 return 'bzip2', 'BZ'
3460
3454
3461 # We declare a protocol name but don't advertise by default because
3455 # We declare a protocol name but don't advertise by default because
3462 # it is slow.
3456 # it is slow.
3463 def wireprotosupport(self):
3457 def wireprotosupport(self):
3464 return compewireprotosupport('bzip2', 0, 0)
3458 return compewireprotosupport('bzip2', 0, 0)
3465
3459
3466 def compressstream(self, it, opts=None):
3460 def compressstream(self, it, opts=None):
3467 opts = opts or {}
3461 opts = opts or {}
3468 z = bz2.BZ2Compressor(opts.get('level', 9))
3462 z = bz2.BZ2Compressor(opts.get('level', 9))
3469 for chunk in it:
3463 for chunk in it:
3470 data = z.compress(chunk)
3464 data = z.compress(chunk)
3471 if data:
3465 if data:
3472 yield data
3466 yield data
3473
3467
3474 yield z.flush()
3468 yield z.flush()
3475
3469
3476 def decompressorreader(self, fh):
3470 def decompressorreader(self, fh):
3477 def gen():
3471 def gen():
3478 d = bz2.BZ2Decompressor()
3472 d = bz2.BZ2Decompressor()
3479 for chunk in filechunkiter(fh):
3473 for chunk in filechunkiter(fh):
3480 yield d.decompress(chunk)
3474 yield d.decompress(chunk)
3481
3475
3482 return chunkbuffer(gen())
3476 return chunkbuffer(gen())
3483
3477
3484 compengines.register(_bz2engine())
3478 compengines.register(_bz2engine())
3485
3479
3486 class _truncatedbz2engine(compressionengine):
3480 class _truncatedbz2engine(compressionengine):
3487 def name(self):
3481 def name(self):
3488 return 'bz2truncated'
3482 return 'bz2truncated'
3489
3483
3490 def bundletype(self):
3484 def bundletype(self):
3491 return None, '_truncatedBZ'
3485 return None, '_truncatedBZ'
3492
3486
3493 # We don't implement compressstream because it is hackily handled elsewhere.
3487 # We don't implement compressstream because it is hackily handled elsewhere.
3494
3488
3495 def decompressorreader(self, fh):
3489 def decompressorreader(self, fh):
3496 def gen():
3490 def gen():
3497 # The input stream doesn't have the 'BZ' header. So add it back.
3491 # The input stream doesn't have the 'BZ' header. So add it back.
3498 d = bz2.BZ2Decompressor()
3492 d = bz2.BZ2Decompressor()
3499 d.decompress('BZ')
3493 d.decompress('BZ')
3500 for chunk in filechunkiter(fh):
3494 for chunk in filechunkiter(fh):
3501 yield d.decompress(chunk)
3495 yield d.decompress(chunk)
3502
3496
3503 return chunkbuffer(gen())
3497 return chunkbuffer(gen())
3504
3498
3505 compengines.register(_truncatedbz2engine())
3499 compengines.register(_truncatedbz2engine())
3506
3500
3507 class _noopengine(compressionengine):
3501 class _noopengine(compressionengine):
3508 def name(self):
3502 def name(self):
3509 return 'none'
3503 return 'none'
3510
3504
3511 def bundletype(self):
3505 def bundletype(self):
3512 """No compression is performed.
3506 """No compression is performed.
3513
3507
3514 Use this compression engine to explicitly disable compression.
3508 Use this compression engine to explicitly disable compression.
3515 """
3509 """
3516 return 'none', 'UN'
3510 return 'none', 'UN'
3517
3511
3518 # Clients always support uncompressed payloads. Servers don't because
3512 # Clients always support uncompressed payloads. Servers don't because
3519 # unless you are on a fast network, uncompressed payloads can easily
3513 # unless you are on a fast network, uncompressed payloads can easily
3520 # saturate your network pipe.
3514 # saturate your network pipe.
3521 def wireprotosupport(self):
3515 def wireprotosupport(self):
3522 return compewireprotosupport('none', 0, 10)
3516 return compewireprotosupport('none', 0, 10)
3523
3517
3524 # We don't implement revlogheader because it is handled specially
3518 # We don't implement revlogheader because it is handled specially
3525 # in the revlog class.
3519 # in the revlog class.
3526
3520
3527 def compressstream(self, it, opts=None):
3521 def compressstream(self, it, opts=None):
3528 return it
3522 return it
3529
3523
3530 def decompressorreader(self, fh):
3524 def decompressorreader(self, fh):
3531 return fh
3525 return fh
3532
3526
3533 class nooprevlogcompressor(object):
3527 class nooprevlogcompressor(object):
3534 def compress(self, data):
3528 def compress(self, data):
3535 return None
3529 return None
3536
3530
3537 def revlogcompressor(self, opts=None):
3531 def revlogcompressor(self, opts=None):
3538 return self.nooprevlogcompressor()
3532 return self.nooprevlogcompressor()
3539
3533
3540 compengines.register(_noopengine())
3534 compengines.register(_noopengine())
3541
3535
3542 class _zstdengine(compressionengine):
3536 class _zstdengine(compressionengine):
3543 def name(self):
3537 def name(self):
3544 return 'zstd'
3538 return 'zstd'
3545
3539
3546 @propertycache
3540 @propertycache
3547 def _module(self):
3541 def _module(self):
3548 # Not all installs have the zstd module available. So defer importing
3542 # Not all installs have the zstd module available. So defer importing
3549 # until first access.
3543 # until first access.
3550 try:
3544 try:
3551 from . import zstd
3545 from . import zstd
3552 # Force delayed import.
3546 # Force delayed import.
3553 zstd.__version__
3547 zstd.__version__
3554 return zstd
3548 return zstd
3555 except ImportError:
3549 except ImportError:
3556 return None
3550 return None
3557
3551
3558 def available(self):
3552 def available(self):
3559 return bool(self._module)
3553 return bool(self._module)
3560
3554
3561 def bundletype(self):
3555 def bundletype(self):
3562 """A modern compression algorithm that is fast and highly flexible.
3556 """A modern compression algorithm that is fast and highly flexible.
3563
3557
3564 Only supported by Mercurial 4.1 and newer clients.
3558 Only supported by Mercurial 4.1 and newer clients.
3565
3559
3566 With the default settings, zstd compression is both faster and yields
3560 With the default settings, zstd compression is both faster and yields
3567 better compression than ``gzip``. It also frequently yields better
3561 better compression than ``gzip``. It also frequently yields better
3568 compression than ``bzip2`` while operating at much higher speeds.
3562 compression than ``bzip2`` while operating at much higher speeds.
3569
3563
3570 If this engine is available and backwards compatibility is not a
3564 If this engine is available and backwards compatibility is not a
3571 concern, it is likely the best available engine.
3565 concern, it is likely the best available engine.
3572 """
3566 """
3573 return 'zstd', 'ZS'
3567 return 'zstd', 'ZS'
3574
3568
3575 def wireprotosupport(self):
3569 def wireprotosupport(self):
3576 return compewireprotosupport('zstd', 50, 50)
3570 return compewireprotosupport('zstd', 50, 50)
3577
3571
3578 def revlogheader(self):
3572 def revlogheader(self):
3579 return '\x28'
3573 return '\x28'
3580
3574
3581 def compressstream(self, it, opts=None):
3575 def compressstream(self, it, opts=None):
3582 opts = opts or {}
3576 opts = opts or {}
3583 # zstd level 3 is almost always significantly faster than zlib
3577 # zstd level 3 is almost always significantly faster than zlib
3584 # while providing no worse compression. It strikes a good balance
3578 # while providing no worse compression. It strikes a good balance
3585 # between speed and compression.
3579 # between speed and compression.
3586 level = opts.get('level', 3)
3580 level = opts.get('level', 3)
3587
3581
3588 zstd = self._module
3582 zstd = self._module
3589 z = zstd.ZstdCompressor(level=level).compressobj()
3583 z = zstd.ZstdCompressor(level=level).compressobj()
3590 for chunk in it:
3584 for chunk in it:
3591 data = z.compress(chunk)
3585 data = z.compress(chunk)
3592 if data:
3586 if data:
3593 yield data
3587 yield data
3594
3588
3595 yield z.flush()
3589 yield z.flush()
3596
3590
3597 def decompressorreader(self, fh):
3591 def decompressorreader(self, fh):
3598 zstd = self._module
3592 zstd = self._module
3599 dctx = zstd.ZstdDecompressor()
3593 dctx = zstd.ZstdDecompressor()
3600 return chunkbuffer(dctx.read_from(fh))
3594 return chunkbuffer(dctx.read_from(fh))
3601
3595
3602 class zstdrevlogcompressor(object):
3596 class zstdrevlogcompressor(object):
3603 def __init__(self, zstd, level=3):
3597 def __init__(self, zstd, level=3):
3604 # Writing the content size adds a few bytes to the output. However,
3598 # Writing the content size adds a few bytes to the output. However,
3605 # it allows decompression to be more optimal since we can
3599 # it allows decompression to be more optimal since we can
3606 # pre-allocate a buffer to hold the result.
3600 # pre-allocate a buffer to hold the result.
3607 self._cctx = zstd.ZstdCompressor(level=level,
3601 self._cctx = zstd.ZstdCompressor(level=level,
3608 write_content_size=True)
3602 write_content_size=True)
3609 self._dctx = zstd.ZstdDecompressor()
3603 self._dctx = zstd.ZstdDecompressor()
3610 self._compinsize = zstd.COMPRESSION_RECOMMENDED_INPUT_SIZE
3604 self._compinsize = zstd.COMPRESSION_RECOMMENDED_INPUT_SIZE
3611 self._decompinsize = zstd.DECOMPRESSION_RECOMMENDED_INPUT_SIZE
3605 self._decompinsize = zstd.DECOMPRESSION_RECOMMENDED_INPUT_SIZE
3612
3606
3613 def compress(self, data):
3607 def compress(self, data):
3614 insize = len(data)
3608 insize = len(data)
3615 # Caller handles empty input case.
3609 # Caller handles empty input case.
3616 assert insize > 0
3610 assert insize > 0
3617
3611
3618 if insize < 50:
3612 if insize < 50:
3619 return None
3613 return None
3620
3614
3621 elif insize <= 1000000:
3615 elif insize <= 1000000:
3622 compressed = self._cctx.compress(data)
3616 compressed = self._cctx.compress(data)
3623 if len(compressed) < insize:
3617 if len(compressed) < insize:
3624 return compressed
3618 return compressed
3625 return None
3619 return None
3626 else:
3620 else:
3627 z = self._cctx.compressobj()
3621 z = self._cctx.compressobj()
3628 chunks = []
3622 chunks = []
3629 pos = 0
3623 pos = 0
3630 while pos < insize:
3624 while pos < insize:
3631 pos2 = pos + self._compinsize
3625 pos2 = pos + self._compinsize
3632 chunk = z.compress(data[pos:pos2])
3626 chunk = z.compress(data[pos:pos2])
3633 if chunk:
3627 if chunk:
3634 chunks.append(chunk)
3628 chunks.append(chunk)
3635 pos = pos2
3629 pos = pos2
3636 chunks.append(z.flush())
3630 chunks.append(z.flush())
3637
3631
3638 if sum(map(len, chunks)) < insize:
3632 if sum(map(len, chunks)) < insize:
3639 return ''.join(chunks)
3633 return ''.join(chunks)
3640 return None
3634 return None
3641
3635
3642 def decompress(self, data):
3636 def decompress(self, data):
3643 insize = len(data)
3637 insize = len(data)
3644
3638
3645 try:
3639 try:
3646 # This was measured to be faster than other streaming
3640 # This was measured to be faster than other streaming
3647 # decompressors.
3641 # decompressors.
3648 dobj = self._dctx.decompressobj()
3642 dobj = self._dctx.decompressobj()
3649 chunks = []
3643 chunks = []
3650 pos = 0
3644 pos = 0
3651 while pos < insize:
3645 while pos < insize:
3652 pos2 = pos + self._decompinsize
3646 pos2 = pos + self._decompinsize
3653 chunk = dobj.decompress(data[pos:pos2])
3647 chunk = dobj.decompress(data[pos:pos2])
3654 if chunk:
3648 if chunk:
3655 chunks.append(chunk)
3649 chunks.append(chunk)
3656 pos = pos2
3650 pos = pos2
3657 # Frame should be exhausted, so no finish() API.
3651 # Frame should be exhausted, so no finish() API.
3658
3652
3659 return ''.join(chunks)
3653 return ''.join(chunks)
3660 except Exception as e:
3654 except Exception as e:
3661 raise error.RevlogError(_('revlog decompress error: %s') %
3655 raise error.RevlogError(_('revlog decompress error: %s') %
3662 str(e))
3656 str(e))
3663
3657
3664 def revlogcompressor(self, opts=None):
3658 def revlogcompressor(self, opts=None):
3665 opts = opts or {}
3659 opts = opts or {}
3666 return self.zstdrevlogcompressor(self._module,
3660 return self.zstdrevlogcompressor(self._module,
3667 level=opts.get('level', 3))
3661 level=opts.get('level', 3))
3668
3662
3669 compengines.register(_zstdengine())
3663 compengines.register(_zstdengine())
3670
3664
3671 def bundlecompressiontopics():
3665 def bundlecompressiontopics():
3672 """Obtains a list of available bundle compressions for use in help."""
3666 """Obtains a list of available bundle compressions for use in help."""
3673 # help.makeitemsdocs() expects a dict of names to items with a .__doc__.
3667 # help.makeitemsdocs() expects a dict of names to items with a .__doc__.
3674 items = {}
3668 items = {}
3675
3669
3676 # We need to format the docstring. So use a dummy object/type to hold it
3670 # We need to format the docstring. So use a dummy object/type to hold it
3677 # rather than mutating the original.
3671 # rather than mutating the original.
3678 class docobject(object):
3672 class docobject(object):
3679 pass
3673 pass
3680
3674
3681 for name in compengines:
3675 for name in compengines:
3682 engine = compengines[name]
3676 engine = compengines[name]
3683
3677
3684 if not engine.available():
3678 if not engine.available():
3685 continue
3679 continue
3686
3680
3687 bt = engine.bundletype()
3681 bt = engine.bundletype()
3688 if not bt or not bt[0]:
3682 if not bt or not bt[0]:
3689 continue
3683 continue
3690
3684
3691 doc = pycompat.sysstr('``%s``\n %s') % (
3685 doc = pycompat.sysstr('``%s``\n %s') % (
3692 bt[0], engine.bundletype.__doc__)
3686 bt[0], engine.bundletype.__doc__)
3693
3687
3694 value = docobject()
3688 value = docobject()
3695 value.__doc__ = doc
3689 value.__doc__ = doc
3696
3690
3697 items[bt[0]] = value
3691 items[bt[0]] = value
3698
3692
3699 return items
3693 return items
3700
3694
3701 # convenient shortcut
3695 # convenient shortcut
3702 dst = debugstacktrace
3696 dst = debugstacktrace
General Comments 0
You need to be logged in to leave comments. Login now