##// END OF EJS Templates
util: add debugstacktrace depth limit...
Mads Kiilerich -
r31315:78ac7061 default
parent child Browse files
Show More
@@ -1,3547 +1,3550
1 # util.py - Mercurial utility functions and platform specific implementations
1 # util.py - Mercurial utility functions and platform specific implementations
2 #
2 #
3 # Copyright 2005 K. Thananchayan <thananck@yahoo.com>
3 # Copyright 2005 K. Thananchayan <thananck@yahoo.com>
4 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
5 # Copyright 2006 Vadim Gelfer <vadim.gelfer@gmail.com>
5 # Copyright 2006 Vadim Gelfer <vadim.gelfer@gmail.com>
6 #
6 #
7 # This software may be used and distributed according to the terms of the
7 # This software may be used and distributed according to the terms of the
8 # GNU General Public License version 2 or any later version.
8 # GNU General Public License version 2 or any later version.
9
9
10 """Mercurial utility functions and platform specific implementations.
10 """Mercurial utility functions and platform specific implementations.
11
11
12 This contains helper routines that are independent of the SCM core and
12 This contains helper routines that are independent of the SCM core and
13 hide platform-specific details from the core.
13 hide platform-specific details from the core.
14 """
14 """
15
15
16 from __future__ import absolute_import
16 from __future__ import absolute_import
17
17
18 import bz2
18 import bz2
19 import calendar
19 import calendar
20 import collections
20 import collections
21 import datetime
21 import datetime
22 import errno
22 import errno
23 import gc
23 import gc
24 import hashlib
24 import hashlib
25 import imp
25 import imp
26 import os
26 import os
27 import platform as pyplatform
27 import platform as pyplatform
28 import re as remod
28 import re as remod
29 import shutil
29 import shutil
30 import signal
30 import signal
31 import socket
31 import socket
32 import stat
32 import stat
33 import string
33 import string
34 import subprocess
34 import subprocess
35 import sys
35 import sys
36 import tempfile
36 import tempfile
37 import textwrap
37 import textwrap
38 import time
38 import time
39 import traceback
39 import traceback
40 import zlib
40 import zlib
41
41
42 from . import (
42 from . import (
43 encoding,
43 encoding,
44 error,
44 error,
45 i18n,
45 i18n,
46 osutil,
46 osutil,
47 parsers,
47 parsers,
48 pycompat,
48 pycompat,
49 )
49 )
50
50
51 empty = pycompat.empty
51 empty = pycompat.empty
52 httplib = pycompat.httplib
52 httplib = pycompat.httplib
53 httpserver = pycompat.httpserver
53 httpserver = pycompat.httpserver
54 pickle = pycompat.pickle
54 pickle = pycompat.pickle
55 queue = pycompat.queue
55 queue = pycompat.queue
56 socketserver = pycompat.socketserver
56 socketserver = pycompat.socketserver
57 stderr = pycompat.stderr
57 stderr = pycompat.stderr
58 stdin = pycompat.stdin
58 stdin = pycompat.stdin
59 stdout = pycompat.stdout
59 stdout = pycompat.stdout
60 stringio = pycompat.stringio
60 stringio = pycompat.stringio
61 urlerr = pycompat.urlerr
61 urlerr = pycompat.urlerr
62 urlparse = pycompat.urlparse
62 urlparse = pycompat.urlparse
63 urlreq = pycompat.urlreq
63 urlreq = pycompat.urlreq
64 xmlrpclib = pycompat.xmlrpclib
64 xmlrpclib = pycompat.xmlrpclib
65
65
def isatty(fp):
    """Report whether the file object ``fp`` is attached to a terminal.

    Objects that do not implement ``isatty`` at all count as
    non-interactive rather than raising.
    """
    check = getattr(fp, 'isatty', None)
    if check is None:
        return False
    try:
        return check()
    except AttributeError:
        return False
71
71
72 # glibc determines buffering on first write to stdout - if we replace a TTY
72 # glibc determines buffering on first write to stdout - if we replace a TTY
73 # destined stdout with a pipe destined stdout (e.g. pager), we want line
73 # destined stdout with a pipe destined stdout (e.g. pager), we want line
74 # buffering
74 # buffering
75 if isatty(stdout):
75 if isatty(stdout):
76 stdout = os.fdopen(stdout.fileno(), pycompat.sysstr('wb'), 1)
76 stdout = os.fdopen(stdout.fileno(), pycompat.sysstr('wb'), 1)
77
77
78 if pycompat.osname == 'nt':
78 if pycompat.osname == 'nt':
79 from . import windows as platform
79 from . import windows as platform
80 stdout = platform.winstdout(stdout)
80 stdout = platform.winstdout(stdout)
81 else:
81 else:
82 from . import posix as platform
82 from . import posix as platform
83
83
84 _ = i18n._
84 _ = i18n._
85
85
86 bindunixsocket = platform.bindunixsocket
86 bindunixsocket = platform.bindunixsocket
87 cachestat = platform.cachestat
87 cachestat = platform.cachestat
88 checkexec = platform.checkexec
88 checkexec = platform.checkexec
89 checklink = platform.checklink
89 checklink = platform.checklink
90 copymode = platform.copymode
90 copymode = platform.copymode
91 executablepath = platform.executablepath
91 executablepath = platform.executablepath
92 expandglobs = platform.expandglobs
92 expandglobs = platform.expandglobs
93 explainexit = platform.explainexit
93 explainexit = platform.explainexit
94 findexe = platform.findexe
94 findexe = platform.findexe
95 gethgcmd = platform.gethgcmd
95 gethgcmd = platform.gethgcmd
96 getuser = platform.getuser
96 getuser = platform.getuser
97 getpid = os.getpid
97 getpid = os.getpid
98 groupmembers = platform.groupmembers
98 groupmembers = platform.groupmembers
99 groupname = platform.groupname
99 groupname = platform.groupname
100 hidewindow = platform.hidewindow
100 hidewindow = platform.hidewindow
101 isexec = platform.isexec
101 isexec = platform.isexec
102 isowner = platform.isowner
102 isowner = platform.isowner
103 localpath = platform.localpath
103 localpath = platform.localpath
104 lookupreg = platform.lookupreg
104 lookupreg = platform.lookupreg
105 makedir = platform.makedir
105 makedir = platform.makedir
106 nlinks = platform.nlinks
106 nlinks = platform.nlinks
107 normpath = platform.normpath
107 normpath = platform.normpath
108 normcase = platform.normcase
108 normcase = platform.normcase
109 normcasespec = platform.normcasespec
109 normcasespec = platform.normcasespec
110 normcasefallback = platform.normcasefallback
110 normcasefallback = platform.normcasefallback
111 openhardlinks = platform.openhardlinks
111 openhardlinks = platform.openhardlinks
112 oslink = platform.oslink
112 oslink = platform.oslink
113 parsepatchoutput = platform.parsepatchoutput
113 parsepatchoutput = platform.parsepatchoutput
114 pconvert = platform.pconvert
114 pconvert = platform.pconvert
115 poll = platform.poll
115 poll = platform.poll
116 popen = platform.popen
116 popen = platform.popen
117 posixfile = platform.posixfile
117 posixfile = platform.posixfile
118 quotecommand = platform.quotecommand
118 quotecommand = platform.quotecommand
119 readpipe = platform.readpipe
119 readpipe = platform.readpipe
120 rename = platform.rename
120 rename = platform.rename
121 removedirs = platform.removedirs
121 removedirs = platform.removedirs
122 samedevice = platform.samedevice
122 samedevice = platform.samedevice
123 samefile = platform.samefile
123 samefile = platform.samefile
124 samestat = platform.samestat
124 samestat = platform.samestat
125 setbinary = platform.setbinary
125 setbinary = platform.setbinary
126 setflags = platform.setflags
126 setflags = platform.setflags
127 setsignalhandler = platform.setsignalhandler
127 setsignalhandler = platform.setsignalhandler
128 shellquote = platform.shellquote
128 shellquote = platform.shellquote
129 spawndetached = platform.spawndetached
129 spawndetached = platform.spawndetached
130 split = platform.split
130 split = platform.split
131 sshargs = platform.sshargs
131 sshargs = platform.sshargs
132 statfiles = getattr(osutil, 'statfiles', platform.statfiles)
132 statfiles = getattr(osutil, 'statfiles', platform.statfiles)
133 statisexec = platform.statisexec
133 statisexec = platform.statisexec
134 statislink = platform.statislink
134 statislink = platform.statislink
135 testpid = platform.testpid
135 testpid = platform.testpid
136 umask = platform.umask
136 umask = platform.umask
137 unlink = platform.unlink
137 unlink = platform.unlink
138 unlinkpath = platform.unlinkpath
138 unlinkpath = platform.unlinkpath
139 username = platform.username
139 username = platform.username
140
140
141 # Python compatibility
141 # Python compatibility
142
142
143 _notset = object()
143 _notset = object()
144
144
145 # disable Python's problematic floating point timestamps (issue4836)
145 # disable Python's problematic floating point timestamps (issue4836)
146 # (Python hypocritically says you shouldn't change this behavior in
146 # (Python hypocritically says you shouldn't change this behavior in
147 # libraries, and sure enough Mercurial is not a library.)
147 # libraries, and sure enough Mercurial is not a library.)
148 os.stat_float_times(False)
148 os.stat_float_times(False)
149
149
def safehasattr(thing, attr):
    """Return True if ``thing`` has an attribute named ``attr``.

    Uses the module-level ``_notset`` sentinel as the getattr default so
    only a genuinely missing attribute yields False.

    NOTE(review): presumably this exists instead of the builtin
    ``hasattr`` because Python 2's ``hasattr`` swallows all exceptions,
    not just AttributeError -- confirm before relying on that rationale.
    """
    return getattr(thing, attr, _notset) is not _notset
152
152
def bitsfrom(container):
    """OR together every value in ``container`` into a single bitmask."""
    mask = 0
    for flag in container:
        mask |= flag
    return mask
158
158
# Supported digest algorithms, keyed by their wire name.
DIGESTS = {
    'md5': hashlib.md5,
    'sha1': hashlib.sha1,
    'sha512': hashlib.sha512,
}
# List of digest types from strongest to weakest
DIGESTS_BY_STRENGTH = ['sha512', 'sha1', 'md5']

# sanity check: every entry in the preference list must be supported
for k in DIGESTS_BY_STRENGTH:
    assert k in DIGESTS

class digester(object):
    """helper to compute digests.

    This helper can be used to compute one or more digests given their name.

    >>> d = digester(['md5', 'sha1'])
    >>> d.update('foo')
    >>> [k for k in sorted(d)]
    ['md5', 'sha1']
    >>> d['md5']
    'acbd18db4cc2f85cedef654fccc4a4d8'
    >>> d['sha1']
    '0beec7b5ea3f0fdbc95d0dd47f3c5bc275da8a33'
    >>> digester.preferred(['md5', 'sha1'])
    'sha1'
    """

    def __init__(self, digests, s=''):
        """Create hashers for each name in ``digests``; optionally hash ``s``.

        Raises Abort for digest names not listed in DIGESTS.
        """
        self._hashes = {}
        for k in digests:
            if k not in DIGESTS:
                raise Abort(_('unknown digest type: %s') % k)
            self._hashes[k] = DIGESTS[k]()
        if s:
            self.update(s)

    def update(self, data):
        """Feed ``data`` into every configured hasher."""
        for h in self._hashes.values():
            h.update(data)

    def __getitem__(self, key):
        """Return the hex digest for ``key``."""
        if key not in DIGESTS:
            # fix: the error message previously interpolated 'k', a name
            # leaked from an unrelated module-level loop, which would
            # raise NameError instead of the intended Abort
            raise Abort(_('unknown digest type: %s') % key)
        return self._hashes[key].hexdigest()

    def __iter__(self):
        return iter(self._hashes)

    @staticmethod
    def preferred(supported):
        """returns the strongest digest type in both supported and DIGESTS."""

        for k in DIGESTS_BY_STRENGTH:
            if k in supported:
                return k
        return None
216
216
class digestchecker(object):
    """file handle wrapper that additionally checks content against a given
    size and digests.

    d = digestchecker(fh, size, {'md5': '...'})

    When multiple digests are given, all of them are validated.
    """

    def __init__(self, fh, size, digests):
        self._fh = fh
        self._size = size
        self._got = 0
        self._digests = dict(digests)
        self._digester = digester(self._digests.keys())

    def read(self, length=-1):
        # hash everything handed out and keep a running byte count
        data = self._fh.read(length)
        self._digester.update(data)
        self._got += len(data)
        return data

    def validate(self):
        """Abort when the byte count or any digest does not match."""
        if self._got != self._size:
            raise Abort(_('size mismatch: expected %d, got %d') %
                        (self._size, self._got))
        for name, expected in self._digests.items():
            actual = self._digester[name]
            if expected != actual:
                # i18n: first parameter is a digest name
                raise Abort(_('%s mismatch: expected %s, got %s') %
                            (name, expected, actual))
248
248
# Python 2 provides a builtin 'buffer' type; Python 3 does not.  Make a
# compatible slicing helper available under either interpreter.
try:
    buffer = buffer
except NameError:
    if not pycompat.ispy3:
        def buffer(sliceable, offset=0, length=None):
            # Python 2 without the builtin: plain slicing (makes a copy)
            if length is not None:
                return sliceable[offset:offset + length]
            return sliceable[offset:]
    else:
        def buffer(sliceable, offset=0, length=None):
            # Python 3: memoryview slices share the underlying storage
            if length is not None:
                return memoryview(sliceable)[offset:offset + length]
            return memoryview(sliceable)[offset:]
262
262
263 closefds = pycompat.osname == 'posix'
263 closefds = pycompat.osname == 'posix'
264
264
265 _chunksize = 4096
265 _chunksize = 4096
266
266
class bufferedinputpipe(object):
    """a manually buffered input pipe

    Python will not let us use buffered IO and lazy reading with 'polling' at
    the same time. We cannot probe the buffer state and select will not detect
    that data are ready to read if they are already buffered.

    This class let us work around that by implementing its own buffering
    (allowing efficient readline) while offering a way to know if the buffer is
    empty from the output (allowing collaboration of the buffer with polling).

    This class lives in the 'util' module because it makes use of the 'os'
    module from the python stdlib.
    """

    def __init__(self, input):
        self._input = input
        self._buffer = []    # list of buffered chunks, newest last
        self._eof = False    # set once os.read returns no data
        self._lenbuf = 0     # total number of buffered bytes

    @property
    def hasbuffer(self):
        """True is any data is currently buffered

        This will be used externally a pre-step for polling IO. If there is
        already data then no polling should be set in place."""
        return bool(self._buffer)

    @property
    def closed(self):
        return self._input.closed

    def fileno(self):
        return self._input.fileno()

    def close(self):
        return self._input.close()

    def read(self, size):
        # fill the buffer until it holds 'size' bytes or EOF is reached,
        # then return (at most) that many bytes
        while (not self._eof) and (self._lenbuf < size):
            self._fillbuffer()
        return self._frombuffer(size)

    def readline(self, *args, **kwargs):
        if 1 < len(self._buffer):
            # this should not happen because both read and readline end with a
            # _frombuffer call that collapse it.
            self._buffer = [''.join(self._buffer)]
            self._lenbuf = len(self._buffer[0])
        # lfi: index of the first newline within the newest chunk, -1 if none
        lfi = -1
        if self._buffer:
            lfi = self._buffer[-1].find('\n')
        # keep reading until a newline shows up or the stream ends
        while (not self._eof) and lfi < 0:
            self._fillbuffer()
            if self._buffer:
                lfi = self._buffer[-1].find('\n')
        size = lfi + 1
        if lfi < 0: # end of file
            size = self._lenbuf
        elif 1 < len(self._buffer):
            # we need to take previous chunks into account
            size += self._lenbuf - len(self._buffer[-1])
        return self._frombuffer(size)

    def _frombuffer(self, size):
        """return at most 'size' data from the buffer

        The data are removed from the buffer."""
        if size == 0 or not self._buffer:
            return ''
        buf = self._buffer[0]
        if 1 < len(self._buffer):
            # collapse all chunks so a single slice suffices
            buf = ''.join(self._buffer)

        data = buf[:size]
        buf = buf[len(data):]
        if buf:
            # keep the unconsumed remainder as the single buffered chunk
            self._buffer = [buf]
            self._lenbuf = len(buf)
        else:
            self._buffer = []
            self._lenbuf = 0
        return data

    def _fillbuffer(self):
        """read data to the buffer"""
        # unbuffered os.read so polling callers see the true pipe state
        data = os.read(self._input.fileno(), _chunksize)
        if not data:
            self._eof = True
        else:
            self._lenbuf += len(data)
            self._buffer.append(data)
360
360
def popen2(cmd, env=None, newlines=False):
    """Run ``cmd`` through the shell; return its (stdin, stdout) pipes."""
    # Setting bufsize to -1 lets the system decide the buffer size.
    # The default for bufsize is 0, meaning unbuffered. This leads to
    # poor performance on Mac OS X: http://bugs.python.org/issue4194
    proc = subprocess.Popen(cmd,
                            shell=True,
                            bufsize=-1,
                            close_fds=closefds,
                            stdin=subprocess.PIPE,
                            stdout=subprocess.PIPE,
                            universal_newlines=newlines,
                            env=env)
    return proc.stdin, proc.stdout
371
371
def popen3(cmd, env=None, newlines=False):
    """Like popen4, but return only the (stdin, stdout, stderr) pipes."""
    pipes = popen4(cmd, env, newlines)
    return pipes[0], pipes[1], pipes[2]
375
375
def popen4(cmd, env=None, newlines=False, bufsize=-1):
    """Run ``cmd`` in a shell; return (stdin, stdout, stderr, Popen)."""
    proc = subprocess.Popen(cmd,
                            shell=True,
                            bufsize=bufsize,
                            close_fds=closefds,
                            stdin=subprocess.PIPE,
                            stdout=subprocess.PIPE,
                            stderr=subprocess.PIPE,
                            universal_newlines=newlines,
                            env=env)
    return proc.stdin, proc.stdout, proc.stderr, proc
384
384
def version():
    """Return version information if available."""
    try:
        from . import __version__
    except ImportError:
        # no generated version module (e.g. running from a source tree)
        return 'unknown'
    return __version__.version
392
392
def versiontuple(v=None, n=4):
    """Parses a Mercurial version string into an N-tuple.

    The version string to be parsed is specified with the ``v`` argument.
    If it isn't defined, the current Mercurial version string will be parsed.

    ``n`` can be 2, 3, or 4. Here is how some version strings map to
    returned values:

    >>> v = '3.6.1+190-df9b73d2d444'
    >>> versiontuple(v, 2)
    (3, 6)
    >>> versiontuple(v, 3)
    (3, 6, 1)
    >>> versiontuple(v, 4)
    (3, 6, 1, '190-df9b73d2d444')

    >>> versiontuple('3.6.1+190-df9b73d2d444+20151118')
    (3, 6, 1, '190-df9b73d2d444+20151118')

    >>> v = '3.6'
    >>> versiontuple(v, 2)
    (3, 6)
    >>> versiontuple(v, 3)
    (3, 6, None)
    >>> versiontuple(v, 4)
    (3, 6, None, None)

    >>> v = '3.9-rc'
    >>> versiontuple(v, 2)
    (3, 9)
    >>> versiontuple(v, 3)
    (3, 9, None)
    >>> versiontuple(v, 4)
    (3, 9, None, 'rc')

    >>> v = '3.9-rc+2-02a8fea4289b'
    >>> versiontuple(v, 2)
    (3, 9)
    >>> versiontuple(v, 3)
    (3, 9, None)
    >>> versiontuple(v, 4)
    (3, 9, None, 'rc+2-02a8fea4289b')
    """
    if not v:
        v = version()
    # split the numeric part from the first '+' or '-' suffix.
    # fix: use a raw string so '\+' is a regex escape, not an invalid
    # string-literal escape (a SyntaxWarning/error on newer Pythons)
    parts = remod.split(r'[\+-]', v, 1)
    if len(parts) == 1:
        vparts, extra = parts[0], None
    else:
        vparts, extra = parts

    vints = []
    for i in vparts.split('.'):
        try:
            vints.append(int(i))
        except ValueError:
            # stop at the first non-numeric component (e.g. 'rc')
            break
    # (3, 6) -> (3, 6, None)
    while len(vints) < 3:
        vints.append(None)

    if n == 2:
        return (vints[0], vints[1])
    if n == 3:
        return (vints[0], vints[1], vints[2])
    if n == 4:
        return (vints[0], vints[1], vints[2], extra)
461
461
462 # used by parsedate
462 # used by parsedate
463 defaultdateformats = (
463 defaultdateformats = (
464 '%Y-%m-%dT%H:%M:%S', # the 'real' ISO8601
464 '%Y-%m-%dT%H:%M:%S', # the 'real' ISO8601
465 '%Y-%m-%dT%H:%M', # without seconds
465 '%Y-%m-%dT%H:%M', # without seconds
466 '%Y-%m-%dT%H%M%S', # another awful but legal variant without :
466 '%Y-%m-%dT%H%M%S', # another awful but legal variant without :
467 '%Y-%m-%dT%H%M', # without seconds
467 '%Y-%m-%dT%H%M', # without seconds
468 '%Y-%m-%d %H:%M:%S', # our common legal variant
468 '%Y-%m-%d %H:%M:%S', # our common legal variant
469 '%Y-%m-%d %H:%M', # without seconds
469 '%Y-%m-%d %H:%M', # without seconds
470 '%Y-%m-%d %H%M%S', # without :
470 '%Y-%m-%d %H%M%S', # without :
471 '%Y-%m-%d %H%M', # without seconds
471 '%Y-%m-%d %H%M', # without seconds
472 '%Y-%m-%d %I:%M:%S%p',
472 '%Y-%m-%d %I:%M:%S%p',
473 '%Y-%m-%d %H:%M',
473 '%Y-%m-%d %H:%M',
474 '%Y-%m-%d %I:%M%p',
474 '%Y-%m-%d %I:%M%p',
475 '%Y-%m-%d',
475 '%Y-%m-%d',
476 '%m-%d',
476 '%m-%d',
477 '%m/%d',
477 '%m/%d',
478 '%m/%d/%y',
478 '%m/%d/%y',
479 '%m/%d/%Y',
479 '%m/%d/%Y',
480 '%a %b %d %H:%M:%S %Y',
480 '%a %b %d %H:%M:%S %Y',
481 '%a %b %d %I:%M:%S%p %Y',
481 '%a %b %d %I:%M:%S%p %Y',
482 '%a, %d %b %Y %H:%M:%S', # GNU coreutils "/bin/date --rfc-2822"
482 '%a, %d %b %Y %H:%M:%S', # GNU coreutils "/bin/date --rfc-2822"
483 '%b %d %H:%M:%S %Y',
483 '%b %d %H:%M:%S %Y',
484 '%b %d %I:%M:%S%p %Y',
484 '%b %d %I:%M:%S%p %Y',
485 '%b %d %H:%M:%S',
485 '%b %d %H:%M:%S',
486 '%b %d %I:%M:%S%p',
486 '%b %d %I:%M:%S%p',
487 '%b %d %H:%M',
487 '%b %d %H:%M',
488 '%b %d %I:%M%p',
488 '%b %d %I:%M%p',
489 '%b %d %Y',
489 '%b %d %Y',
490 '%b %d',
490 '%b %d',
491 '%H:%M:%S',
491 '%H:%M:%S',
492 '%I:%M:%S%p',
492 '%I:%M:%S%p',
493 '%H:%M',
493 '%H:%M',
494 '%I:%M%p',
494 '%I:%M%p',
495 )
495 )
496
496
497 extendeddateformats = defaultdateformats + (
497 extendeddateformats = defaultdateformats + (
498 "%Y",
498 "%Y",
499 "%Y-%m",
499 "%Y-%m",
500 "%b",
500 "%b",
501 "%b %Y",
501 "%b %Y",
502 )
502 )
503
503
def cachefunc(func):
    '''cache the result of function calls'''
    # XXX doesn't handle keywords args
    argcount = func.__code__.co_argcount
    if argcount == 0:
        # nullary function: remember the single result in a one-slot list
        memo = []
        def wrapper():
            if not memo:
                memo.append(func())
            return memo[0]
        return wrapper
    memo = {}
    if argcount == 1:
        # single-argument functions skip tuple packing/unpacking
        def wrapper(arg):
            if arg not in memo:
                memo[arg] = func(arg)
            return memo[arg]
    else:
        def wrapper(*args):
            if args not in memo:
                memo[args] = func(*args)
            return memo[args]

    return wrapper
529
529
class sortdict(dict):
    '''a simple sorted dictionary

    Iteration follows insertion order; re-setting an existing key moves
    it to the end.
    '''
    def __init__(self, data=None):
        # _list tracks key order; the dict base holds the mapping itself
        self._list = []
        if data:
            self.update(data)
    def copy(self):
        return sortdict(self)
    def __setitem__(self, key, val):
        if key in self:
            # move an existing key to the end of the order
            self._list.remove(key)
        self._list.append(key)
        dict.__setitem__(self, key, val)
    def __iter__(self):
        return self._list.__iter__()
    def update(self, src):
        if isinstance(src, dict):
            src = src.iteritems()
        for k, v in src:
            self[k] = v
    def clear(self):
        dict.clear(self)
        self._list = []
    def items(self):
        return [(k, self[k]) for k in self._list]
    def __delitem__(self, key):
        dict.__delitem__(self, key)
        self._list.remove(key)
    def pop(self, key, *args, **kwargs):
        # fix: return the popped value (it was previously discarded, so
        # pop() always returned None, breaking the dict.pop contract)
        value = dict.pop(self, key, *args, **kwargs)
        try:
            self._list.remove(key)
        except ValueError:
            # key was absent and a default was returned
            pass
        return value
    def keys(self):
        return self._list[:]
    def iterkeys(self):
        return self._list.__iter__()
    def iteritems(self):
        for k in self._list:
            yield k, self[k]
    def insert(self, index, key, val):
        self._list.insert(index, key)
        dict.__setitem__(self, key, val)
    def __repr__(self):
        if not self:
            return '%s()' % self.__class__.__name__
        return '%s(%r)' % (self.__class__.__name__, self.items())
578
578
class _lrucachenode(object):
    """A node in a doubly linked list.

    Holds a reference to nodes on either side as well as a key-value
    pair for the dictionary entry.
    """
    # __slots__ avoids a per-instance __dict__; a cache may hold many nodes
    __slots__ = (u'next', u'prev', u'key', u'value')

    def __init__(self):
        self.next = None
        self.prev = None

        # key is the _notset sentinel while the node holds no entry
        self.key = _notset
        self.value = None

    def markempty(self):
        """Mark the node as emptied."""
        self.key = _notset
class lrucachedict(object):
    """Dict that caches most recent accesses and sets.

    The dict consists of an actual backing dict - indexed by original
    key - and a doubly linked circular list defining the order of entries in
    the cache.

    The head node is the newest entry in the cache. If the cache is full,
    we recycle head.prev and make it the new head. Cache accesses result in
    the node being moved to before the existing head and being marked as the
    new head node.
    """
    def __init__(self, max):
        # backing mapping: key -> _lrucachenode
        self._cache = {}

        # start with a single self-linked node; further nodes are added
        # lazily by _addcapacity() until self._capacity is reached
        self._head = head = _lrucachenode()
        head.prev = head
        head.next = head
        self._size = 1
        self._capacity = max

    def __len__(self):
        return len(self._cache)

    def __contains__(self, k):
        return k in self._cache

    def __iter__(self):
        # We don't have to iterate in cache order, but why not.
        n = self._head
        for i in range(len(self._cache)):
            yield n.key
            n = n.next

    def __getitem__(self, k):
        # KeyError propagates for missing keys; a hit refreshes recency
        node = self._cache[k]
        self._movetohead(node)
        return node.value

    def __setitem__(self, k, v):
        node = self._cache.get(k)
        # Replace existing value and mark as newest.
        if node is not None:
            node.value = v
            self._movetohead(node)
            return

        if self._size < self._capacity:
            node = self._addcapacity()
        else:
            # Grab the last/oldest item.
            node = self._head.prev

            # At capacity. Kill the old entry.
            if node.key is not _notset:
                del self._cache[node.key]

        node.key = k
        node.value = v
        self._cache[k] = node
        # And mark it as newest entry. No need to adjust order since it
        # is already self._head.prev.
        self._head = node

    def __delitem__(self, k):
        node = self._cache.pop(k)
        node.markempty()

        # Temporarily mark as newest item before re-adjusting head to make
        # this node the oldest item.
        self._movetohead(node)
        self._head = node.next

    # Additional dict methods.

    def get(self, k, default=None):
        # does not refresh recency, unlike __getitem__
        try:
            return self._cache[k].value
        except KeyError:
            return default

    def clear(self):
        # empty every node but keep the allocated ring for reuse
        n = self._head
        while n.key is not _notset:
            n.markempty()
            n = n.next

        self._cache.clear()

    def copy(self):
        result = lrucachedict(self._capacity)
        n = self._head.prev
        # Iterate in oldest-to-newest order, so the copy has the right ordering
        for i in range(len(self._cache)):
            result[n.key] = n.value
            n = n.prev
        return result

    def _movetohead(self, node):
        """Mark a node as the newest, making it the new head.

        When a node is accessed, it becomes the freshest entry in the LRU
        list, which is denoted by self._head.

        Visually, let's make ``N`` the new head node (* denotes head):

            previous/oldest <-> head <-> next/next newest

            ----<->--- A* ---<->-----
            |                       |
            E <-> D <-> N <-> C <-> B

        To:

            ----<->--- N* ---<->-----
            |                       |
            E <-> D <-> C <-> B <-> A

        This requires the following moves:

           C.next = D  (node.prev.next = node.next)
           D.prev = C  (node.next.prev = node.prev)
           E.next = N  (head.prev.next = node)
           N.prev = E  (node.prev = head.prev)
           N.next = A  (node.next = head)
           A.prev = N  (head.prev = node)
        """
        head = self._head
        # C.next = D
        node.prev.next = node.next
        # D.prev = C
        node.next.prev = node.prev
        # N.prev = E
        node.prev = head.prev
        # N.next = A
        # It is tempting to do just "head" here, however if node is
        # adjacent to head, this will do bad things.
        node.next = head.prev.next
        # E.next = N
        node.next.prev = node
        # A.prev = N
        node.prev.next = node

        self._head = node

    def _addcapacity(self):
        """Add a node to the circular linked list.

        The new node is inserted before the head node.
        """
        head = self._head
        node = _lrucachenode()
        head.prev.next = node
        node.prev = head.prev
        node.next = head
        head.prev = node
        self._size += 1
        return node
756
756
def lrucachefunc(func):
    '''cache most recent results of function calls'''
    cache = {}
    order = collections.deque()

    def callcached(key, invoke):
        # shared LRU bookkeeping: evict the least-recently-used entry
        # once more than 20 results are cached, then record this call
        # as the most recent
        if key in cache:
            order.remove(key)
        else:
            if len(cache) > 20:
                del cache[order.popleft()]
            cache[key] = invoke()
        order.append(key)
        return cache[key]

    if func.__code__.co_argcount == 1:
        def f(arg):
            return callcached(arg, lambda: func(arg))
    else:
        def f(*args):
            return callcached(args, lambda: func(*args))

    return f
783
783
class propertycache(object):
    """Descriptor computing a value once and caching it on the instance.

    On first access the wrapped function runs and its result is stored
    in the instance ``__dict__`` under the function's name, shadowing
    this descriptor for all later lookups.
    """
    def __init__(self, func):
        self.func = func
        self.name = func.__name__

    def __get__(self, obj, type=None):
        value = self.func(obj)
        self.cachevalue(obj, value)
        return value

    def cachevalue(self, obj, value):
        # assign through __dict__ to bypass any __setattr__ override
        # (eg: repoview)
        obj.__dict__[self.name] = value
796
796
def pipefilter(s, cmd):
    '''filter string S through command CMD, returning its output'''
    proc = subprocess.Popen(cmd, shell=True, close_fds=closefds,
                            stdin=subprocess.PIPE, stdout=subprocess.PIPE)
    out, err = proc.communicate(s)
    return out
803
803
def tempfilter(s, cmd):
    '''filter string S through a pair of temporary files with CMD.
    CMD is used as a template to create the real command to be run,
    with the strings INFILE and OUTFILE replaced by the real names of
    the temporary files generated.'''
    inname = outname = None
    try:
        infd, inname = tempfile.mkstemp(prefix='hg-filter-in-')
        fp = os.fdopen(infd, pycompat.sysstr('wb'))
        fp.write(s)
        fp.close()
        outfd, outname = tempfile.mkstemp(prefix='hg-filter-out-')
        os.close(outfd)
        cmd = cmd.replace('INFILE', inname)
        cmd = cmd.replace('OUTFILE', outname)
        code = os.system(cmd)
        if pycompat.sysplatform == 'OpenVMS' and code & 1:
            # OpenVMS: odd status codes indicate success
            code = 0
        if code:
            raise Abort(_("command '%s' failed: %s") %
                        (cmd, explainexit(code)))
        return readfile(outname)
    finally:
        # best-effort cleanup of both temporary files
        for name in (inname, outname):
            try:
                if name:
                    os.unlink(name)
            except OSError:
                pass
837
837
# map of filter command prefixes to their implementations; consulted by
# filter() below, which falls back to pipefilter() for bare commands
filtertable = {
    'tempfile:': tempfilter,
    'pipe:': pipefilter,
    }
842
842
def filter(s, cmd):
    "filter a string through a command that transforms its input to its output"
    for prefix, fn in filtertable.iteritems():
        if cmd.startswith(prefix):
            # strip the recognized prefix and any leading whitespace
            return fn(s, cmd[len(prefix):].lstrip())
    return pipefilter(s, cmd)
849
849
def binary(s):
    """return true if a string is binary data"""
    if not s:
        return False
    return '\0' in s
853
853
def increasingchunks(source, min=1024, max=65536):
    '''return no less than min bytes per chunk while data remains,
    doubling min after each chunk until it reaches max'''
    def log2(x):
        # floor(log2(x)) for positive x; returns 0 for x == 0
        if not x:
            return 0
        bits = 0
        while x:
            x >>= 1
            bits += 1
        return bits - 1

    pending = []
    pendinglen = 0
    for chunk in source:
        pending.append(chunk)
        pendinglen += len(chunk)
        if pendinglen >= min:
            if min < max:
                # grow the threshold: double it, or jump to the largest
                # power of two not exceeding what we just buffered,
                # whichever is bigger, capped at max
                min = min << 1
                nmin = 1 << log2(pendinglen)
                if nmin > min:
                    min = nmin
                if min > max:
                    min = max
            yield ''.join(pending)
            pendinglen = 0
            pending = []
    if pending:
        yield ''.join(pending)
884
884
# convenience re-export so callers can keep using util.Abort
Abort = error.Abort
886
886
def always(fn):
    """Matcher predicate accepting every input."""
    return True
889
889
def never(fn):
    """Matcher predicate rejecting every input."""
    return False
892
892
def nogc(func):
    """disable garbage collector

    Python's garbage collector triggers a GC each time a certain number of
    container objects (the number being defined by gc.get_threshold()) are
    allocated even when marked not to be tracked by the collector. Tracking has
    no effect on when GCs are triggered, only on what objects the GC looks
    into. As a workaround, disable GC while building complex (huge)
    containers.

    This garbage collector issue have been fixed in 2.7.
    """
    # on 2.7+ the workaround is unnecessary; return func unwrapped
    if sys.version_info >= (2, 7):
        return func

    def wrapper(*args, **kwargs):
        wasenabled = gc.isenabled()
        gc.disable()
        try:
            return func(*args, **kwargs)
        finally:
            # only re-enable if the caller had GC on to begin with
            if wasenabled:
                gc.enable()
    return wrapper
916
916
def pathto(root, n1, n2):
    '''return the relative path from one place to another.
    root should use os.sep to separate directories
    n1 should use os.sep to separate directories
    n2 should use "/" to separate directories
    returns an os.sep-separated path.

    If n1 is a relative path, it's assumed it's
    relative to root.
    n2 should always be relative to root.
    '''
    if not n1:
        return localpath(n2)
    if os.path.isabs(n1):
        if os.path.splitdrive(root)[0] != os.path.splitdrive(n1)[0]:
            # different drives: no relative path exists, fall back to
            # an absolute path under root
            return os.path.join(root, localpath(n2))
        n2 = '/'.join((pconvert(root), n2))
    comps1 = splitpath(n1)
    comps2 = n2.split('/')
    comps1.reverse()
    comps2.reverse()
    # pop the shared leading components (compared from the reversed ends)
    while comps1 and comps2 and comps1[-1] == comps2[-1]:
        comps1.pop()
        comps2.pop()
    comps2.reverse()
    # one '..' per remaining component of n1, then descend into n2
    return pycompat.ossep.join(['..'] * len(comps1) + comps2) or '.'
942
942
def mainfrozen():
    """return True if we are a frozen executable.

    The code supports py2exe (most common, Windows only) and tools/freeze
    (portable, not much used).
    """
    if safehasattr(sys, "frozen"):     # new py2exe
        return True
    if safehasattr(sys, "importers"):  # old py2exe
        return True
    return imp.is_frozen(u"__main__")  # tools/freeze
952
952
# the location of data files matching the source code
if mainfrozen() and getattr(sys, 'frozen', None) != 'macosx_app':
    # executable version (py2exe) doesn't support __file__
    datapath = os.path.dirname(pycompat.sysexecutable)
else:
    datapath = os.path.dirname(pycompat.fsencode(__file__))

# register the data path with the i18n machinery at import time
i18n.setdatapath(datapath)
961
961
# cached path of the 'hg' executable; populated lazily by hgexecutable()
_hgexecutable = None
963
963
def hgexecutable():
    """return location of the 'hg' executable.

    Defaults to $HG or 'hg' in the search path.
    """
    # compute once and cache in the module-level _hgexecutable
    if _hgexecutable is None:
        hg = encoding.environ.get('HG')
        mainmod = sys.modules['__main__']
        if hg:
            # explicit override via the HG environment variable
            _sethgexecutable(hg)
        elif mainfrozen():
            if getattr(sys, 'frozen', None) == 'macosx_app':
                # Env variable set by py2app
                _sethgexecutable(encoding.environ['EXECUTABLEPATH'])
            else:
                _sethgexecutable(pycompat.sysexecutable)
        elif (os.path.basename(
            pycompat.fsencode(getattr(mainmod, '__file__', ''))) == 'hg'):
            # running directly from the 'hg' script
            _sethgexecutable(pycompat.fsencode(mainmod.__file__))
        else:
            # fall back to searching PATH, then to argv[0]'s basename
            exe = findexe('hg') or os.path.basename(sys.argv[0])
            _sethgexecutable(exe)
    return _hgexecutable
987
987
def _sethgexecutable(path):
    """set location of the 'hg' executable"""
    # updates the module-level cache consulted by hgexecutable()
    global _hgexecutable
    _hgexecutable = path
992
992
def _isstdout(f):
    # True if f looks like the process's real stdout, i.e. it has a
    # fileno() matching sys.__stdout__'s; falsy otherwise (including
    # objects with no fileno attribute)
    fileno = getattr(f, 'fileno', None)
    return fileno and fileno() == sys.__stdout__.fileno()
996
996
def shellenviron(environ=None):
    """return environ with optional override, useful for shelling out"""
    def py2shell(val):
        'convert python object into string that is useful to shell'
        # True -> '1'; None/False -> '0'; everything else is stringified
        if val is True:
            return '1'
        if val is None or val is False:
            return '0'
        return str(val)

    env = dict(encoding.environ)
    if environ:
        for key, value in environ.iteritems():
            env[key] = py2shell(value)
    env['HG'] = hgexecutable()
    return env
1011
1011
def system(cmd, environ=None, cwd=None, out=None):
    '''enhanced shell command execution.
    run with environment maybe modified, maybe in different dir.

    if out is specified, it is assumed to be a file-like object that has a
    write() method. stdout and stderr will be redirected to out.'''
    try:
        # flush any buffered output of our own before the child writes
        stdout.flush()
    except Exception:
        pass
    cmd = quotecommand(cmd)
    if pycompat.sysplatform == 'plan9' and (sys.version_info[0] == 2
                                            and sys.version_info[1] < 7):
        # subprocess kludge to work around issues in half-baked Python
        # ports, notably bichued/python:
        if not cwd is None:
            os.chdir(cwd)
        rc = os.system(cmd)
    else:
        env = shellenviron(environ)
        if out is None or _isstdout(out):
            # child inherits our stdout/stderr directly
            rc = subprocess.call(cmd, shell=True, close_fds=closefds,
                                 env=env, cwd=cwd)
        else:
            # capture combined stdout+stderr and copy it line by line
            # into the supplied file-like object
            proc = subprocess.Popen(cmd, shell=True, close_fds=closefds,
                                    env=env, cwd=cwd, stdout=subprocess.PIPE,
                                    stderr=subprocess.STDOUT)
            for line in iter(proc.stdout.readline, ''):
                out.write(line)
            proc.wait()
            rc = proc.returncode
    if pycompat.sysplatform == 'OpenVMS' and rc & 1:
        # OpenVMS: odd status codes indicate success, normalize to 0
        rc = 0
    return rc
1046
1046
def checksignature(func):
    '''wrap a function with code to check for calling errors'''
    def check(*args, **kwargs):
        try:
            return func(*args, **kwargs)
        except TypeError:
            # A single-frame traceback means the TypeError came from the
            # call itself (arguments not matching func's signature), so
            # report it as a SignatureError; a deeper traceback means
            # the error happened inside func and is re-raised unchanged.
            if len(traceback.extract_tb(sys.exc_info()[2])) == 1:
                raise error.SignatureError
            raise

    return check
1058
1058
# Hardlinks are problematic on CIFS, do not allow hardlinks
# until we find a way to work around it cleanly (issue4546).
# This is a variable so extensions can opt-in to using them.
# (Consulted by copyfile() below before attempting oslink().)
allowhardlinks = False
1063
1063
def copyfile(src, dest, hardlink=False, copystat=False, checkambig=False):
    '''copy a file, preserving mode and optionally other stat info like
    atime/mtime

    checkambig argument is used with filestat, and is useful only if
    destination file is guarded by any lock (e.g. repo.lock or
    repo.wlock).

    copystat and checkambig should be exclusive.
    '''
    assert not (copystat and checkambig)
    oldstat = None
    if os.path.lexists(dest):
        if checkambig:
            # remember the old stat so mtime ambiguity can be detected
            # after the copy below
            oldstat = checkambig and filestat(dest)
        unlink(dest)
    if allowhardlinks and hardlink:
        try:
            oslink(src, dest)
            return
        except (IOError, OSError):
            pass # fall back to normal copy
    if os.path.islink(src):
        # recreate the symlink rather than copying its target
        os.symlink(os.readlink(src), dest)
        # copytime is ignored for symlinks, but in general copytime isn't needed
        # for them anyway
    else:
        try:
            shutil.copyfile(src, dest)
            if copystat:
                # copystat also copies mode
                shutil.copystat(src, dest)
            else:
                shutil.copymode(src, dest)
            if oldstat and oldstat.stat:
                newstat = filestat(dest)
                if newstat.isambig(oldstat):
                    # stat of copied file is ambiguous to original one
                    advanced = (oldstat.stat.st_mtime + 1) & 0x7fffffff
                    os.utime(dest, (advanced, advanced))
        except shutil.Error as inst:
            raise Abort(str(inst))
1106
1106
def copyfiles(src, dst, hardlink=None, progress=lambda t, pos: None):
    """Copy a directory tree using hardlinks if possible.

    Returns a (hardlink, num) pair: whether hardlinking remained
    possible throughout, and how many files were processed.
    """
    num = 0

    if hardlink is None:
        # default: hardlink only when src and dst are on the same device
        hardlink = (os.stat(src).st_dev ==
                    os.stat(os.path.dirname(dst)).st_dev)
    if hardlink:
        topic = _('linking')
    else:
        topic = _('copying')

    if os.path.isdir(src):
        os.mkdir(dst)
        for name, kind in osutil.listdir(src):
            srcname = os.path.join(src, name)
            dstname = os.path.join(dst, name)
            def nprog(t, pos):
                # offset the recursive call's progress by files done so far
                if pos is not None:
                    return progress(t, pos + num)
            hardlink, n = copyfiles(srcname, dstname, hardlink, progress=nprog)
            num += n
    else:
        if hardlink:
            try:
                oslink(src, dst)
            except (IOError, OSError):
                # once a hardlink fails, fall back to copying for the rest
                hardlink = False
                shutil.copy(src, dst)
        else:
            shutil.copy(src, dst)
        num += 1
        progress(topic, num)
    # a final None position signals completion to the progress callback
    progress(topic, None)

    return hardlink, num
1143
1143
# base filenames that Windows reserves for devices (in any case/extension)
_winreservednames = '''con prn aux nul
    com1 com2 com3 com4 com5 com6 com7 com8 com9
    lpt1 lpt2 lpt3 lpt4 lpt5 lpt6 lpt7 lpt8 lpt9'''.split()
# characters that may not appear in Windows filenames
_winreservedchars = ':*?"<>|'
def checkwinfilename(path):
    r'''Check that the base-relative path is a valid filename on Windows.
    Returns None if the path is ok, or a UI string describing the problem.

    >>> checkwinfilename("just/a/normal/path")
    >>> checkwinfilename("foo/bar/con.xml")
    "filename contains 'con', which is reserved on Windows"
    >>> checkwinfilename("foo/con.xml/bar")
    "filename contains 'con', which is reserved on Windows"
    >>> checkwinfilename("foo/bar/xml.con")
    >>> checkwinfilename("foo/bar/AUX/bla.txt")
    "filename contains 'AUX', which is reserved on Windows"
    >>> checkwinfilename("foo/bar/bla:.txt")
    "filename contains ':', which is reserved on Windows"
    >>> checkwinfilename("foo/bar/b\07la.txt")
    "filename contains '\\x07', which is invalid on Windows"
    >>> checkwinfilename("foo/bar/bla ")
    "filename ends with ' ', which is not allowed on Windows"
    >>> checkwinfilename("../bar")
    >>> checkwinfilename("foo\\")
    "filename ends with '\\', which is invalid on Windows"
    >>> checkwinfilename("foo\\/bar")
    "directory name ends with '\\', which is invalid on Windows"
    '''
    if path.endswith('\\'):
        return _("filename ends with '\\', which is invalid on Windows")
    if '\\/' in path:
        return _("directory name ends with '\\', which is invalid on Windows")
    # examine each path component; both '/' and '\' act as separators
    for n in path.replace('\\', '/').split('/'):
        if not n:
            continue
        for c in n:
            if c in _winreservedchars:
                return _("filename contains '%s', which is reserved "
                         "on Windows") % c
            # control characters (0x00-0x1f) are invalid in filenames
            if ord(c) <= 31:
                return _("filename contains %r, which is invalid "
                         "on Windows") % c
        # reserved device names apply to the part before the first dot
        # (e.g. "con.txt" is reserved just like "con")
        base = n.split('.')[0]
        if base and base.lower() in _winreservednames:
            return _("filename contains '%s', which is reserved "
                     "on Windows") % base
        # a trailing dot or space is rejected; note that "n not in '..'"
        # is a substring test, so it deliberately exempts "." and ".."
        t = n[-1]
        if t in '. ' and n not in '..':
            return _("filename ends with '%s', which is not allowed "
                     "on Windows") % t
1194
1194
# Platform-dependent choices: which filename validator applies, and which
# clock to use for timing.
if pycompat.osname == 'nt':
    checkosfilename = checkwinfilename
    timer = time.clock
else:
    checkosfilename = platform.checkosfilename
    timer = time.time

# Prefer time.perf_counter when the running Python provides it; it is
# documented as the highest-resolution clock available.
if safehasattr(time, "perf_counter"):
    timer = time.perf_counter
1204
1204
def makelock(info, pathname):
    """Create a lock file at pathname recording info.

    A symlink whose target is *info* is preferred because it can be
    created atomically.  When the platform has no symlinks, or creating
    one fails for any reason other than the lock already existing, the
    info is written into a regular file created exclusively instead.
    """
    try:
        return os.symlink(info, pathname)
    except OSError as err:
        # an already-existing lock is an error for the caller; any other
        # symlink failure falls through to the plain-file strategy
        if err.errno == errno.EEXIST:
            raise
    except AttributeError: # no symlink in os
        pass

    flags = os.O_CREAT | os.O_WRONLY | os.O_EXCL
    fd = os.open(pathname, flags)
    os.write(fd, info)
    os.close(fd)
1217
1217
def readlock(pathname):
    """Return the info stored in the lock file at pathname.

    Reads the symlink target when the lock is a symlink; otherwise reads
    the contents of the regular lock file (makelock()'s fallback format).
    """
    try:
        return os.readlink(pathname)
    except OSError as err:
        # EINVAL: the path is not a symlink; ENOSYS: symlinks are not
        # supported here -- both mean "fall back to a regular file"
        if err.errno not in (errno.EINVAL, errno.ENOSYS):
            raise
    except AttributeError: # no symlink in os
        pass
    fp = posixfile(pathname)
    data = fp.read()
    fp.close()
    return data
1230
1230
def fstat(fp):
    '''stat file object that may not have fileno method.'''
    try:
        fd = fp.fileno()
    except AttributeError:
        # no file descriptor available; stat by name instead
        return os.stat(fp.name)
    return os.fstat(fd)
1237
1237
1238 # File system features
1238 # File system features
1239
1239
def fscasesensitive(path):
    """
    Return true if the given path is on a case-sensitive filesystem

    Requires a path (like /foo/.hg) ending with a foldable final
    directory component.
    """
    st = os.lstat(path)
    dirname, base = os.path.split(path)
    folded = base.upper()
    if folded == base:
        folded = base.lower()
    if folded == base:
        # the name has no case-foldable characters at all, so there is
        # no evidence against case sensitivity
        return True
    try:
        st2 = os.lstat(os.path.join(dirname, folded))
    except OSError:
        # the case-folded variant does not exist: case-sensitive
        return True
    if st2 == st:
        # same inode under both spellings: case-insensitive
        return False
    return True
1262
1262
try:
    import re2
    # _re2 is tri-state: None means "re2 imported but not yet verified to
    # work", False means "unavailable", True means "usable" (set later by
    # _re._checkre2)
    _re2 = None
except ImportError:
    _re2 = False
1268
1268
class _re(object):
    """Facade over the stdlib 're' module that transparently uses the
    re2 engine when it is installed and actually working."""

    def _checkre2(self):
        # probe re2 lazily, exactly once; the verdict is cached in the
        # module-level tri-state _re2
        global _re2
        try:
            # check if match works, see issue3964
            _re2 = bool(re2.match(r'\[([^\[]+)\]', '[ui]'))
        except ImportError:
            _re2 = False

    def compile(self, pat, flags=0):
        '''Compile a regular expression, using re2 if possible

        For best performance, use only re2-compatible regexp features. The
        only flags from the re module that are re2-compatible are
        IGNORECASE and MULTILINE.'''
        if _re2 is None:
            self._checkre2()
        if _re2 and (flags & ~(remod.IGNORECASE | remod.MULTILINE)) == 0:
            # re2.compile takes no flags argument; encode the two
            # supported flags inline in the pattern instead
            if flags & remod.IGNORECASE:
                pat = '(?i)' + pat
            if flags & remod.MULTILINE:
                pat = '(?m)' + pat
            try:
                return re2.compile(pat)
            except re2.error:
                # pattern uses a feature re2 cannot handle; fall back to re
                pass
        return remod.compile(pat, flags)

    @propertycache
    def escape(self):
        '''Return the version of escape corresponding to self.compile.

        This is imperfect because whether re2 or re is used for a particular
        function depends on the flags, etc, but it's the best we can do.
        '''
        global _re2
        if _re2 is None:
            self._checkre2()
        if _re2:
            return re2.escape
        else:
            return remod.escape
1311
1311
# module-level singleton: callers use util.re.compile() / util.re.escape
re = _re()

# cache of directory listings used by fspath(), keyed by directory path
_fspathcache = {}
def fspath(name, root):
    '''Get name in the case stored in the filesystem

    The name should be relative to root, and be normcase-ed for efficiency.

    Note that this function is unnecessary, and should not be
    called, for case-sensitive filesystems (simply because it's expensive).

    The root should be normcase-ed, too.
    '''
    def _makefspathcacheentry(dir):
        # map each normcased entry name to its on-disk spelling
        return dict((normcase(n), n) for n in os.listdir(dir))

    seps = pycompat.ossep
    if pycompat.osaltsep:
        seps = seps + pycompat.osaltsep
    # Protect backslashes. This gets silly very quickly.
    # str.replace returns a new string, so the result must be assigned;
    # previously the return value was discarded, leaving '\' unescaped
    # inside the character classes below, where the regex engine would
    # swallow it as an escape and never match a backslash separator.
    seps = seps.replace('\\', '\\\\')
    pattern = remod.compile(r'([^%s]+)|([%s]+)' % (seps, seps))
    dir = os.path.normpath(root)
    result = []
    for part, sep in pattern.findall(name):
        if sep:
            # runs of separators are passed through unchanged
            result.append(sep)
            continue

        if dir not in _fspathcache:
            _fspathcache[dir] = _makefspathcacheentry(dir)
        contents = _fspathcache[dir]

        found = contents.get(part)
        if not found:
            # retry "once per directory" per "dirstate.walk" which
            # may take place for each patches of "hg qpush", for example
            _fspathcache[dir] = contents = _makefspathcacheentry(dir)
            found = contents.get(part)

        # fall back to the normcased spelling if the entry vanished
        result.append(found or part)
        dir = os.path.join(dir, part)

    return ''.join(result)
1356
1356
def checknlink(testfile):
    '''check whether hardlink count reporting works properly'''

    # testfile may be open, so we need a separate file for checking to
    # work around issue2543 (or testfile may get lost on Samba shares)
    f1 = testfile + ".hgtmp1"
    if os.path.lexists(f1):
        return False
    try:
        posixfile(f1, 'w').close()
    except IOError:
        # could not even create the probe file: report "broken" after a
        # best-effort cleanup
        try:
            os.unlink(f1)
        except OSError:
            pass
        return False

    f2 = testfile + ".hgtmp2"
    fd = None
    try:
        oslink(f1, f2)
        # nlinks() may behave differently for files on Windows shares if
        # the file is open.
        fd = posixfile(f2)
        return nlinks(f2) > 1
    except OSError:
        return False
    finally:
        # close before unlinking, then remove both probe files best-effort
        if fd is not None:
            fd.close()
        for f in (f1, f2):
            try:
                os.unlink(f)
            except OSError:
                pass
1392
1392
def endswithsep(path):
    '''Check path ends with os.sep or os.altsep.'''
    # NOTE: when neither separator matches this may return a falsy
    # non-boolean (the empty/None osaltsep); callers test truthiness only
    if path.endswith(pycompat.ossep):
        return True
    altsep = pycompat.osaltsep
    return altsep and path.endswith(altsep)
1397
1397
def splitpath(path):
    '''Split path by os.sep.
    Note that this function does not use os.altsep because this is
    an alternative of simple "xxx.split(os.sep)".
    It is recommended to use os.path.normpath() before using this
    function if need.'''
    sep = pycompat.ossep
    return path.split(sep)
1405
1405
def gui():
    '''Are we running in a GUI?'''
    if pycompat.sysplatform != 'darwin':
        return pycompat.osname == "nt" or encoding.environ.get("DISPLAY")
    if 'SSH_CONNECTION' in encoding.environ:
        # handle SSH access to a box where the user is logged in
        return False
    isgui = getattr(osutil, 'isgui', None)
    if isgui:
        # check if a CoreGraphics session is available
        return isgui()
    # pure build; use a safe default
    return True
1420
1420
def mktempcopy(name, emptyok=False, createmode=None):
    """Create a temporary file with the same contents from name

    The permission bits are copied from the original file.

    If the temporary file is going to be truncated immediately, you
    can use emptyok=True as an optimization.

    Returns the name of the temporary file.
    """
    d, fn = os.path.split(name)
    fd, temp = tempfile.mkstemp(prefix='.%s-' % fn, dir=d)
    os.close(fd)
    # Temporary files are created with mode 0600, which is usually not
    # what we want. If the original file already exists, just copy
    # its mode. Otherwise, manually obey umask.
    copymode(name, temp, createmode)
    if emptyok:
        return temp
    try:
        try:
            ifp = posixfile(name, "rb")
        except IOError as inst:
            # a missing source simply yields the empty temp file; any
            # other error is annotated with the filename and re-raised
            if inst.errno == errno.ENOENT:
                return temp
            if not getattr(inst, 'filename', None):
                inst.filename = name
            raise
        ofp = posixfile(temp, "wb")
        for chunk in filechunkiter(ifp):
            ofp.write(chunk)
        ifp.close()
        ofp.close()
    except: # re-raises
        # don't leave a stale temp file behind on failure
        try: os.unlink(temp)
        except OSError: pass
        raise
    return temp
1459
1459
class filestat(object):
    """help to exactly detect change of a file

    'stat' attribute is result of 'os.stat()' if specified 'path'
    exists. Otherwise, it is None. This can avoid preparative
    'exists()' examination on client side of this class.
    """
    def __init__(self, path):
        try:
            self.stat = os.stat(path)
        except OSError as err:
            if err.errno != errno.ENOENT:
                raise
            # missing file: record None rather than raising, so callers
            # need no separate exists() check
            self.stat = None

    # defining __eq__ would otherwise reset __hash__ to None
    __hash__ = object.__hash__

    def __eq__(self, old):
        try:
            # if ambiguity between stat of new and old file is
            # avoided, comparison of size, ctime and mtime is enough
            # to exactly detect change of a file regardless of platform
            return (self.stat.st_size == old.stat.st_size and
                    self.stat.st_ctime == old.stat.st_ctime and
                    self.stat.st_mtime == old.stat.st_mtime)
        except AttributeError:
            # at least one side has stat = None (missing file): not equal
            return False

    def isambig(self, old):
        """Examine whether new (= self) stat is ambiguous against old one

        "S[N]" below means stat of a file at N-th change:

        - S[n-1].ctime  < S[n].ctime: can detect change of a file
        - S[n-1].ctime == S[n].ctime
          - S[n-1].ctime  < S[n].mtime: means natural advancing (*1)
          - S[n-1].ctime == S[n].mtime: is ambiguous (*2)
          - S[n-1].ctime  > S[n].mtime: never occurs naturally (don't care)
        - S[n-1].ctime  > S[n].ctime: never occurs naturally (don't care)

        Case (*2) above means that a file was changed twice or more at
        same time in sec (= S[n-1].ctime), and comparison of timestamp
        is ambiguous.

        Base idea to avoid such ambiguity is "advance mtime 1 sec, if
        timestamp is ambiguous".

        But advancing mtime only in case (*2) doesn't work as
        expected, because naturally advanced S[n].mtime in case (*1)
        might be equal to manually advanced S[n-1 or earlier].mtime.

        Therefore, all "S[n-1].ctime == S[n].ctime" cases should be
        treated as ambiguous regardless of mtime, to avoid overlooking
        by confliction between such mtime.

        Advancing mtime "if isambig(oldstat)" ensures "S[n-1].mtime !=
        S[n].mtime", even if size of a file isn't changed.
        """
        try:
            return (self.stat.st_ctime == old.stat.st_ctime)
        except AttributeError:
            # a missing file on either side cannot be ambiguous
            return False

    def avoidambig(self, path, old):
        """Change file stat of specified path to avoid ambiguity

        'old' should be previous filestat of 'path'.

        This skips avoiding ambiguity, if a process doesn't have
        appropriate privileges for 'path'.
        """
        # advance mtime by one second, masked to keep it a positive
        # 31-bit value
        advanced = (old.stat.st_mtime + 1) & 0x7fffffff
        try:
            os.utime(path, (advanced, advanced))
        except OSError as inst:
            if inst.errno == errno.EPERM:
                # utime() on the file created by another user causes EPERM,
                # if a process doesn't have appropriate privileges
                return
            raise

    def __ne__(self, other):
        # Python 2 does not derive __ne__ from __eq__ automatically
        return not self == other
1543
1543
class atomictempfile(object):
    '''writable file object that atomically updates a file

    All writes will go to a temporary copy of the original file. Call
    close() when you are done writing, and atomictempfile will rename
    the temporary copy to the original name, making the changes
    visible. If the object is destroyed without being closed, all your
    writes are discarded.

    checkambig argument of constructor is used with filestat, and is
    useful only if target file is guarded by any lock (e.g. repo.lock
    or repo.wlock).
    '''
    def __init__(self, name, mode='w+b', createmode=None, checkambig=False):
        self.__name = name # permanent name
        self._tempname = mktempcopy(name, emptyok=('w' in mode),
                                    createmode=createmode)
        self._fp = posixfile(self._tempname, mode)
        self._checkambig = checkambig

        # delegated methods
        self.read = self._fp.read
        self.write = self._fp.write
        self.seek = self._fp.seek
        self.tell = self._fp.tell
        self.fileno = self._fp.fileno

    def close(self):
        """Commit: rename the temporary file over the permanent name.

        With checkambig, the target's mtime is advanced if the rename
        left its stat ambiguous against the pre-rename state.
        """
        if not self._fp.closed:
            self._fp.close()
            filename = localpath(self.__name)
            # oldstat is falsy when checkambig is off or the target is new
            oldstat = self._checkambig and filestat(filename)
            if oldstat and oldstat.stat:
                rename(self._tempname, filename)
                newstat = filestat(filename)
                if newstat.isambig(oldstat):
                    # stat of changed file is ambiguous to original one
                    advanced = (oldstat.stat.st_mtime + 1) & 0x7fffffff
                    os.utime(filename, (advanced, advanced))
            else:
                rename(self._tempname, filename)

    def discard(self):
        """Abort: delete the temp file without touching the permanent
        name."""
        if not self._fp.closed:
            try:
                os.unlink(self._tempname)
            except OSError:
                pass
            self._fp.close()

    def __del__(self):
        if safehasattr(self, '_fp'): # constructor actually did something
            self.discard()

    def __enter__(self):
        return self

    def __exit__(self, exctype, excvalue, traceback):
        # commit on clean exit; discard if the with-block raised
        if exctype is not None:
            self.discard()
        else:
            self.close()
1606
1606
def makedirs(name, mode=None, notindexed=False):
    """recursive directory creation with parent mode inheritance

    Newly created directories are marked as "not to be indexed by
    the content indexing service", if ``notindexed`` is specified
    for "write" mode access.
    """
    try:
        makedir(name, notindexed)
    except OSError as err:
        if err.errno == errno.EEXIST:
            return
        if err.errno != errno.ENOENT or not name:
            raise
        parent = os.path.dirname(os.path.abspath(name))
        if parent == name:
            # reached the filesystem root without success
            raise
        # create the missing ancestors first, then retry this level
        makedirs(parent, mode, notindexed)
        try:
            makedir(name, notindexed)
        except OSError as err:
            if err.errno != errno.EEXIST:
                raise
            # lost a creation race with another process; that's fine
            return
    if mode is not None:
        os.chmod(name, mode)
1634
1634
def readfile(path):
    """Return the entire contents of the file at *path* as bytes."""
    fp = open(path, 'rb')
    try:
        return fp.read()
    finally:
        fp.close()
1638
1638
def writefile(path, text):
    """Replace the contents of the file at *path* with *text* (bytes)."""
    fp = open(path, 'wb')
    try:
        fp.write(text)
    finally:
        fp.close()
1642
1642
def appendfile(path, text):
    """Append *text* (bytes) to the file at *path*, creating it if
    necessary."""
    fp = open(path, 'ab')
    try:
        fp.write(text)
    finally:
        fp.close()
1646
1646
class chunkbuffer(object):
    """Allow arbitrary sized chunks of data to be efficiently read from an
    iterator over chunks of arbitrary size."""

    def __init__(self, in_iter):
        """in_iter is the iterator that's iterating over the input chunks.
        targetsize is how big a buffer to try to maintain."""
        def splitbig(chunks):
            # Re-chunk anything larger than 1MB into 256kB pieces so a
            # single oversized chunk cannot dominate buffered memory.
            for chunk in chunks:
                if len(chunk) > 2**20:
                    pos = 0
                    while pos < len(chunk):
                        end = pos + 2 ** 18
                        yield chunk[pos:end]
                        pos = end
                else:
                    yield chunk
        self.iter = splitbig(in_iter)
        self._queue = collections.deque()
        # Offset of the first unconsumed byte inside self._queue[0]; lets
        # read() consume a chunk piecemeal without re-slicing the queue.
        self._chunkoffset = 0

    def read(self, l=None):
        """Read L bytes of data from the iterator of chunks of data.
        Returns less than L bytes if the iterator runs dry.

        If size parameter is omitted, read everything"""
        if l is None:
            return ''.join(self.iter)

        left = l
        buf = []
        queue = self._queue
        while left > 0:
            # refill the queue
            if not queue:
                target = 2**18
                for chunk in self.iter:
                    queue.append(chunk)
                    target -= len(chunk)
                    if target <= 0:
                        break
                if not queue:
                    break

            # The easy way to do this would be to queue.popleft(), modify the
            # chunk (if necessary), then queue.appendleft(). However, for cases
            # where we read partial chunk content, this incurs 2 dequeue
            # mutations and creates a new str for the remaining chunk in the
            # queue. Our code below avoids this overhead.

            chunk = queue[0]
            chunkl = len(chunk)
            offset = self._chunkoffset

            # Use full chunk.
            if offset == 0 and left >= chunkl:
                left -= chunkl
                queue.popleft()
                buf.append(chunk)
                # self._chunkoffset remains at 0.
                continue

            chunkremaining = chunkl - offset

            # Use all of unconsumed part of chunk.
            if left >= chunkremaining:
                left -= chunkremaining
                queue.popleft()
                # offset == 0 is enabled by block above, so this won't merely
                # copy via ``chunk[0:]``.
                buf.append(chunk[offset:])
                self._chunkoffset = 0

            # Partial chunk needed.
            else:
                buf.append(chunk[offset:offset + left])
                self._chunkoffset += left
                left -= chunkremaining

        return ''.join(buf)
1727
1727
def filechunkiter(f, size=131072, limit=None):
    """Create a generator that produces the data in the file size
    (default 131072) bytes at a time, up to optional limit (default is
    to read all data). Chunks may be less than size bytes if the
    chunk is the last chunk in the file, or the file is a socket or
    some other type of file that sometimes reads less data than is
    requested."""
    assert size >= 0
    assert limit is None or limit >= 0
    while True:
        # never ask for more than the remaining limit allows
        nbytes = size if limit is None else min(limit, size)
        # nbytes == 0 short-circuits so we never do a blocking 0-byte read
        s = nbytes and f.read(nbytes)
        if not s:
            break
        if limit:
            limit -= len(s)
        yield s
1748
1748
def makedate(timestamp=None):
    '''Return a unix timestamp (or the current time) as a (unixtime,
    offset) tuple based off the local timezone.'''
    if timestamp is None:
        timestamp = time.time()
    if timestamp < 0:
        hint = _("check your clock")
        raise Abort(_("negative timestamp: %d") % timestamp, hint=hint)
    # The local offset is the difference between the UTC and the local
    # wall-clock rendering of this same instant.
    utcdelta = (datetime.datetime.utcfromtimestamp(timestamp)
                - datetime.datetime.fromtimestamp(timestamp))
    tz = utcdelta.days * 86400 + utcdelta.seconds
    return timestamp, tz
1761
1761
def datestr(date=None, format='%a %b %d %H:%M:%S %Y %1%2'):
    """represent a (unixtime, offset) tuple as a localized time.
    unixtime is seconds since the epoch, and offset is the time zone's
    number of seconds away from UTC.

    >>> datestr((0, 0))
    'Thu Jan 01 00:00:00 1970 +0000'
    >>> datestr((42, 0))
    'Thu Jan 01 00:00:42 1970 +0000'
    >>> datestr((-42, 0))
    'Wed Dec 31 23:59:18 1969 +0000'
    >>> datestr((0x7fffffff, 0))
    'Tue Jan 19 03:14:07 2038 +0000'
    >>> datestr((-0x80000000, 0))
    'Fri Dec 13 20:45:52 1901 +0000'
    """
    t, tz = date or makedate()
    if "%1" in format or "%2" in format or "%z" in format:
        # %1%2 (and %z, an alias for both) expand to the usual +HHMM
        # timezone marker; a positive offset (west of UTC) renders as '-'.
        sign = "-" if tz > 0 else "+"
        q, r = divmod(abs(tz) // 60, 60)
        format = format.replace("%z", "%1%2")
        format = format.replace("%1", "%c%02d" % (sign, q))
        format = format.replace("%2", "%02d" % r)
    # Clamp to the signed 32-bit range before rendering.
    d = min(max(t - tz, -0x80000000), 0x7fffffff)
    # Never use time.gmtime() and datetime.datetime.fromtimestamp()
    # because they use the gmtime() system call which is buggy on Windows
    # for negative values.
    stamp = datetime.datetime(1970, 1, 1) + datetime.timedelta(seconds=d)
    return stamp.strftime(format)
1797
1797
def shortdate(date=None):
    """turn (timestamp, tzoff) tuple into iso 8601 date."""
    # Only the calendar date is rendered; the time and offset are dropped.
    return datestr(date, format='%Y-%m-%d')
1801
1801
def parsetimezone(s):
    """find a trailing timezone, if any, in string, and return a
    (offset, remainder) pair"""

    # named zones we understand are UTC aliases
    if s.endswith("GMT") or s.endswith("UTC"):
        return 0, s[:-3].rstrip()

    # Unix-style timezones [+-]hhmm
    if len(s) >= 5 and s[-5] in "+-" and s[-4:].isdigit():
        sign = 1 if s[-5] == "+" else -1
        hours, minutes = int(s[-4:-2]), int(s[-2:])
        # offsets east of UTC are negative in Mercurial's convention
        return -sign * (hours * 60 + minutes) * 60, s[:-5].rstrip()

    # ISO8601 trailing Z
    if s.endswith("Z") and s[-2:-1].isdigit():
        return 0, s[:-1]

    # ISO8601-style [+-]hh:mm
    if (len(s) >= 6 and s[-6] in "+-" and s[-3] == ":" and
        s[-5:-3].isdigit() and s[-2:].isdigit()):
        sign = 1 if s[-6] == "+" else -1
        hours, minutes = int(s[-5:-3]), int(s[-2:])
        return -sign * (hours * 60 + minutes) * 60, s[:-6]

    # no recognizable timezone suffix
    return None, s
1829
1829
def strdate(string, format, defaults=[]):
    """parse a localized time string and return a (unixtime, offset) tuple.
    if the string cannot be parsed, ValueError is raised."""
    # NOTE(review): ``defaults`` is expected to be a mapping of field group
    # -> (biased, now) strings as built by parsedate(); the [] default is
    # never usable but is also never mutated.
    # NOTE: unixtime = localunixtime + offset
    offset, date = parsetimezone(string)

    # add missing elements from defaults
    usenow = False # default to using biased defaults
    for part in ("S", "M", "HI", "d", "mb", "yY"): # decreasing specificity
        # a group matches if any of its strptime letters appears in format
        found = [True for p in part if ("%"+p) in format]
        if not found:
            date += "@" + defaults[part][usenow]
            format += "@%" + part[0]
        else:
            # We've found a specific time element, less specific time
            # elements are relative to today
            usenow = True

    timetuple = time.strptime(date, format)
    localunixtime = int(calendar.timegm(timetuple))
    if offset is None:
        # local timezone
        unixtime = int(time.mktime(timetuple))
        offset = unixtime - localunixtime
    else:
        unixtime = localunixtime + offset
    return unixtime, offset
1857
1857
def parsedate(date, formats=None, bias=None):
    """parse a localized date/time and return a (unixtime, offset) tuple.

    The date may be a "unixtime offset" string or in one of the specified
    formats. If the date already is a (unixtime, offset) tuple, it is returned.

    >>> parsedate(' today ') == parsedate(\
                                  datetime.date.today().strftime('%b %d'))
    True
    >>> parsedate( 'yesterday ') == parsedate((datetime.date.today() -\
                                               datetime.timedelta(days=1)\
                                              ).strftime('%b %d'))
    True
    >>> now, tz = makedate()
    >>> strnow, strtz = parsedate('now')
    >>> (strnow - now) < 1
    True
    >>> tz == strtz
    True
    """
    if bias is None:
        bias = {}
    if not date:
        return 0, 0
    if isinstance(date, tuple) and len(date) == 2:
        # already parsed; pass through unchanged
        return date
    if not formats:
        formats = defaultdateformats
    date = date.strip()

    if date == 'now' or date == _('now'):
        return makedate()
    if date == 'today' or date == _('today'):
        date = datetime.date.today().strftime('%b %d')
    elif date == 'yesterday' or date == _('yesterday'):
        date = (datetime.date.today() -
                datetime.timedelta(days=1)).strftime('%b %d')

    try:
        # fast path: a raw "unixtime offset" pair of integers
        when, offset = map(int, date.split(' '))
    except ValueError:
        # fill out defaults
        now = makedate()
        defaults = {}
        for part in ("d", "mb", "yY", "HI", "M", "S"):
            # this piece is for rounding the specific end of unknowns
            b = bias.get(part)
            if b is None:
                if part[0] in "HMS":
                    b = "00"
                else:
                    b = "0"

            # this piece is for matching the generic end to today's date
            n = datestr(now, "%" + part[0])

            defaults[part] = (b, n)

        # try each configured format until one parses
        for format in formats:
            try:
                when, offset = strdate(date, format, defaults)
            except (ValueError, OverflowError):
                pass
            else:
                break
        else:
            raise Abort(_('invalid date: %r') % date)
    # validate explicit (probably user-specified) date and
    # time zone offset. values must fit in signed 32 bits for
    # current 32-bit linux runtimes. timezones go from UTC-12
    # to UTC+14
    if when < -0x80000000 or when > 0x7fffffff:
        raise Abort(_('date exceeds 32 bits: %d') % when)
    if offset < -50400 or offset > 43200:
        raise Abort(_('impossible time zone offset: %d') % offset)
    return when, offset
1934
1934
def matchdate(date):
    """Return a function that matches a given date match specifier

    Formats include:

    '{date}' match a given date to the accuracy provided

    '<{date}' on or before a given date

    '>{date}' on or after a given date

    >>> p1 = parsedate("10:29:59")
    >>> p2 = parsedate("10:30:00")
    >>> p3 = parsedate("10:30:59")
    >>> p4 = parsedate("10:31:00")
    >>> p5 = parsedate("Sep 15 10:30:00 1999")
    >>> f = matchdate("10:30")
    >>> f(p1[0])
    False
    >>> f(p2[0])
    True
    >>> f(p3[0])
    True
    >>> f(p4[0])
    False
    >>> f(p5[0])
    False
    """

    def lower(date):
        # round an imprecise date down (missing month/day become 1)
        d = {'mb': "1", 'd': "1"}
        return parsedate(date, extendeddateformats, d)[0]

    def upper(date):
        # round an imprecise date up (missing fields become their maximum);
        # retry with shorter month lengths until the day-of-month is valid
        d = {'mb': "12", 'HI': "23", 'M': "59", 'S': "59"}
        for days in ("31", "30", "29"):
            try:
                d["d"] = days
                return parsedate(date, extendeddateformats, d)[0]
            except Abort:
                pass
        d["d"] = "28"
        return parsedate(date, extendeddateformats, d)[0]

    date = date.strip()

    if not date:
        raise Abort(_("dates cannot consist entirely of whitespace"))
    elif date[0] == "<":
        if not date[1:]:
            raise Abort(_("invalid day spec, use '<DATE'"))
        when = upper(date[1:])
        return lambda x: x <= when
    elif date[0] == ">":
        if not date[1:]:
            raise Abort(_("invalid day spec, use '>DATE'"))
        when = lower(date[1:])
        return lambda x: x >= when
    elif date[0] == "-":
        # '-N' means within the last N days
        try:
            days = int(date[1:])
        except ValueError:
            raise Abort(_("invalid day spec: %s") % date[1:])
        if days < 0:
            raise Abort(_("%s must be nonnegative (see 'hg help dates')")
                % date[1:])
        when = makedate()[0] - days * 3600 * 24
        return lambda x: x >= when
    elif " to " in date:
        # 'A to B' is an inclusive range
        a, b = date.split(" to ")
        start, stop = lower(a), upper(b)
        return lambda x: x >= start and x <= stop
    else:
        # a bare date matches anything within its precision window
        start, stop = lower(date), upper(date)
        return lambda x: x >= start and x <= stop
2010
2010
def stringmatcher(pattern, casesensitive=True):
    """
    accepts a string, possibly starting with 're:' or 'literal:' prefix.
    returns the matcher name, pattern, and matcher function.
    missing or unknown prefixes are treated as literal matches.

    With casesensitive=False, regex patterns are compiled with re.I and
    literal patterns are compared case-insensitively.
    """
    if pattern.startswith('re:'):
        pattern = pattern[3:]
        flags = 0 if casesensitive else remod.I
        try:
            regex = remod.compile(pattern, flags)
        except remod.error as e:
            raise error.ParseError(_('invalid regular expression: %s')
                                   % e)
        return 're', pattern, regex.search

    if pattern.startswith('literal:'):
        pattern = pattern[8:]

    if casesensitive:
        match = pattern.__eq__
    else:
        ipat = encoding.lower(pattern)
        match = lambda s: ipat == encoding.lower(s)
    return 'literal', pattern, match
2069
2069
def shortuser(user):
    """Return a short representation of a user name or email address."""
    # strip any domain part, then any "Name <user..." prefix, then cut at
    # the first space or dot
    at = user.find('@')
    if at >= 0:
        user = user[:at]
    lt = user.find('<')
    if lt >= 0:
        user = user[lt + 1:]
    for sep in (' ', '.'):
        i = user.find(sep)
        if i >= 0:
            user = user[:i]
    return user
2085
2085
def emailuser(user):
    """Return the user portion of an email address."""
    # drop everything from '@' on, then any leading "Name <" prefix
    at = user.find('@')
    if at >= 0:
        user = user[:at]
    lt = user.find('<')
    if lt >= 0:
        user = user[lt + 1:]
    return user
2095
2095
def email(author):
    '''get email of author.'''
    # take whatever is between '<' and '>'; with neither present the
    # whole string is treated as the address
    close = author.find('>')
    if close == -1:
        close = None
    return author[author.find('<') + 1:close]
2102
2102
def ellipsis(text, maxlength=400):
    """Trim string to at most maxlength (default: 400) columns in display."""
    # NOTE(review): delegates to encoding.trim, which appends '...' when it
    # truncates; presumably it measures display columns rather than bytes --
    # confirm against mercurial.encoding.trim.
    return encoding.trim(text, maxlength, ellipsis='...')
2106
2106
def unitcountfn(*unittable):
    '''return a function that renders a readable count of some quantity

    Each unittable entry is (multiplier, divisor, format); the first entry
    whose threshold (divisor * multiplier) the count reaches is used, and
    the last entry is the fallback.
    '''

    def render(count):
        for multiplier, divisor, fmt in unittable:
            if count >= divisor * multiplier:
                return fmt % (count / float(divisor))
        return unittable[-1][2] % count

    return render
2117
2117
# bytecount(n) renders a byte quantity with the largest unit that keeps the
# value >= 1, using roughly three significant digits (e.g. '1.23 MB').
bytecount = unitcountfn(
    (100, 1 << 30, _('%.0f GB')),
    (10, 1 << 30, _('%.1f GB')),
    (1, 1 << 30, _('%.2f GB')),
    (100, 1 << 20, _('%.0f MB')),
    (10, 1 << 20, _('%.1f MB')),
    (1, 1 << 20, _('%.2f MB')),
    (100, 1 << 10, _('%.0f KB')),
    (10, 1 << 10, _('%.1f KB')),
    (1, 1 << 10, _('%.2f KB')),
    (1, 1, _('%.0f bytes')),
    )
2130
2130
def uirepr(s):
    """Return repr(s) with doubled backslashes collapsed to single ones.

    Keeps Windows path repr() output readable in user-facing messages.
    """
    r = repr(s)
    return r.replace('\\\\', '\\')
2134
2134
2135 # delay import of textwrap
2135 # delay import of textwrap
def MBTextWrapper(**kwargs):
    """Return a textwrap.TextWrapper measuring width in terminal columns.

    The subclass is created lazily on first call and then cached back
    into the module-level ``MBTextWrapper`` name (via ``global`` below),
    so later calls construct instances of the already-built class
    directly without re-running this factory body.
    """
    class tw(textwrap.TextWrapper):
        """
        Extend TextWrapper for width-awareness.

        Neither number of 'bytes' in any encoding nor 'characters' is
        appropriate to calculate terminal columns for specified string.

        Original TextWrapper implementation uses built-in 'len()' directly,
        so overriding is needed to use width information of each characters.

        In addition, characters classified into 'ambiguous' width are
        treated as wide in East Asian area, but as narrow in other.

        This requires use decision to determine width of such characters.
        """
        def _cutdown(self, ucstr, space_left):
            # Split ucstr into (head, tail) so that head occupies at most
            # space_left display columns (per encoding.ucolwidth).
            l = 0
            colwidth = encoding.ucolwidth
            for i in xrange(len(ucstr)):
                l += colwidth(ucstr[i])
                if space_left < l:
                    return (ucstr[:i], ucstr[i:])
            return ucstr, ''

        # overriding of base class
        def _handle_long_word(self, reversed_chunks, cur_line, cur_len, width):
            # Called when the next chunk is wider than a whole line; either
            # break it at a column boundary or place it alone on the line.
            space_left = max(width - cur_len, 1)

            if self.break_long_words:
                cut, res = self._cutdown(reversed_chunks[-1], space_left)
                cur_line.append(cut)
                reversed_chunks[-1] = res
            elif not cur_line:
                cur_line.append(reversed_chunks.pop())

        # this overriding code is imported from TextWrapper of Python 2.6
        # to calculate columns of string by 'encoding.ucolwidth()'
        def _wrap_chunks(self, chunks):
            colwidth = encoding.ucolwidth

            lines = []
            if self.width <= 0:
                raise ValueError("invalid width %r (must be > 0)" % self.width)

            # Arrange in reverse order so items can be efficiently popped
            # from a stack of chucks.
            chunks.reverse()

            while chunks:

                # Start the list of chunks that will make up the current line.
                # cur_len is just the length of all the chunks in cur_line.
                cur_line = []
                cur_len = 0

                # Figure out which static string will prefix this line.
                if lines:
                    indent = self.subsequent_indent
                else:
                    indent = self.initial_indent

                # Maximum width for this line.
                width = self.width - len(indent)

                # First chunk on line is whitespace -- drop it, unless this
                # is the very beginning of the text (i.e. no lines started yet).
                if self.drop_whitespace and chunks[-1].strip() == '' and lines:
                    del chunks[-1]

                while chunks:
                    l = colwidth(chunks[-1])

                    # Can at least squeeze this chunk onto the current line.
                    if cur_len + l <= width:
                        cur_line.append(chunks.pop())
                        cur_len += l

                    # Nope, this line is full.
                    else:
                        break

                # The current line is full, and the next chunk is too big to
                # fit on *any* line (not just this one).
                if chunks and colwidth(chunks[-1]) > width:
                    self._handle_long_word(chunks, cur_line, cur_len, width)

                # If the last chunk on this line is all whitespace, drop it.
                if (self.drop_whitespace and
                    cur_line and cur_line[-1].strip() == ''):
                    del cur_line[-1]

                # Convert current line back to a string and store it in list
                # of all lines (return value).
                if cur_line:
                    lines.append(indent + ''.join(cur_line))

            return lines

    # Replace this factory with the class itself so the lazy construction
    # above happens only once per process.
    global MBTextWrapper
    MBTextWrapper = tw
    return tw(**kwargs)
2238
2238
def wrap(line, width, initindent='', hangindent=''):
    """Wrap a byte string to ``width`` terminal columns.

    ``initindent`` prefixes the first output line and ``hangindent`` all
    following ones.  Inputs are byte strings in the local encoding; the
    wrapped result is re-encoded the same way before being returned.
    """
    widest = max(len(hangindent), len(initindent))
    if width <= widest:
        # adjust for weird terminal size
        width = max(78, widest + 1)
    # work on unicode so MBTextWrapper can measure display columns
    decode = lambda s: s.decode(encoding.encoding, encoding.encodingmode)
    line = decode(line)
    initindent = decode(initindent)
    hangindent = decode(hangindent)
    wrapper = MBTextWrapper(width=width,
                            initial_indent=initindent,
                            subsequent_indent=hangindent)
    return wrapper.fill(line).encode(encoding.encoding)
2251
2251
if (pyplatform.python_implementation() == 'CPython' and
    sys.version_info < (3, 0)):
    # There is an issue in CPython that some IO methods do not handle EINTR
    # correctly. The following table shows what CPython version (and functions)
    # are affected (buggy: has the EINTR bug, okay: otherwise):
    #
    #                | < 2.7.4 | 2.7.4 to 2.7.12 | >= 3.0
    #   --------------------------------------------------
    #    fp.__iter__ | buggy   | buggy           | okay
    #    fp.read*    | buggy   | okay [1]        | okay
    #
    # [1]: fixed by changeset 67dc99a989cd in the cpython hg repo.
    #
    # Here we workaround the EINTR issue for fileobj.__iter__. Other methods
    # like "read*" are ignored for now, as Python < 2.7.4 is a minority.
    #
    # Although we can workaround the EINTR issue for fp.__iter__, it is slower:
    # "for x in fp" is 4x faster than "for x in iter(fp.readline, '')" in
    # CPython 2, because CPython 2 maintains an internal readahead buffer for
    # fp.__iter__ but not other fp.read* methods.
    #
    # On modern systems like Linux, the "read" syscall cannot be interrupted
    # when reading "fast" files like on-disk files. So the EINTR issue only
    # affects things like pipes, sockets, ttys etc. We treat "normal" (S_ISREG)
    # files approximately as "fast" files and use the fast (unsafe) code path,
    # to minimize the performance impact.
    if sys.version_info >= (2, 7, 4):
        # fp.readline deals with EINTR correctly, use it as a workaround.
        def _safeiterfile(fp):
            """Yield lines of fp via readline(), which retries on EINTR."""
            return iter(fp.readline, '')
    else:
        # fp.read* are broken too, manually deal with EINTR in a stupid way.
        # note: this may block longer than necessary because of bufsize.
        def _safeiterfile(fp, bufsize=4096):
            """Yield lines of fp using os.read() and retrying on EINTR."""
            fd = fp.fileno()
            line = ''
            while True:
                try:
                    buf = os.read(fd, bufsize)
                except OSError as ex:
                    # os.read only raises EINTR before any data is read
                    if ex.errno == errno.EINTR:
                        continue
                    else:
                        raise
                line += buf
                if '\n' in buf:
                    # emit every complete line; keep a trailing partial
                    # line in 'line' for the next read
                    splitted = line.splitlines(True)
                    line = ''
                    for l in splitted:
                        if l[-1] == '\n':
                            yield l
                        else:
                            line = l
                if not buf:
                    break
            if line:
                yield line

    def iterfile(fp):
        """Return a line iterator over fp that is safe against EINTR."""
        fastpath = True
        if type(fp) is file:
            # regular on-disk files take the fast (unguarded) path; only
            # slow streams (pipes, sockets, ttys) need the workaround
            fastpath = stat.S_ISREG(os.fstat(fp.fileno()).st_mode)
        if fastpath:
            return fp
        else:
            return _safeiterfile(fp)
else:
    # PyPy and CPython 3 do not have the EINTR issue thus no workaround needed.
    def iterfile(fp):
        """Return fp unchanged; this interpreter's IO handles EINTR."""
        return fp
2323
2323
def iterlines(iterator):
    """Yield every line of every chunk produced by ``iterator``."""
    for data in iterator:
        for text in data.splitlines():
            yield text
2328
2328
def expandpath(path):
    """Expand environment variables and a leading '~' in ``path``."""
    expanded = os.path.expandvars(path)
    return os.path.expanduser(expanded)
2331
2331
def hgcmd():
    """Return the command used to execute current hg

    This is different from hgexecutable() because on Windows we want
    to avoid things opening new shell windows like batch files, so we
    get either the python call or current executable.
    """
    if not mainfrozen():
        return gethgcmd()
    if getattr(sys, 'frozen', None) == 'macosx_app':
        # Env variable set by py2app
        return [encoding.environ['EXECUTABLEPATH']]
    return [pycompat.sysexecutable]
2346
2346
def rundetached(args, condfn):
    """Execute the argument list in a detached process.

    condfn is a callable which is called repeatedly and should return
    True once the child process is known to have started successfully.
    At this point, the child process PID is returned. If the child
    process fails to start or finishes before condfn() evaluates to
    True, return -1.
    """
    # Windows case is easier because the child process is either
    # successfully starting and validating the condition or exiting
    # on failure. We just poll on its PID. On Unix, if the child
    # process fails to start, it will be left in a zombie state until
    # the parent wait on it, which we cannot do since we expect a long
    # running process on success. Instead we listen for SIGCHLD telling
    # us our child process terminated.
    terminated = set()
    def handler(signum, frame):
        # reap the child so it does not linger as a zombie; os.wait()
        # returns a (pid, status) pair which we record
        terminated.add(os.wait())
    prevhandler = None
    SIGCHLD = getattr(signal, 'SIGCHLD', None)
    if SIGCHLD is not None:
        prevhandler = signal.signal(SIGCHLD, handler)
    try:
        pid = spawndetached(args)
        while not condfn():
            # NOTE(review): terminated holds (pid, status) pairs, so
            # 'pid in terminated' compares an int against pairs and looks
            # like it can never match; testpid(pid) appears to be what
            # actually detects child exit -- confirm intent.
            if ((pid in terminated or not testpid(pid))
                and not condfn()):
                return -1
            time.sleep(0.1)
        return pid
    finally:
        # always restore the previous SIGCHLD disposition
        if prevhandler is not None:
            signal.signal(signal.SIGCHLD, prevhandler)
2381
2381
def interpolate(prefix, mapping, s, fn=None, escape_prefix=False):
    """Return the result of interpolating items in the mapping into string s.

    prefix is a single character string, or a two character string with
    a backslash as the first character if the prefix needs to be escaped in
    a regular expression.

    fn is an optional function that will be applied to the replacement text
    just before replacement.

    escape_prefix is an optional flag that allows using doubled prefix for
    its escaping.
    """
    fn = fn or (lambda s: s)
    patterns = '|'.join(mapping.keys())
    if escape_prefix:
        patterns += '|' + prefix
        if len(prefix) > 1:
            # strip the regex-escaping backslash to get the literal char
            prefix_char = prefix[1:]
        else:
            prefix_char = prefix
        # work on a copy so the caller's mapping is not mutated by the
        # self-escaping entry we add below
        mapping = dict(mapping)
        mapping[prefix_char] = prefix_char
    r = remod.compile(r'%s(%s)' % (prefix, patterns))
    return r.sub(lambda x: fn(mapping[x.group()[1:]]), s)
2406
2406
def getport(port):
    """Return the port for a given network service.

    If port is an integer, it's returned as is. If it's a string, it's
    looked up using socket.getservbyname(). If there's no matching
    service, error.Abort is raised.
    """
    # numeric values (ints or strings of digits) are taken literally
    try:
        return int(port)
    except ValueError:
        pass

    # otherwise treat the value as a service name, e.g. 'http'
    try:
        return socket.getservbyname(port)
    except socket.error:
        raise Abort(_("no port number associated with service '%s'")
                    % port)
2423
2423
# lowercase spellings accepted as booleans by parsebool()
_booleans = dict.fromkeys(['1', 'yes', 'true', 'on', 'always'], True)
_booleans.update(dict.fromkeys(['0', 'no', 'false', 'off', 'never'], False))

def parsebool(s):
    """Parse s into a boolean.

    If s is not a valid boolean, returns None.
    """
    return _booleans.get(s.lower())
2434
2434
2435 _hextochr = dict((a + b, chr(int(a + b, 16)))
2435 _hextochr = dict((a + b, chr(int(a + b, 16)))
2436 for a in string.hexdigits for b in string.hexdigits)
2436 for a in string.hexdigits for b in string.hexdigits)
2437
2437
2438 class url(object):
2438 class url(object):
2439 r"""Reliable URL parser.
2439 r"""Reliable URL parser.
2440
2440
2441 This parses URLs and provides attributes for the following
2441 This parses URLs and provides attributes for the following
2442 components:
2442 components:
2443
2443
2444 <scheme>://<user>:<passwd>@<host>:<port>/<path>?<query>#<fragment>
2444 <scheme>://<user>:<passwd>@<host>:<port>/<path>?<query>#<fragment>
2445
2445
2446 Missing components are set to None. The only exception is
2446 Missing components are set to None. The only exception is
2447 fragment, which is set to '' if present but empty.
2447 fragment, which is set to '' if present but empty.
2448
2448
2449 If parsefragment is False, fragment is included in query. If
2449 If parsefragment is False, fragment is included in query. If
2450 parsequery is False, query is included in path. If both are
2450 parsequery is False, query is included in path. If both are
2451 False, both fragment and query are included in path.
2451 False, both fragment and query are included in path.
2452
2452
2453 See http://www.ietf.org/rfc/rfc2396.txt for more information.
2453 See http://www.ietf.org/rfc/rfc2396.txt for more information.
2454
2454
2455 Note that for backward compatibility reasons, bundle URLs do not
2455 Note that for backward compatibility reasons, bundle URLs do not
2456 take host names. That means 'bundle://../' has a path of '../'.
2456 take host names. That means 'bundle://../' has a path of '../'.
2457
2457
2458 Examples:
2458 Examples:
2459
2459
2460 >>> url('http://www.ietf.org/rfc/rfc2396.txt')
2460 >>> url('http://www.ietf.org/rfc/rfc2396.txt')
2461 <url scheme: 'http', host: 'www.ietf.org', path: 'rfc/rfc2396.txt'>
2461 <url scheme: 'http', host: 'www.ietf.org', path: 'rfc/rfc2396.txt'>
2462 >>> url('ssh://[::1]:2200//home/joe/repo')
2462 >>> url('ssh://[::1]:2200//home/joe/repo')
2463 <url scheme: 'ssh', host: '[::1]', port: '2200', path: '/home/joe/repo'>
2463 <url scheme: 'ssh', host: '[::1]', port: '2200', path: '/home/joe/repo'>
2464 >>> url('file:///home/joe/repo')
2464 >>> url('file:///home/joe/repo')
2465 <url scheme: 'file', path: '/home/joe/repo'>
2465 <url scheme: 'file', path: '/home/joe/repo'>
2466 >>> url('file:///c:/temp/foo/')
2466 >>> url('file:///c:/temp/foo/')
2467 <url scheme: 'file', path: 'c:/temp/foo/'>
2467 <url scheme: 'file', path: 'c:/temp/foo/'>
2468 >>> url('bundle:foo')
2468 >>> url('bundle:foo')
2469 <url scheme: 'bundle', path: 'foo'>
2469 <url scheme: 'bundle', path: 'foo'>
2470 >>> url('bundle://../foo')
2470 >>> url('bundle://../foo')
2471 <url scheme: 'bundle', path: '../foo'>
2471 <url scheme: 'bundle', path: '../foo'>
2472 >>> url(r'c:\foo\bar')
2472 >>> url(r'c:\foo\bar')
2473 <url path: 'c:\\foo\\bar'>
2473 <url path: 'c:\\foo\\bar'>
2474 >>> url(r'\\blah\blah\blah')
2474 >>> url(r'\\blah\blah\blah')
2475 <url path: '\\\\blah\\blah\\blah'>
2475 <url path: '\\\\blah\\blah\\blah'>
2476 >>> url(r'\\blah\blah\blah#baz')
2476 >>> url(r'\\blah\blah\blah#baz')
2477 <url path: '\\\\blah\\blah\\blah', fragment: 'baz'>
2477 <url path: '\\\\blah\\blah\\blah', fragment: 'baz'>
2478 >>> url(r'file:///C:\users\me')
2478 >>> url(r'file:///C:\users\me')
2479 <url scheme: 'file', path: 'C:\\users\\me'>
2479 <url scheme: 'file', path: 'C:\\users\\me'>
2480
2480
2481 Authentication credentials:
2481 Authentication credentials:
2482
2482
2483 >>> url('ssh://joe:xyz@x/repo')
2483 >>> url('ssh://joe:xyz@x/repo')
2484 <url scheme: 'ssh', user: 'joe', passwd: 'xyz', host: 'x', path: 'repo'>
2484 <url scheme: 'ssh', user: 'joe', passwd: 'xyz', host: 'x', path: 'repo'>
2485 >>> url('ssh://joe@x/repo')
2485 >>> url('ssh://joe@x/repo')
2486 <url scheme: 'ssh', user: 'joe', host: 'x', path: 'repo'>
2486 <url scheme: 'ssh', user: 'joe', host: 'x', path: 'repo'>
2487
2487
2488 Query strings and fragments:
2488 Query strings and fragments:
2489
2489
2490 >>> url('http://host/a?b#c')
2490 >>> url('http://host/a?b#c')
2491 <url scheme: 'http', host: 'host', path: 'a', query: 'b', fragment: 'c'>
2491 <url scheme: 'http', host: 'host', path: 'a', query: 'b', fragment: 'c'>
2492 >>> url('http://host/a?b#c', parsequery=False, parsefragment=False)
2492 >>> url('http://host/a?b#c', parsequery=False, parsefragment=False)
2493 <url scheme: 'http', host: 'host', path: 'a?b#c'>
2493 <url scheme: 'http', host: 'host', path: 'a?b#c'>
2494
2494
2495 Empty path:
2495 Empty path:
2496
2496
2497 >>> url('')
2497 >>> url('')
2498 <url path: ''>
2498 <url path: ''>
2499 >>> url('#a')
2499 >>> url('#a')
2500 <url path: '', fragment: 'a'>
2500 <url path: '', fragment: 'a'>
2501 >>> url('http://host/')
2501 >>> url('http://host/')
2502 <url scheme: 'http', host: 'host', path: ''>
2502 <url scheme: 'http', host: 'host', path: ''>
2503 >>> url('http://host/#a')
2503 >>> url('http://host/#a')
2504 <url scheme: 'http', host: 'host', path: '', fragment: 'a'>
2504 <url scheme: 'http', host: 'host', path: '', fragment: 'a'>
2505
2505
2506 Only scheme:
2506 Only scheme:
2507
2507
2508 >>> url('http:')
2508 >>> url('http:')
2509 <url scheme: 'http'>
2509 <url scheme: 'http'>
2510 """
2510 """
2511
2511
2512 _safechars = "!~*'()+"
2512 _safechars = "!~*'()+"
2513 _safepchars = "/!~*'()+:\\"
2513 _safepchars = "/!~*'()+:\\"
2514 _matchscheme = remod.compile('^[a-zA-Z0-9+.\\-]+:').match
2514 _matchscheme = remod.compile('^[a-zA-Z0-9+.\\-]+:').match
2515
2515
2516 def __init__(self, path, parsequery=True, parsefragment=True):
2516 def __init__(self, path, parsequery=True, parsefragment=True):
2517 # We slowly chomp away at path until we have only the path left
2517 # We slowly chomp away at path until we have only the path left
2518 self.scheme = self.user = self.passwd = self.host = None
2518 self.scheme = self.user = self.passwd = self.host = None
2519 self.port = self.path = self.query = self.fragment = None
2519 self.port = self.path = self.query = self.fragment = None
2520 self._localpath = True
2520 self._localpath = True
2521 self._hostport = ''
2521 self._hostport = ''
2522 self._origpath = path
2522 self._origpath = path
2523
2523
2524 if parsefragment and '#' in path:
2524 if parsefragment and '#' in path:
2525 path, self.fragment = path.split('#', 1)
2525 path, self.fragment = path.split('#', 1)
2526
2526
2527 # special case for Windows drive letters and UNC paths
2527 # special case for Windows drive letters and UNC paths
2528 if hasdriveletter(path) or path.startswith('\\\\'):
2528 if hasdriveletter(path) or path.startswith('\\\\'):
2529 self.path = path
2529 self.path = path
2530 return
2530 return
2531
2531
2532 # For compatibility reasons, we can't handle bundle paths as
2532 # For compatibility reasons, we can't handle bundle paths as
2533 # normal URLS
2533 # normal URLS
2534 if path.startswith('bundle:'):
2534 if path.startswith('bundle:'):
2535 self.scheme = 'bundle'
2535 self.scheme = 'bundle'
2536 path = path[7:]
2536 path = path[7:]
2537 if path.startswith('//'):
2537 if path.startswith('//'):
2538 path = path[2:]
2538 path = path[2:]
2539 self.path = path
2539 self.path = path
2540 return
2540 return
2541
2541
2542 if self._matchscheme(path):
2542 if self._matchscheme(path):
2543 parts = path.split(':', 1)
2543 parts = path.split(':', 1)
2544 if parts[0]:
2544 if parts[0]:
2545 self.scheme, path = parts
2545 self.scheme, path = parts
2546 self._localpath = False
2546 self._localpath = False
2547
2547
2548 if not path:
2548 if not path:
2549 path = None
2549 path = None
2550 if self._localpath:
2550 if self._localpath:
2551 self.path = ''
2551 self.path = ''
2552 return
2552 return
2553 else:
2553 else:
2554 if self._localpath:
2554 if self._localpath:
2555 self.path = path
2555 self.path = path
2556 return
2556 return
2557
2557
2558 if parsequery and '?' in path:
2558 if parsequery and '?' in path:
2559 path, self.query = path.split('?', 1)
2559 path, self.query = path.split('?', 1)
2560 if not path:
2560 if not path:
2561 path = None
2561 path = None
2562 if not self.query:
2562 if not self.query:
2563 self.query = None
2563 self.query = None
2564
2564
2565 # // is required to specify a host/authority
2565 # // is required to specify a host/authority
2566 if path and path.startswith('//'):
2566 if path and path.startswith('//'):
2567 parts = path[2:].split('/', 1)
2567 parts = path[2:].split('/', 1)
2568 if len(parts) > 1:
2568 if len(parts) > 1:
2569 self.host, path = parts
2569 self.host, path = parts
2570 else:
2570 else:
2571 self.host = parts[0]
2571 self.host = parts[0]
2572 path = None
2572 path = None
2573 if not self.host:
2573 if not self.host:
2574 self.host = None
2574 self.host = None
2575 # path of file:///d is /d
2575 # path of file:///d is /d
2576 # path of file:///d:/ is d:/, not /d:/
2576 # path of file:///d:/ is d:/, not /d:/
2577 if path and not hasdriveletter(path):
2577 if path and not hasdriveletter(path):
2578 path = '/' + path
2578 path = '/' + path
2579
2579
2580 if self.host and '@' in self.host:
2580 if self.host and '@' in self.host:
2581 self.user, self.host = self.host.rsplit('@', 1)
2581 self.user, self.host = self.host.rsplit('@', 1)
2582 if ':' in self.user:
2582 if ':' in self.user:
2583 self.user, self.passwd = self.user.split(':', 1)
2583 self.user, self.passwd = self.user.split(':', 1)
2584 if not self.host:
2584 if not self.host:
2585 self.host = None
2585 self.host = None
2586
2586
2587 # Don't split on colons in IPv6 addresses without ports
2587 # Don't split on colons in IPv6 addresses without ports
2588 if (self.host and ':' in self.host and
2588 if (self.host and ':' in self.host and
2589 not (self.host.startswith('[') and self.host.endswith(']'))):
2589 not (self.host.startswith('[') and self.host.endswith(']'))):
2590 self._hostport = self.host
2590 self._hostport = self.host
2591 self.host, self.port = self.host.rsplit(':', 1)
2591 self.host, self.port = self.host.rsplit(':', 1)
2592 if not self.host:
2592 if not self.host:
2593 self.host = None
2593 self.host = None
2594
2594
2595 if (self.host and self.scheme == 'file' and
2595 if (self.host and self.scheme == 'file' and
2596 self.host not in ('localhost', '127.0.0.1', '[::1]')):
2596 self.host not in ('localhost', '127.0.0.1', '[::1]')):
2597 raise Abort(_('file:// URLs can only refer to localhost'))
2597 raise Abort(_('file:// URLs can only refer to localhost'))
2598
2598
2599 self.path = path
2599 self.path = path
2600
2600
2601 # leave the query string escaped
2601 # leave the query string escaped
2602 for a in ('user', 'passwd', 'host', 'port',
2602 for a in ('user', 'passwd', 'host', 'port',
2603 'path', 'fragment'):
2603 'path', 'fragment'):
2604 v = getattr(self, a)
2604 v = getattr(self, a)
2605 if v is not None:
2605 if v is not None:
2606 setattr(self, a, pycompat.urlunquote(v))
2606 setattr(self, a, pycompat.urlunquote(v))
2607
2607
2608 def __repr__(self):
2608 def __repr__(self):
2609 attrs = []
2609 attrs = []
2610 for a in ('scheme', 'user', 'passwd', 'host', 'port', 'path',
2610 for a in ('scheme', 'user', 'passwd', 'host', 'port', 'path',
2611 'query', 'fragment'):
2611 'query', 'fragment'):
2612 v = getattr(self, a)
2612 v = getattr(self, a)
2613 if v is not None:
2613 if v is not None:
2614 attrs.append('%s: %r' % (a, v))
2614 attrs.append('%s: %r' % (a, v))
2615 return '<url %s>' % ', '.join(attrs)
2615 return '<url %s>' % ', '.join(attrs)
2616
2616
    def __str__(self):
        r"""Join the URL's components back into a URL string.

        Examples:

        >>> str(url('http://user:pw@host:80/c:/bob?fo:oo#ba:ar'))
        'http://user:pw@host:80/c:/bob?fo:oo#ba:ar'
        >>> str(url('http://user:pw@host:80/?foo=bar&baz=42'))
        'http://user:pw@host:80/?foo=bar&baz=42'
        >>> str(url('http://user:pw@host:80/?foo=bar%3dbaz'))
        'http://user:pw@host:80/?foo=bar%3dbaz'
        >>> str(url('ssh://user:pw@[::1]:2200//home/joe#'))
        'ssh://user:pw@[::1]:2200//home/joe#'
        >>> str(url('http://localhost:80//'))
        'http://localhost:80//'
        >>> str(url('http://localhost:80/'))
        'http://localhost:80/'
        >>> str(url('http://localhost:80'))
        'http://localhost:80/'
        >>> str(url('bundle:foo'))
        'bundle:foo'
        >>> str(url('bundle://../foo'))
        'bundle:../foo'
        >>> str(url('path'))
        'path'
        >>> str(url('file:///tmp/foo/bar'))
        'file:///tmp/foo/bar'
        >>> str(url('file:///c:/tmp/foo/bar'))
        'file:///c:/tmp/foo/bar'
        >>> print url(r'bundle:foo\bar')
        bundle:foo\bar
        >>> print url(r'file:///D:\data\hg')
        file:///D:\data\hg
        """
        if self._localpath:
            # local paths (and bundle:) are returned without any quoting
            s = self.path
            if self.scheme == 'bundle':
                s = 'bundle:' + s
            if self.fragment:
                s += '#' + self.fragment
            return s

        s = self.scheme + ':'
        if self.user or self.passwd or self.host:
            s += '//'
        elif self.scheme and (not self.path or self.path.startswith('/')
                              or hasdriveletter(self.path)):
            s += '//'
            if hasdriveletter(self.path):
                # file:///c:/... style - extra slash before the drive letter
                s += '/'
        if self.user:
            s += urlreq.quote(self.user, safe=self._safechars)
        if self.passwd:
            s += ':' + urlreq.quote(self.passwd, safe=self._safechars)
        if self.user or self.passwd:
            s += '@'
        if self.host:
            # bracketed hosts ([::1] etc., see the ssh doctest above) are
            # emitted verbatim; anything else gets %-quoted
            if not (self.host.startswith('[') and self.host.endswith(']')):
                s += urlreq.quote(self.host)
            else:
                s += self.host
        if self.port:
            s += ':' + urlreq.quote(self.port)
        if self.host:
            s += '/'
        if self.path:
            # TODO: similar to the query string, we should not unescape the
            # path when we store it, the path might contain '%2f' = '/',
            # which we should *not* escape.
            s += urlreq.quote(self.path, safe=self._safepchars)
        if self.query:
            # we store the query in escaped form.
            s += '?' + self.query
        if self.fragment is not None:
            s += '#' + urlreq.quote(self.fragment, safe=self._safepchars)
        return s
2693
2693
2694 def authinfo(self):
2694 def authinfo(self):
2695 user, passwd = self.user, self.passwd
2695 user, passwd = self.user, self.passwd
2696 try:
2696 try:
2697 self.user, self.passwd = None, None
2697 self.user, self.passwd = None, None
2698 s = str(self)
2698 s = str(self)
2699 finally:
2699 finally:
2700 self.user, self.passwd = user, passwd
2700 self.user, self.passwd = user, passwd
2701 if not self.user:
2701 if not self.user:
2702 return (s, None)
2702 return (s, None)
2703 # authinfo[1] is passed to urllib2 password manager, and its
2703 # authinfo[1] is passed to urllib2 password manager, and its
2704 # URIs must not contain credentials. The host is passed in the
2704 # URIs must not contain credentials. The host is passed in the
2705 # URIs list because Python < 2.4.3 uses only that to search for
2705 # URIs list because Python < 2.4.3 uses only that to search for
2706 # a password.
2706 # a password.
2707 return (s, (None, (s, self.host),
2707 return (s, (None, (s, self.host),
2708 self.user, self.passwd or ''))
2708 self.user, self.passwd or ''))
2709
2709
2710 def isabs(self):
2710 def isabs(self):
2711 if self.scheme and self.scheme != 'file':
2711 if self.scheme and self.scheme != 'file':
2712 return True # remote URL
2712 return True # remote URL
2713 if hasdriveletter(self.path):
2713 if hasdriveletter(self.path):
2714 return True # absolute for our purposes - can't be joined()
2714 return True # absolute for our purposes - can't be joined()
2715 if self.path.startswith(r'\\'):
2715 if self.path.startswith(r'\\'):
2716 return True # Windows UNC path
2716 return True # Windows UNC path
2717 if self.path.startswith('/'):
2717 if self.path.startswith('/'):
2718 return True # POSIX-style
2718 return True # POSIX-style
2719 return False
2719 return False
2720
2720
2721 def localpath(self):
2721 def localpath(self):
2722 if self.scheme == 'file' or self.scheme == 'bundle':
2722 if self.scheme == 'file' or self.scheme == 'bundle':
2723 path = self.path or '/'
2723 path = self.path or '/'
2724 # For Windows, we need to promote hosts containing drive
2724 # For Windows, we need to promote hosts containing drive
2725 # letters to paths with drive letters.
2725 # letters to paths with drive letters.
2726 if hasdriveletter(self._hostport):
2726 if hasdriveletter(self._hostport):
2727 path = self._hostport + '/' + self.path
2727 path = self._hostport + '/' + self.path
2728 elif (self.host is not None and self.path
2728 elif (self.host is not None and self.path
2729 and not hasdriveletter(path)):
2729 and not hasdriveletter(path)):
2730 path = '/' + path
2730 path = '/' + path
2731 return path
2731 return path
2732 return self._origpath
2732 return self._origpath
2733
2733
2734 def islocal(self):
2734 def islocal(self):
2735 '''whether localpath will return something that posixfile can open'''
2735 '''whether localpath will return something that posixfile can open'''
2736 return (not self.scheme or self.scheme == 'file'
2736 return (not self.scheme or self.scheme == 'file'
2737 or self.scheme == 'bundle')
2737 or self.scheme == 'bundle')
2738
2738
def hasscheme(path):
    """Report whether *path* parses as a URL carrying a scheme component."""
    parsed = url(path)
    return bool(parsed.scheme)
2741
2741
def hasdriveletter(path):
    """Check for a leading Windows drive letter, e.g. 'c:...'.

    A falsy *path* (empty string, None) is returned unchanged, matching the
    short-circuit behavior callers rely on.
    """
    if not path:
        return path
    return path[0:1].isalpha() and path[1:2] == ':'
2744
2744
def urllocalpath(path):
    """Parse *path* (skipping query/fragment handling) and localpath() it."""
    parsed = url(path, parsequery=False, parsefragment=False)
    return parsed.localpath()
2747
2747
def hidepassword(u):
    '''hide user credential in a url string'''
    parsed = url(u)
    if parsed.passwd:
        parsed.passwd = '***'
    return str(parsed)
2754
2754
def removeauth(u):
    '''remove all authentication information from a url string'''
    parsed = url(u)
    parsed.user = parsed.passwd = None
    return str(parsed)
2760
2760
# Pretty-printer for durations expressed in seconds, built from the generic
# unitcountfn helper.  Entries run from whole seconds down to nanoseconds as
# (threshold, divisor, format) tuples -- presumably the first entry whose
# scaled value reaches its threshold wins (see unitcountfn for the exact
# selection rule).
timecount = unitcountfn(
    (1, 1e3, _('%.0f s')),
    (100, 1, _('%.1f s')),
    (10, 1, _('%.2f s')),
    (1, 1, _('%.3f s')),
    (100, 0.001, _('%.1f ms')),
    (10, 0.001, _('%.2f ms')),
    (1, 0.001, _('%.3f ms')),
    (100, 0.000001, _('%.1f us')),
    (10, 0.000001, _('%.2f us')),
    (1, 0.000001, _('%.3f us')),
    (100, 0.000000001, _('%.1f ns')),
    (10, 0.000000001, _('%.2f ns')),
    (1, 0.000000001, _('%.3f ns')),
    )
2776
2776
# Current indentation (in spaces) of nested @timed reports; kept in a
# one-element list so the wrapper closure in timed() can mutate it in place.
_timenesting = [0]
2778
2778
def timed(func):
    '''Report the execution time of a function call to stderr.

    During development, use as a decorator when you need to measure
    the cost of a function, e.g. as follows:

    @util.timed
    def foo(a, b, c):
        pass
    '''

    def wrapper(*args, **kwargs):
        begin = timer()
        step = 2
        # deepen the indent so nested timed calls report inside their parent
        _timenesting[0] += step
        try:
            return func(*args, **kwargs)
        finally:
            duration = timer() - begin
            _timenesting[0] -= step
            stderr.write('%s%s: %s\n'
                         % (' ' * _timenesting[0], func.__name__,
                            timecount(duration)))
    return wrapper
2803
2803
# Suffix table for sizetoint().  Multi-character suffixes ('kb', 'mb', 'gb')
# appear before the bare 'b' entry so they are matched first.
_sizeunits = (('m', 2**20), ('k', 2**10), ('g', 2**30),
              ('kb', 2**10), ('mb', 2**20), ('gb', 2**30), ('b', 1))

def sizetoint(s):
    '''Convert a space specifier to a byte count.

    >>> sizetoint('30')
    30
    >>> sizetoint('2.2kb')
    2252
    >>> sizetoint('6M')
    6291456
    '''
    spec = s.strip().lower()
    try:
        for suffix, multiplier in _sizeunits:
            if spec.endswith(suffix):
                return int(float(spec[:-len(suffix)]) * multiplier)
        # no recognized suffix: a plain integer
        return int(spec)
    except ValueError:
        raise error.ParseError(_("couldn't parse size: %s") % s)
2825
2825
class hooks(object):
    '''A collection of hook functions that can be used to extend a
    function's behavior. Hooks are called in lexicographic order,
    based on the names of their sources.'''

    def __init__(self):
        # list of (source-name, callable) pairs
        self._hooks = []

    def add(self, source, hook):
        self._hooks.append((source, hook))

    def __call__(self, *args):
        # sort lazily at call time so late registrations still land in order
        self._hooks.sort(key=lambda pair: pair[0])
        return [hook(*args) for _source, hook in self._hooks]
2843
2843
def getstackframes(skip=0, line=' %-*s in %s\n', fileline='%s:%s', depth=0):
    '''Yield lines for a nicely formatted stacktrace.

    Drops the 'skip' innermost entries, then keeps only the last 'depth'
    entries (depth=0 keeps everything).  Each file+linenumber is formatted
    according to 'fileline' and each yielded line according to 'line'.
    If 'line' is None, it yields tuples of:
        length of longest filepath+line number,
        filepath+linenumber,
        function

    Not be used in production code but very convenient while developing.
    '''
    # the extra -1 hides getstackframes' own frame from the report
    frames = traceback.extract_stack()[:-skip - 1]
    entries = [(fileline % (fname, lineno), funcname)
               for fname, lineno, funcname, _text in frames][-depth:]
    if not entries:
        return
    width = max(len(fnln) for fnln, _func in entries)
    for fnln, func in entries:
        if line is None:
            yield (width, fnln, func)
        else:
            yield line % (width, fnln, func)
2865
2866
def debugstacktrace(msg='stacktrace', skip=0,
                    f=stderr, otherf=stdout, depth=0):
    '''Write a message followed by a nicely formatted stacktrace to f (stderr).

    Skips the 'skip' entries closest to the call, then show 'depth' entries.
    By default it will flush stdout first.
    It can be used everywhere and intentionally does not require an ui object.
    Not be used in production code but very convenient while developing.
    '''
    # flush the other stream first so interleaved output stays readable
    if otherf:
        otherf.flush()
    f.write('%s at:\n' % msg.rstrip())
    # skip one extra frame so debugstacktrace itself is not reported
    for entry in getstackframes(skip + 1, depth=depth):
        f.write(entry)
    f.flush()
2878
2881
class dirs(object):
    '''a multiset of directory names from a dirstate or manifest'''

    def __init__(self, map, skip=None):
        self._dirs = {}
        addpath = self.addpath  # local alias: this loop can be very hot
        if skip is not None and safehasattr(map, 'iteritems'):
            # dirstate-style map: honour the 'skip' state marker
            for f, s in map.iteritems():
                if s[0] != skip:
                    addpath(f)
        else:
            for f in map:
                addpath(f)

    def addpath(self, path):
        counts = self._dirs
        for base in finddirs(path):
            if base in counts:
                # stop at the first prefix that is already tracked
                counts[base] += 1
                return
            counts[base] = 1

    def delpath(self, path):
        counts = self._dirs
        for base in finddirs(path):
            if counts[base] > 1:
                # stop once a prefix still has other references
                counts[base] -= 1
                return
            del counts[base]

    def __iter__(self):
        return self._dirs.iterkeys()

    def __contains__(self, d):
        return d in self._dirs
2914
2917
# prefer the C implementation of dirs from the parsers module when present
if safehasattr(parsers, 'dirs'):
    dirs = parsers.dirs
2917
2920
def finddirs(path):
    """Yield every ancestor directory of *path*, longest first.

    'a/b/c' yields 'a/b' then 'a'; a path with no '/' yields nothing.
    """
    remainder = path
    while True:
        cut = remainder.rfind('/')
        if cut == -1:
            return
        remainder = remainder[:cut]
        yield remainder
2923
2926
class ctxmanager(object):
    '''A context manager for use in 'with' blocks to allow multiple
    contexts to be entered at once. This is both safer and more
    flexible than contextlib.nested.

    Once Mercurial supports Python 2.7+, this will become mostly
    unnecessary.
    '''

    def __init__(self, *args):
        '''Accepts a list of no-argument functions that return context
        managers. These will be invoked at __call__ time.'''
        self._pending = args
        self._atexit = []

    def __enter__(self):
        return self

    def enter(self):
        '''Create and enter context managers in the order in which they were
        passed to the constructor.

        Returns the list of values produced by the managers' __enter__
        methods.'''
        values = []
        for func in self._pending:
            obj = func()
            values.append(obj.__enter__())
            self._atexit.append(obj.__exit__)
        del self._pending
        return values

    def atexit(self, func, *args, **kwargs):
        '''Add a function to call when this context manager exits. The
        ordering of multiple atexit calls is unspecified, save that
        they will happen before any __exit__ functions.'''
        def wrapper(exc_type, exc_val, exc_tb):
            func(*args, **kwargs)
        self._atexit.append(wrapper)
        return func

    def __exit__(self, exc_type, exc_val, exc_tb):
        '''Context managers are exited in the reverse order from which
        they were created.'''
        received = exc_type is not None
        suppressed = False
        pending = None
        self._atexit.reverse()
        for exitfunc in self._atexit:
            try:
                if exitfunc(exc_type, exc_val, exc_tb):
                    # this manager suppressed the active exception; later
                    # (outer) exit functions must not see it
                    suppressed = True
                    exc_type = None
                    exc_val = None
                    exc_tb = None
            except BaseException:
                # an exit function raised: remember the new exception and
                # hand it to the remaining exit functions.  (The previous
                # redundant duplicate call to sys.exc_info() was removed.)
                exc_type, exc_val, exc_tb = pending = sys.exc_info()
        del self._atexit
        if pending:
            raise exc_val
        return received and suppressed
2983
2986
# compression code

# Roles a peer can play in wire protocol compression negotiation
# (see compressormanager.supportedwireengines).
SERVERROLE = 'server'
CLIENTROLE = 'client'

# Describes an engine's wire protocol support: the identifier advertised on
# the wire plus a numeric priority for each role (sorted highest-first by
# compressormanager.supportedwireengines).
compewireprotosupport = collections.namedtuple(u'compenginewireprotosupport',
                                               (u'name', u'serverpriority',
                                                u'clientpriority'))
2992
2995
class compressormanager(object):
    """Holds registrations of various compression engines.

    This class essentially abstracts the differences between compression
    engines to allow new compression formats to be added easily, possibly from
    extensions.

    Compressors are registered against the global instance by calling its
    ``register()`` method.
    """
    def __init__(self):
        # Engine name to compressionengine instance.
        self._engines = {}
        # Bundle spec human name to engine name.
        self._bundlenames = {}
        # Internal bundle identifier to engine name.
        self._bundletypes = {}
        # Revlog header to engine name.
        self._revlogheaders = {}
        # Wire proto identifier to engine name.
        self._wiretypes = {}

    def __getitem__(self, key):
        # Look up an engine by its registered name; raises KeyError if absent.
        return self._engines[key]

    def __contains__(self, key):
        return key in self._engines

    def __iter__(self):
        return iter(self._engines.keys())

    def register(self, engine):
        """Register a compression engine with the manager.

        The argument must be a ``compressionengine`` instance.
        """
        if not isinstance(engine, compressionengine):
            raise ValueError(_('argument must be a compressionengine'))

        name = engine.name()

        if name in self._engines:
            raise error.Abort(_('compression engine %s already registered') %
                              name)

        bundleinfo = engine.bundletype()
        if bundleinfo:
            bundlename, bundletype = bundleinfo

            if bundlename in self._bundlenames:
                raise error.Abort(_('bundle name %s already registered') %
                                  bundlename)
            if bundletype in self._bundletypes:
                raise error.Abort(_('bundle type %s already registered by %s') %
                                  (bundletype, self._bundletypes[bundletype]))

            # No external facing name declared.
            if bundlename:
                self._bundlenames[bundlename] = name

            self._bundletypes[bundletype] = name

        wiresupport = engine.wireprotosupport()
        if wiresupport:
            wiretype = wiresupport.name
            if wiretype in self._wiretypes:
                raise error.Abort(_('wire protocol compression %s already '
                                    'registered by %s') %
                                  (wiretype, self._wiretypes[wiretype]))

            self._wiretypes[wiretype] = name

        revlogheader = engine.revlogheader()
        if revlogheader and revlogheader in self._revlogheaders:
            raise error.Abort(_('revlog header %s already registered by %s') %
                              (revlogheader, self._revlogheaders[revlogheader]))

        if revlogheader:
            self._revlogheaders[revlogheader] = name

        # Registration succeeded on every index; record the engine itself.
        self._engines[name] = engine

    @property
    def supportedbundlenames(self):
        return set(self._bundlenames.keys())

    @property
    def supportedbundletypes(self):
        return set(self._bundletypes.keys())

    def forbundlename(self, bundlename):
        """Obtain a compression engine registered to a bundle name.

        Will raise KeyError if the bundle type isn't registered.

        Will abort if the engine is known but not available.
        """
        engine = self._engines[self._bundlenames[bundlename]]
        if not engine.available():
            raise error.Abort(_('compression engine %s could not be loaded') %
                              engine.name())
        return engine

    def forbundletype(self, bundletype):
        """Obtain a compression engine registered to a bundle type.

        Will raise KeyError if the bundle type isn't registered.

        Will abort if the engine is known but not available.
        """
        engine = self._engines[self._bundletypes[bundletype]]
        if not engine.available():
            raise error.Abort(_('compression engine %s could not be loaded') %
                              engine.name())
        return engine

    def supportedwireengines(self, role, onlyavailable=True):
        """Obtain compression engines that support the wire protocol.

        Returns a list of engines in prioritized order, most desired first.

        If ``onlyavailable`` is set, filter out engines that can't be
        loaded.
        """
        assert role in (SERVERROLE, CLIENTROLE)

        attr = 'serverpriority' if role == SERVERROLE else 'clientpriority'

        engines = [self._engines[e] for e in self._wiretypes.values()]
        if onlyavailable:
            engines = [e for e in engines if e.available()]

        def getkey(e):
            # Sort first by priority, highest first. In case of tie, sort
            # alphabetically. This is arbitrary, but ensures output is
            # stable.
            w = e.wireprotosupport()
            return -1 * getattr(w, attr), w.name

        return list(sorted(engines, key=getkey))

    def forwiretype(self, wiretype):
        # Like forbundletype(), but keyed on the wire protocol identifier.
        engine = self._engines[self._wiretypes[wiretype]]
        if not engine.available():
            raise error.Abort(_('compression engine %s could not be loaded') %
                              engine.name())
        return engine

    def forrevlogheader(self, header):
        """Obtain a compression engine registered to a revlog header.

        Will raise KeyError if the revlog header value isn't registered.
        """
        return self._engines[self._revlogheaders[header]]
3146
3149
# The global manager instance against which all compression engines register.
compengines = compressormanager()
3148
3151
3149 class compressionengine(object):
3152 class compressionengine(object):
3150 """Base class for compression engines.
3153 """Base class for compression engines.
3151
3154
3152 Compression engines must implement the interface defined by this class.
3155 Compression engines must implement the interface defined by this class.
3153 """
3156 """
3154 def name(self):
3157 def name(self):
3155 """Returns the name of the compression engine.
3158 """Returns the name of the compression engine.
3156
3159
3157 This is the key the engine is registered under.
3160 This is the key the engine is registered under.
3158
3161
3159 This method must be implemented.
3162 This method must be implemented.
3160 """
3163 """
3161 raise NotImplementedError()
3164 raise NotImplementedError()
3162
3165
3163 def available(self):
3166 def available(self):
3164 """Whether the compression engine is available.
3167 """Whether the compression engine is available.
3165
3168
3166 The intent of this method is to allow optional compression engines
3169 The intent of this method is to allow optional compression engines
3167 that may not be available in all installations (such as engines relying
3170 that may not be available in all installations (such as engines relying
3168 on C extensions that may not be present).
3171 on C extensions that may not be present).
3169 """
3172 """
3170 return True
3173 return True
3171
3174
3172 def bundletype(self):
3175 def bundletype(self):
3173 """Describes bundle identifiers for this engine.
3176 """Describes bundle identifiers for this engine.
3174
3177
3175 If this compression engine isn't supported for bundles, returns None.
3178 If this compression engine isn't supported for bundles, returns None.
3176
3179
3177 If this engine can be used for bundles, returns a 2-tuple of strings of
3180 If this engine can be used for bundles, returns a 2-tuple of strings of
3178 the user-facing "bundle spec" compression name and an internal
3181 the user-facing "bundle spec" compression name and an internal
3179 identifier used to denote the compression format within bundles. To
3182 identifier used to denote the compression format within bundles. To
3180 exclude the name from external usage, set the first element to ``None``.
3183 exclude the name from external usage, set the first element to ``None``.
3181
3184
3182 If bundle compression is supported, the class must also implement
3185 If bundle compression is supported, the class must also implement
3183 ``compressstream`` and `decompressorreader``.
3186 ``compressstream`` and `decompressorreader``.
3184 """
3187 """
3185 return None
3188 return None
3186
3189
3187 def wireprotosupport(self):
3190 def wireprotosupport(self):
3188 """Declare support for this compression format on the wire protocol.
3191 """Declare support for this compression format on the wire protocol.
3189
3192
3190 If this compression engine isn't supported for compressing wire
3193 If this compression engine isn't supported for compressing wire
3191 protocol payloads, returns None.
3194 protocol payloads, returns None.
3192
3195
3193 Otherwise, returns ``compenginewireprotosupport`` with the following
3196 Otherwise, returns ``compenginewireprotosupport`` with the following
3194 fields:
3197 fields:
3195
3198
3196 * String format identifier
3199 * String format identifier
3197 * Integer priority for the server
3200 * Integer priority for the server
3198 * Integer priority for the client
3201 * Integer priority for the client
3199
3202
3200 The integer priorities are used to order the advertisement of format
3203 The integer priorities are used to order the advertisement of format
3201 support by server and client. The highest integer is advertised
3204 support by server and client. The highest integer is advertised
3202 first. Integers with non-positive values aren't advertised.
3205 first. Integers with non-positive values aren't advertised.
3203
3206
3204 The priority values are somewhat arbitrary and only used for default
3207 The priority values are somewhat arbitrary and only used for default
3205 ordering. The relative order can be changed via config options.
3208 ordering. The relative order can be changed via config options.
3206
3209
3207 If wire protocol compression is supported, the class must also implement
3210 If wire protocol compression is supported, the class must also implement
3208 ``compressstream`` and ``decompressorreader``.
3211 ``compressstream`` and ``decompressorreader``.
3209 """
3212 """
3210 return None
3213 return None
3211
3214
3212 def revlogheader(self):
3215 def revlogheader(self):
3213 """Header added to revlog chunks that identifies this engine.
3216 """Header added to revlog chunks that identifies this engine.
3214
3217
3215 If this engine can be used to compress revlogs, this method should
3218 If this engine can be used to compress revlogs, this method should
3216 return the bytes used to identify chunks compressed with this engine.
3219 return the bytes used to identify chunks compressed with this engine.
3217 Else, the method should return ``None`` to indicate it does not
3220 Else, the method should return ``None`` to indicate it does not
3218 participate in revlog compression.
3221 participate in revlog compression.
3219 """
3222 """
3220 return None
3223 return None
3221
3224
3222 def compressstream(self, it, opts=None):
3225 def compressstream(self, it, opts=None):
3223 """Compress an iterator of chunks.
3226 """Compress an iterator of chunks.
3224
3227
3225 The method receives an iterator (ideally a generator) of chunks of
3228 The method receives an iterator (ideally a generator) of chunks of
3226 bytes to be compressed. It returns an iterator (ideally a generator)
3229 bytes to be compressed. It returns an iterator (ideally a generator)
3227 of bytes of chunks representing the compressed output.
3230 of bytes of chunks representing the compressed output.
3228
3231
3229 Optionally accepts an argument defining how to perform compression.
3232 Optionally accepts an argument defining how to perform compression.
3230 Each engine treats this argument differently.
3233 Each engine treats this argument differently.
3231 """
3234 """
3232 raise NotImplementedError()
3235 raise NotImplementedError()
3233
3236
3234 def decompressorreader(self, fh):
3237 def decompressorreader(self, fh):
3235 """Perform decompression on a file object.
3238 """Perform decompression on a file object.
3236
3239
3237 Argument is an object with a ``read(size)`` method that returns
3240 Argument is an object with a ``read(size)`` method that returns
3238 compressed data. Return value is an object with a ``read(size)`` that
3241 compressed data. Return value is an object with a ``read(size)`` that
3239 returns uncompressed data.
3242 returns uncompressed data.
3240 """
3243 """
3241 raise NotImplementedError()
3244 raise NotImplementedError()
3242
3245
3243 def revlogcompressor(self, opts=None):
3246 def revlogcompressor(self, opts=None):
3244 """Obtain an object that can be used to compress revlog entries.
3247 """Obtain an object that can be used to compress revlog entries.
3245
3248
3246 The object has a ``compress(data)`` method that compresses binary
3249 The object has a ``compress(data)`` method that compresses binary
3247 data. This method returns compressed binary data or ``None`` if
3250 data. This method returns compressed binary data or ``None`` if
3248 the data could not be compressed (too small, not compressible, etc).
3251 the data could not be compressed (too small, not compressible, etc).
3249 The returned data should have a header uniquely identifying this
3252 The returned data should have a header uniquely identifying this
3250 compression format so decompression can be routed to this engine.
3253 compression format so decompression can be routed to this engine.
3251 This header should be identified by the ``revlogheader()`` return
3254 This header should be identified by the ``revlogheader()`` return
3252 value.
3255 value.
3253
3256
3254 The object has a ``decompress(data)`` method that decompresses
3257 The object has a ``decompress(data)`` method that decompresses
3255 data. The method will only be called if ``data`` begins with
3258 data. The method will only be called if ``data`` begins with
3256 ``revlogheader()``. The method should return the raw, uncompressed
3259 ``revlogheader()``. The method should return the raw, uncompressed
3257 data or raise a ``RevlogError``.
3260 data or raise a ``RevlogError``.
3258
3261
3259 The object is reusable but is not thread safe.
3262 The object is reusable but is not thread safe.
3260 """
3263 """
3261 raise NotImplementedError()
3264 raise NotImplementedError()
3262
3265
3263 class _zlibengine(compressionengine):
3266 class _zlibengine(compressionengine):
3264 def name(self):
3267 def name(self):
3265 return 'zlib'
3268 return 'zlib'
3266
3269
3267 def bundletype(self):
3270 def bundletype(self):
3268 return 'gzip', 'GZ'
3271 return 'gzip', 'GZ'
3269
3272
3270 def wireprotosupport(self):
3273 def wireprotosupport(self):
3271 return compewireprotosupport('zlib', 20, 20)
3274 return compewireprotosupport('zlib', 20, 20)
3272
3275
3273 def revlogheader(self):
3276 def revlogheader(self):
3274 return 'x'
3277 return 'x'
3275
3278
3276 def compressstream(self, it, opts=None):
3279 def compressstream(self, it, opts=None):
3277 opts = opts or {}
3280 opts = opts or {}
3278
3281
3279 z = zlib.compressobj(opts.get('level', -1))
3282 z = zlib.compressobj(opts.get('level', -1))
3280 for chunk in it:
3283 for chunk in it:
3281 data = z.compress(chunk)
3284 data = z.compress(chunk)
3282 # Not all calls to compress emit data. It is cheaper to inspect
3285 # Not all calls to compress emit data. It is cheaper to inspect
3283 # here than to feed empty chunks through generator.
3286 # here than to feed empty chunks through generator.
3284 if data:
3287 if data:
3285 yield data
3288 yield data
3286
3289
3287 yield z.flush()
3290 yield z.flush()
3288
3291
3289 def decompressorreader(self, fh):
3292 def decompressorreader(self, fh):
3290 def gen():
3293 def gen():
3291 d = zlib.decompressobj()
3294 d = zlib.decompressobj()
3292 for chunk in filechunkiter(fh):
3295 for chunk in filechunkiter(fh):
3293 while chunk:
3296 while chunk:
3294 # Limit output size to limit memory.
3297 # Limit output size to limit memory.
3295 yield d.decompress(chunk, 2 ** 18)
3298 yield d.decompress(chunk, 2 ** 18)
3296 chunk = d.unconsumed_tail
3299 chunk = d.unconsumed_tail
3297
3300
3298 return chunkbuffer(gen())
3301 return chunkbuffer(gen())
3299
3302
3300 class zlibrevlogcompressor(object):
3303 class zlibrevlogcompressor(object):
3301 def compress(self, data):
3304 def compress(self, data):
3302 insize = len(data)
3305 insize = len(data)
3303 # Caller handles empty input case.
3306 # Caller handles empty input case.
3304 assert insize > 0
3307 assert insize > 0
3305
3308
3306 if insize < 44:
3309 if insize < 44:
3307 return None
3310 return None
3308
3311
3309 elif insize <= 1000000:
3312 elif insize <= 1000000:
3310 compressed = zlib.compress(data)
3313 compressed = zlib.compress(data)
3311 if len(compressed) < insize:
3314 if len(compressed) < insize:
3312 return compressed
3315 return compressed
3313 return None
3316 return None
3314
3317
3315 # zlib makes an internal copy of the input buffer, doubling
3318 # zlib makes an internal copy of the input buffer, doubling
3316 # memory usage for large inputs. So do streaming compression
3319 # memory usage for large inputs. So do streaming compression
3317 # on large inputs.
3320 # on large inputs.
3318 else:
3321 else:
3319 z = zlib.compressobj()
3322 z = zlib.compressobj()
3320 parts = []
3323 parts = []
3321 pos = 0
3324 pos = 0
3322 while pos < insize:
3325 while pos < insize:
3323 pos2 = pos + 2**20
3326 pos2 = pos + 2**20
3324 parts.append(z.compress(data[pos:pos2]))
3327 parts.append(z.compress(data[pos:pos2]))
3325 pos = pos2
3328 pos = pos2
3326 parts.append(z.flush())
3329 parts.append(z.flush())
3327
3330
3328 if sum(map(len, parts)) < insize:
3331 if sum(map(len, parts)) < insize:
3329 return ''.join(parts)
3332 return ''.join(parts)
3330 return None
3333 return None
3331
3334
3332 def decompress(self, data):
3335 def decompress(self, data):
3333 try:
3336 try:
3334 return zlib.decompress(data)
3337 return zlib.decompress(data)
3335 except zlib.error as e:
3338 except zlib.error as e:
3336 raise error.RevlogError(_('revlog decompress error: %s') %
3339 raise error.RevlogError(_('revlog decompress error: %s') %
3337 str(e))
3340 str(e))
3338
3341
3339 def revlogcompressor(self, opts=None):
3342 def revlogcompressor(self, opts=None):
3340 return self.zlibrevlogcompressor()
3343 return self.zlibrevlogcompressor()
3341
3344
3342 compengines.register(_zlibengine())
3345 compengines.register(_zlibengine())
3343
3346
3344 class _bz2engine(compressionengine):
3347 class _bz2engine(compressionengine):
3345 def name(self):
3348 def name(self):
3346 return 'bz2'
3349 return 'bz2'
3347
3350
3348 def bundletype(self):
3351 def bundletype(self):
3349 return 'bzip2', 'BZ'
3352 return 'bzip2', 'BZ'
3350
3353
3351 # We declare a protocol name but don't advertise by default because
3354 # We declare a protocol name but don't advertise by default because
3352 # it is slow.
3355 # it is slow.
3353 def wireprotosupport(self):
3356 def wireprotosupport(self):
3354 return compewireprotosupport('bzip2', 0, 0)
3357 return compewireprotosupport('bzip2', 0, 0)
3355
3358
3356 def compressstream(self, it, opts=None):
3359 def compressstream(self, it, opts=None):
3357 opts = opts or {}
3360 opts = opts or {}
3358 z = bz2.BZ2Compressor(opts.get('level', 9))
3361 z = bz2.BZ2Compressor(opts.get('level', 9))
3359 for chunk in it:
3362 for chunk in it:
3360 data = z.compress(chunk)
3363 data = z.compress(chunk)
3361 if data:
3364 if data:
3362 yield data
3365 yield data
3363
3366
3364 yield z.flush()
3367 yield z.flush()
3365
3368
3366 def decompressorreader(self, fh):
3369 def decompressorreader(self, fh):
3367 def gen():
3370 def gen():
3368 d = bz2.BZ2Decompressor()
3371 d = bz2.BZ2Decompressor()
3369 for chunk in filechunkiter(fh):
3372 for chunk in filechunkiter(fh):
3370 yield d.decompress(chunk)
3373 yield d.decompress(chunk)
3371
3374
3372 return chunkbuffer(gen())
3375 return chunkbuffer(gen())
3373
3376
3374 compengines.register(_bz2engine())
3377 compengines.register(_bz2engine())
3375
3378
3376 class _truncatedbz2engine(compressionengine):
3379 class _truncatedbz2engine(compressionengine):
3377 def name(self):
3380 def name(self):
3378 return 'bz2truncated'
3381 return 'bz2truncated'
3379
3382
3380 def bundletype(self):
3383 def bundletype(self):
3381 return None, '_truncatedBZ'
3384 return None, '_truncatedBZ'
3382
3385
3383 # We don't implement compressstream because it is hackily handled elsewhere.
3386 # We don't implement compressstream because it is hackily handled elsewhere.
3384
3387
3385 def decompressorreader(self, fh):
3388 def decompressorreader(self, fh):
3386 def gen():
3389 def gen():
3387 # The input stream doesn't have the 'BZ' header. So add it back.
3390 # The input stream doesn't have the 'BZ' header. So add it back.
3388 d = bz2.BZ2Decompressor()
3391 d = bz2.BZ2Decompressor()
3389 d.decompress('BZ')
3392 d.decompress('BZ')
3390 for chunk in filechunkiter(fh):
3393 for chunk in filechunkiter(fh):
3391 yield d.decompress(chunk)
3394 yield d.decompress(chunk)
3392
3395
3393 return chunkbuffer(gen())
3396 return chunkbuffer(gen())
3394
3397
3395 compengines.register(_truncatedbz2engine())
3398 compengines.register(_truncatedbz2engine())
3396
3399
3397 class _noopengine(compressionengine):
3400 class _noopengine(compressionengine):
3398 def name(self):
3401 def name(self):
3399 return 'none'
3402 return 'none'
3400
3403
3401 def bundletype(self):
3404 def bundletype(self):
3402 return 'none', 'UN'
3405 return 'none', 'UN'
3403
3406
3404 # Clients always support uncompressed payloads. Servers don't because
3407 # Clients always support uncompressed payloads. Servers don't because
3405 # unless you are on a fast network, uncompressed payloads can easily
3408 # unless you are on a fast network, uncompressed payloads can easily
3406 # saturate your network pipe.
3409 # saturate your network pipe.
3407 def wireprotosupport(self):
3410 def wireprotosupport(self):
3408 return compewireprotosupport('none', 0, 10)
3411 return compewireprotosupport('none', 0, 10)
3409
3412
3410 # We don't implement revlogheader because it is handled specially
3413 # We don't implement revlogheader because it is handled specially
3411 # in the revlog class.
3414 # in the revlog class.
3412
3415
3413 def compressstream(self, it, opts=None):
3416 def compressstream(self, it, opts=None):
3414 return it
3417 return it
3415
3418
3416 def decompressorreader(self, fh):
3419 def decompressorreader(self, fh):
3417 return fh
3420 return fh
3418
3421
3419 class nooprevlogcompressor(object):
3422 class nooprevlogcompressor(object):
3420 def compress(self, data):
3423 def compress(self, data):
3421 return None
3424 return None
3422
3425
3423 def revlogcompressor(self, opts=None):
3426 def revlogcompressor(self, opts=None):
3424 return self.nooprevlogcompressor()
3427 return self.nooprevlogcompressor()
3425
3428
3426 compengines.register(_noopengine())
3429 compengines.register(_noopengine())
3427
3430
3428 class _zstdengine(compressionengine):
3431 class _zstdengine(compressionengine):
3429 def name(self):
3432 def name(self):
3430 return 'zstd'
3433 return 'zstd'
3431
3434
3432 @propertycache
3435 @propertycache
3433 def _module(self):
3436 def _module(self):
3434 # Not all installs have the zstd module available. So defer importing
3437 # Not all installs have the zstd module available. So defer importing
3435 # until first access.
3438 # until first access.
3436 try:
3439 try:
3437 from . import zstd
3440 from . import zstd
3438 # Force delayed import.
3441 # Force delayed import.
3439 zstd.__version__
3442 zstd.__version__
3440 return zstd
3443 return zstd
3441 except ImportError:
3444 except ImportError:
3442 return None
3445 return None
3443
3446
3444 def available(self):
3447 def available(self):
3445 return bool(self._module)
3448 return bool(self._module)
3446
3449
3447 def bundletype(self):
3450 def bundletype(self):
3448 return 'zstd', 'ZS'
3451 return 'zstd', 'ZS'
3449
3452
3450 def wireprotosupport(self):
3453 def wireprotosupport(self):
3451 return compewireprotosupport('zstd', 50, 50)
3454 return compewireprotosupport('zstd', 50, 50)
3452
3455
3453 def revlogheader(self):
3456 def revlogheader(self):
3454 return '\x28'
3457 return '\x28'
3455
3458
3456 def compressstream(self, it, opts=None):
3459 def compressstream(self, it, opts=None):
3457 opts = opts or {}
3460 opts = opts or {}
3458 # zstd level 3 is almost always significantly faster than zlib
3461 # zstd level 3 is almost always significantly faster than zlib
3459 # while providing no worse compression. It strikes a good balance
3462 # while providing no worse compression. It strikes a good balance
3460 # between speed and compression.
3463 # between speed and compression.
3461 level = opts.get('level', 3)
3464 level = opts.get('level', 3)
3462
3465
3463 zstd = self._module
3466 zstd = self._module
3464 z = zstd.ZstdCompressor(level=level).compressobj()
3467 z = zstd.ZstdCompressor(level=level).compressobj()
3465 for chunk in it:
3468 for chunk in it:
3466 data = z.compress(chunk)
3469 data = z.compress(chunk)
3467 if data:
3470 if data:
3468 yield data
3471 yield data
3469
3472
3470 yield z.flush()
3473 yield z.flush()
3471
3474
3472 def decompressorreader(self, fh):
3475 def decompressorreader(self, fh):
3473 zstd = self._module
3476 zstd = self._module
3474 dctx = zstd.ZstdDecompressor()
3477 dctx = zstd.ZstdDecompressor()
3475 return chunkbuffer(dctx.read_from(fh))
3478 return chunkbuffer(dctx.read_from(fh))
3476
3479
3477 class zstdrevlogcompressor(object):
3480 class zstdrevlogcompressor(object):
3478 def __init__(self, zstd, level=3):
3481 def __init__(self, zstd, level=3):
3479 # Writing the content size adds a few bytes to the output. However,
3482 # Writing the content size adds a few bytes to the output. However,
3480 # it allows decompression to be more optimal since we can
3483 # it allows decompression to be more optimal since we can
3481 # pre-allocate a buffer to hold the result.
3484 # pre-allocate a buffer to hold the result.
3482 self._cctx = zstd.ZstdCompressor(level=level,
3485 self._cctx = zstd.ZstdCompressor(level=level,
3483 write_content_size=True)
3486 write_content_size=True)
3484 self._dctx = zstd.ZstdDecompressor()
3487 self._dctx = zstd.ZstdDecompressor()
3485 self._compinsize = zstd.COMPRESSION_RECOMMENDED_INPUT_SIZE
3488 self._compinsize = zstd.COMPRESSION_RECOMMENDED_INPUT_SIZE
3486 self._decompinsize = zstd.DECOMPRESSION_RECOMMENDED_INPUT_SIZE
3489 self._decompinsize = zstd.DECOMPRESSION_RECOMMENDED_INPUT_SIZE
3487
3490
3488 def compress(self, data):
3491 def compress(self, data):
3489 insize = len(data)
3492 insize = len(data)
3490 # Caller handles empty input case.
3493 # Caller handles empty input case.
3491 assert insize > 0
3494 assert insize > 0
3492
3495
3493 if insize < 50:
3496 if insize < 50:
3494 return None
3497 return None
3495
3498
3496 elif insize <= 1000000:
3499 elif insize <= 1000000:
3497 compressed = self._cctx.compress(data)
3500 compressed = self._cctx.compress(data)
3498 if len(compressed) < insize:
3501 if len(compressed) < insize:
3499 return compressed
3502 return compressed
3500 return None
3503 return None
3501 else:
3504 else:
3502 z = self._cctx.compressobj()
3505 z = self._cctx.compressobj()
3503 chunks = []
3506 chunks = []
3504 pos = 0
3507 pos = 0
3505 while pos < insize:
3508 while pos < insize:
3506 pos2 = pos + self._compinsize
3509 pos2 = pos + self._compinsize
3507 chunk = z.compress(data[pos:pos2])
3510 chunk = z.compress(data[pos:pos2])
3508 if chunk:
3511 if chunk:
3509 chunks.append(chunk)
3512 chunks.append(chunk)
3510 pos = pos2
3513 pos = pos2
3511 chunks.append(z.flush())
3514 chunks.append(z.flush())
3512
3515
3513 if sum(map(len, chunks)) < insize:
3516 if sum(map(len, chunks)) < insize:
3514 return ''.join(chunks)
3517 return ''.join(chunks)
3515 return None
3518 return None
3516
3519
3517 def decompress(self, data):
3520 def decompress(self, data):
3518 insize = len(data)
3521 insize = len(data)
3519
3522
3520 try:
3523 try:
3521 # This was measured to be faster than other streaming
3524 # This was measured to be faster than other streaming
3522 # decompressors.
3525 # decompressors.
3523 dobj = self._dctx.decompressobj()
3526 dobj = self._dctx.decompressobj()
3524 chunks = []
3527 chunks = []
3525 pos = 0
3528 pos = 0
3526 while pos < insize:
3529 while pos < insize:
3527 pos2 = pos + self._decompinsize
3530 pos2 = pos + self._decompinsize
3528 chunk = dobj.decompress(data[pos:pos2])
3531 chunk = dobj.decompress(data[pos:pos2])
3529 if chunk:
3532 if chunk:
3530 chunks.append(chunk)
3533 chunks.append(chunk)
3531 pos = pos2
3534 pos = pos2
3532 # Frame should be exhausted, so no finish() API.
3535 # Frame should be exhausted, so no finish() API.
3533
3536
3534 return ''.join(chunks)
3537 return ''.join(chunks)
3535 except Exception as e:
3538 except Exception as e:
3536 raise error.RevlogError(_('revlog decompress error: %s') %
3539 raise error.RevlogError(_('revlog decompress error: %s') %
3537 str(e))
3540 str(e))
3538
3541
3539 def revlogcompressor(self, opts=None):
3542 def revlogcompressor(self, opts=None):
3540 opts = opts or {}
3543 opts = opts or {}
3541 return self.zstdrevlogcompressor(self._module,
3544 return self.zstdrevlogcompressor(self._module,
3542 level=opts.get('level', 3))
3545 level=opts.get('level', 3))
3543
3546
3544 compengines.register(_zstdengine())
3547 compengines.register(_zstdengine())
3545
3548
3546 # convenient shortcut
3549 # convenient shortcut
3547 dst = debugstacktrace
3550 dst = debugstacktrace
@@ -1,139 +1,138
1 $ cat << EOF >> $HGRCPATH
1 $ cat << EOF >> $HGRCPATH
2 > [format]
2 > [format]
3 > usegeneraldelta=yes
3 > usegeneraldelta=yes
4 > EOF
4 > EOF
5
5
6 $ hg init debugrevlog
6 $ hg init debugrevlog
7 $ cd debugrevlog
7 $ cd debugrevlog
8 $ echo a > a
8 $ echo a > a
9 $ hg ci -Am adda
9 $ hg ci -Am adda
10 adding a
10 adding a
11 $ hg debugrevlog -m
11 $ hg debugrevlog -m
12 format : 1
12 format : 1
13 flags : inline, generaldelta
13 flags : inline, generaldelta
14
14
15 revisions : 1
15 revisions : 1
16 merges : 0 ( 0.00%)
16 merges : 0 ( 0.00%)
17 normal : 1 (100.00%)
17 normal : 1 (100.00%)
18 revisions : 1
18 revisions : 1
19 full : 1 (100.00%)
19 full : 1 (100.00%)
20 deltas : 0 ( 0.00%)
20 deltas : 0 ( 0.00%)
21 revision size : 44
21 revision size : 44
22 full : 44 (100.00%)
22 full : 44 (100.00%)
23 deltas : 0 ( 0.00%)
23 deltas : 0 ( 0.00%)
24
24
25 chunks : 1
25 chunks : 1
26 0x75 (u) : 1 (100.00%)
26 0x75 (u) : 1 (100.00%)
27 chunks size : 44
27 chunks size : 44
28 0x75 (u) : 44 (100.00%)
28 0x75 (u) : 44 (100.00%)
29
29
30 avg chain length : 0
30 avg chain length : 0
31 max chain length : 0
31 max chain length : 0
32 compression ratio : 0
32 compression ratio : 0
33
33
34 uncompressed data size (min/max/avg) : 43 / 43 / 43
34 uncompressed data size (min/max/avg) : 43 / 43 / 43
35 full revision size (min/max/avg) : 44 / 44 / 44
35 full revision size (min/max/avg) : 44 / 44 / 44
36 delta size (min/max/avg) : 0 / 0 / 0
36 delta size (min/max/avg) : 0 / 0 / 0
37
37
38 Test debugindex, with and without the --debug flag
38 Test debugindex, with and without the --debug flag
39 $ hg debugindex a
39 $ hg debugindex a
40 rev offset length ..... linkrev nodeid p1 p2 (re)
40 rev offset length ..... linkrev nodeid p1 p2 (re)
41 0 0 3 .... 0 b789fdd96dc2 000000000000 000000000000 (re)
41 0 0 3 .... 0 b789fdd96dc2 000000000000 000000000000 (re)
42 $ hg --debug debugindex a
42 $ hg --debug debugindex a
43 rev offset length ..... linkrev nodeid p1 p2 (re)
43 rev offset length ..... linkrev nodeid p1 p2 (re)
44 0 0 3 .... 0 b789fdd96dc2f3bd229c1dd8eedf0fc60e2b68e3 0000000000000000000000000000000000000000 0000000000000000000000000000000000000000 (re)
44 0 0 3 .... 0 b789fdd96dc2f3bd229c1dd8eedf0fc60e2b68e3 0000000000000000000000000000000000000000 0000000000000000000000000000000000000000 (re)
45 $ hg debugindex -f 1 a
45 $ hg debugindex -f 1 a
46 rev flag offset length size ..... link p1 p2 nodeid (re)
46 rev flag offset length size ..... link p1 p2 nodeid (re)
47 0 0000 0 3 2 .... 0 -1 -1 b789fdd96dc2 (re)
47 0 0000 0 3 2 .... 0 -1 -1 b789fdd96dc2 (re)
48 $ hg --debug debugindex -f 1 a
48 $ hg --debug debugindex -f 1 a
49 rev flag offset length size ..... link p1 p2 nodeid (re)
49 rev flag offset length size ..... link p1 p2 nodeid (re)
50 0 0000 0 3 2 .... 0 -1 -1 b789fdd96dc2f3bd229c1dd8eedf0fc60e2b68e3 (re)
50 0 0000 0 3 2 .... 0 -1 -1 b789fdd96dc2f3bd229c1dd8eedf0fc60e2b68e3 (re)
51
51
52 debugdelta chain basic output
52 debugdelta chain basic output
53
53
54 $ hg debugdeltachain -m
54 $ hg debugdeltachain -m
55 rev chain# chainlen prev delta size rawsize chainsize ratio lindist extradist extraratio
55 rev chain# chainlen prev delta size rawsize chainsize ratio lindist extradist extraratio
56 0 1 1 -1 base 44 43 44 1.02326 44 0 0.00000
56 0 1 1 -1 base 44 43 44 1.02326 44 0 0.00000
57
57
58 $ hg debugdeltachain -m -T '{rev} {chainid} {chainlen}\n'
58 $ hg debugdeltachain -m -T '{rev} {chainid} {chainlen}\n'
59 0 1 1
59 0 1 1
60
60
61 $ hg debugdeltachain -m -Tjson
61 $ hg debugdeltachain -m -Tjson
62 [
62 [
63 {
63 {
64 "chainid": 1,
64 "chainid": 1,
65 "chainlen": 1,
65 "chainlen": 1,
66 "chainratio": 1.02325581395,
66 "chainratio": 1.02325581395,
67 "chainsize": 44,
67 "chainsize": 44,
68 "compsize": 44,
68 "compsize": 44,
69 "deltatype": "base",
69 "deltatype": "base",
70 "extradist": 0,
70 "extradist": 0,
71 "extraratio": 0.0,
71 "extraratio": 0.0,
72 "lindist": 44,
72 "lindist": 44,
73 "prevrev": -1,
73 "prevrev": -1,
74 "rev": 0,
74 "rev": 0,
75 "uncompsize": 43
75 "uncompsize": 43
76 }
76 }
77 ]
77 ]
78
78
79 Test max chain len
79 Test max chain len
80 $ cat >> $HGRCPATH << EOF
80 $ cat >> $HGRCPATH << EOF
81 > [format]
81 > [format]
82 > maxchainlen=4
82 > maxchainlen=4
83 > EOF
83 > EOF
84
84
85 $ printf "This test checks if maxchainlen config value is respected also it can serve as basic test for debugrevlog -d <file>.\n" >> a
85 $ printf "This test checks if maxchainlen config value is respected also it can serve as basic test for debugrevlog -d <file>.\n" >> a
86 $ hg ci -m a
86 $ hg ci -m a
87 $ printf "b\n" >> a
87 $ printf "b\n" >> a
88 $ hg ci -m a
88 $ hg ci -m a
89 $ printf "c\n" >> a
89 $ printf "c\n" >> a
90 $ hg ci -m a
90 $ hg ci -m a
91 $ printf "d\n" >> a
91 $ printf "d\n" >> a
92 $ hg ci -m a
92 $ hg ci -m a
93 $ printf "e\n" >> a
93 $ printf "e\n" >> a
94 $ hg ci -m a
94 $ hg ci -m a
95 $ printf "f\n" >> a
95 $ printf "f\n" >> a
96 $ hg ci -m a
96 $ hg ci -m a
97 $ printf 'g\n' >> a
97 $ printf 'g\n' >> a
98 $ hg ci -m a
98 $ hg ci -m a
99 $ printf 'h\n' >> a
99 $ printf 'h\n' >> a
100 $ hg ci -m a
100 $ hg ci -m a
101 $ hg debugrevlog -d a
101 $ hg debugrevlog -d a
102 # rev p1rev p2rev start end deltastart base p1 p2 rawsize totalsize compression heads chainlen
102 # rev p1rev p2rev start end deltastart base p1 p2 rawsize totalsize compression heads chainlen
103 0 -1 -1 0 ??? 0 0 0 0 ??? ???? ? 1 0 (glob)
103 0 -1 -1 0 ??? 0 0 0 0 ??? ???? ? 1 0 (glob)
104 1 0 -1 ??? ??? 0 0 0 0 ??? ???? ? 1 1 (glob)
104 1 0 -1 ??? ??? 0 0 0 0 ??? ???? ? 1 1 (glob)
105 2 1 -1 ??? ??? ??? ??? ??? 0 ??? ???? ? 1 2 (glob)
105 2 1 -1 ??? ??? ??? ??? ??? 0 ??? ???? ? 1 2 (glob)
106 3 2 -1 ??? ??? ??? ??? ??? 0 ??? ???? ? 1 3 (glob)
106 3 2 -1 ??? ??? ??? ??? ??? 0 ??? ???? ? 1 3 (glob)
107 4 3 -1 ??? ??? ??? ??? ??? 0 ??? ???? ? 1 4 (glob)
107 4 3 -1 ??? ??? ??? ??? ??? 0 ??? ???? ? 1 4 (glob)
108 5 4 -1 ??? ??? ??? ??? ??? 0 ??? ???? ? 1 0 (glob)
108 5 4 -1 ??? ??? ??? ??? ??? 0 ??? ???? ? 1 0 (glob)
109 6 5 -1 ??? ??? ??? ??? ??? 0 ??? ???? ? 1 1 (glob)
109 6 5 -1 ??? ??? ??? ??? ??? 0 ??? ???? ? 1 1 (glob)
110 7 6 -1 ??? ??? ??? ??? ??? 0 ??? ???? ? 1 2 (glob)
110 7 6 -1 ??? ??? ??? ??? ??? 0 ??? ???? ? 1 2 (glob)
111 8 7 -1 ??? ??? ??? ??? ??? 0 ??? ???? ? 1 3 (glob)
111 8 7 -1 ??? ??? ??? ??? ??? 0 ??? ???? ? 1 3 (glob)
112 $ cd ..
112 $ cd ..
113
113
114 Test internal debugstacktrace command
114 Test internal debugstacktrace command
115
115
116 $ cat > debugstacktrace.py << EOF
116 $ cat > debugstacktrace.py << EOF
117 > from mercurial.util import debugstacktrace, dst, sys
117 > from mercurial.util import debugstacktrace, dst, sys
118 > def f():
118 > def f():
119 > debugstacktrace(f=sys.stdout)
119 > debugstacktrace(f=sys.stdout)
120 > g()
120 > g()
121 > def g():
121 > def g():
122 > dst('hello from g\\n', skip=1)
122 > dst('hello from g\\n', skip=1)
123 > h()
123 > h()
124 > def h():
124 > def h():
125 > dst('hi ...\\nfrom h hidden in g', 1)
125 > dst('hi ...\\nfrom h hidden in g', 1, depth=2)
126 > f()
126 > f()
127 > EOF
127 > EOF
128 $ python debugstacktrace.py
128 $ python debugstacktrace.py
129 stacktrace at:
129 stacktrace at:
130 debugstacktrace.py:10 in * (glob)
130 debugstacktrace.py:10 in * (glob)
131 debugstacktrace.py:3 in f
131 debugstacktrace.py:3 in f
132 hello from g at:
132 hello from g at:
133 debugstacktrace.py:10 in * (glob)
133 debugstacktrace.py:10 in * (glob)
134 debugstacktrace.py:4 in f
134 debugstacktrace.py:4 in f
135 hi ...
135 hi ...
136 from h hidden in g at:
136 from h hidden in g at:
137 debugstacktrace.py:10 in * (glob)
137 debugstacktrace.py:4 in f
138 debugstacktrace.py:4 in f
138 debugstacktrace.py:7 in g
139 debugstacktrace.py:7 in g
General Comments 0
You need to be logged in to leave comments. Login now