util: always force line buffered stdout when stdout is a tty (BC)...
Simon Farnsworth
r30876:3a4c0905 default
hgext/pager.py
@@ -1,172 +1,163 @@
1 1 # pager.py - display output using a pager
2 2 #
3 3 # Copyright 2008 David Soria Parra <dsp@php.net>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7 #
8 8 # To load the extension, add it to your configuration file:
9 9 #
10 10 # [extensions]
11 11 # pager =
12 12 #
13 13 # Run 'hg help pager' to get info on configuration.
14 14
15 15 '''browse command output with an external pager
16 16
17 17 To set the pager that should be used, set the configuration option::
18 18
19 19 [pager]
20 20 pager = less -FRX
21 21
22 22 If no pager is set, the pager extension uses the environment variable
23 23 $PAGER. If neither pager.pager nor $PAGER is set, no pager is used.
24 24
25 25 You can disable the pager for certain commands by adding them to the
26 26 pager.ignore list::
27 27
28 28 [pager]
29 29 ignore = version, help, update
30 30
31 31 You can also enable the pager only for certain commands using
32 32 pager.attend. Below is the default list of commands to be paged::
33 33
34 34 [pager]
35 35 attend = annotate, cat, diff, export, glog, log, qdiff
36 36
37 37 Setting pager.attend to an empty value will cause all commands to be
38 38 paged.
39 39
40 40 If pager.attend is present, pager.ignore will be ignored.
41 41
42 42 Lastly, you can enable and disable paging for individual commands with
43 43 the attend-<command> option. This setting takes precedence over
44 44 existing attend and ignore options and defaults::
45 45
46 46 [pager]
47 47 attend-cat = false
48 48
49 49 To ignore global commands like :hg:`version` or :hg:`help`, you have
50 50 to specify them in your user configuration file.
51 51
52 52 To control whether the pager is used at all for an individual command,
53 53 you can use --pager=<value>::
54 54
55 55 - use as needed: `auto`.
56 56 - require the pager: `yes` or `on`.
57 57 - suppress the pager: `no` or `off` (any unrecognized value
58 58 will also work).
59 59
60 60 '''
61 61 from __future__ import absolute_import
62 62
63 63 import atexit
64 64 import os
65 65 import signal
66 66 import subprocess
67 67 import sys
68 68
69 69 from mercurial.i18n import _
70 70 from mercurial import (
71 71 cmdutil,
72 72 commands,
73 73 dispatch,
74 74 encoding,
75 75 extensions,
76 76 util,
77 77 )
78 78
79 79 # Note for extension authors: ONLY specify testedwith = 'ships-with-hg-core' for
80 80 # extensions which SHIP WITH MERCURIAL. Non-mainline extensions should
81 81 # be specifying the version(s) of Mercurial they are tested with, or
82 82 # leave the attribute unspecified.
83 83 testedwith = 'ships-with-hg-core'
84 84
85 85 def _runpager(ui, p):
86 86 pager = subprocess.Popen(p, shell=True, bufsize=-1,
87 87 close_fds=util.closefds, stdin=subprocess.PIPE,
88 88 stdout=util.stdout, stderr=util.stderr)
89 89
90 # back up original file objects and descriptors
91 olduifout = ui.fout
92 oldstdout = util.stdout
90 # back up original file descriptors
93 91 stdoutfd = os.dup(util.stdout.fileno())
94 92 stderrfd = os.dup(util.stderr.fileno())
95 93
96 # create new line-buffered stdout so that output can show up immediately
97 ui.fout = util.stdout = newstdout = os.fdopen(util.stdout.fileno(), 'wb', 1)
98 94 os.dup2(pager.stdin.fileno(), util.stdout.fileno())
99 95 if ui._isatty(util.stderr):
100 96 os.dup2(pager.stdin.fileno(), util.stderr.fileno())
101 97
102 98 @atexit.register
103 99 def killpager():
104 100 if util.safehasattr(signal, "SIGINT"):
105 101 signal.signal(signal.SIGINT, signal.SIG_IGN)
106 pager.stdin.close()
107 ui.fout = olduifout
108 util.stdout = oldstdout
109 # close new stdout while it's associated with pager; otherwise stdout
110 # fd would be closed when newstdout is deleted
111 newstdout.close()
112 # restore original fds: stdout is open again
102 # restore original fds, closing pager.stdin copies in the process
113 103 os.dup2(stdoutfd, util.stdout.fileno())
114 104 os.dup2(stderrfd, util.stderr.fileno())
105 pager.stdin.close()
115 106 pager.wait()
116 107
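As an aside for readers following the fd juggling in _runpager above, here is a minimal standalone sketch of the same redirection pattern; run_with_pager and the 'less -FRX' command are illustrative choices, not Mercurial API:

import os
import subprocess
import sys

def run_with_pager(pagercmd='less -FRX'):
    # spawn the pager and point our stdout at its stdin
    pager = subprocess.Popen(pagercmd, shell=True, stdin=subprocess.PIPE)
    saved = os.dup(sys.stdout.fileno())           # remember the real stdout
    os.dup2(pager.stdin.fileno(), sys.stdout.fileno())

    def restore():
        sys.stdout.flush()
        os.dup2(saved, sys.stdout.fileno())       # stdout is the tty again
        os.close(saved)
        pager.stdin.close()                       # pager sees EOF ...
        pager.wait()                              # ... and exits
    return restore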
117 108 def uisetup(ui):
118 109 class pagerui(ui.__class__):
119 110 def _runpager(self, pagercmd):
120 111 _runpager(self, pagercmd)
121 112
122 113 ui.__class__ = pagerui
123 114
124 115 def pagecmd(orig, ui, options, cmd, cmdfunc):
125 116 p = ui.config("pager", "pager", encoding.environ.get("PAGER"))
126 117 usepager = False
127 118 always = util.parsebool(options['pager'])
128 119 auto = options['pager'] == 'auto'
129 120
130 121 if not p or '--debugger' in sys.argv or not ui.formatted():
131 122 pass
132 123 elif always:
133 124 usepager = True
134 125 elif not auto:
135 126 usepager = False
136 127 else:
137 128 attend = ui.configlist('pager', 'attend', attended)
138 129 ignore = ui.configlist('pager', 'ignore')
139 130 cmds, _ = cmdutil.findcmd(cmd, commands.table)
140 131
141 132 for cmd in cmds:
142 133 var = 'attend-%s' % cmd
143 134 if ui.config('pager', var):
144 135 usepager = ui.configbool('pager', var)
145 136 break
146 137 if (cmd in attend or
147 138 (cmd not in ignore and not attend)):
148 139 usepager = True
149 140 break
150 141
151 142 setattr(ui, 'pageractive', usepager)
152 143
153 144 if usepager:
154 145 ui.setconfig('ui', 'formatted', ui.formatted(), 'pager')
155 146 ui.setconfig('ui', 'interactive', False, 'pager')
156 147 ui._runpager(p)
157 148 return orig(ui, options, cmd, cmdfunc)
158 149
159 150 # Wrap dispatch._runcommand after color is loaded so color can see
160 151 # ui.pageractive. Otherwise, if we loaded first, color's wrapped
161 152 # dispatch._runcommand would run without having access to ui.pageractive.
162 153 def afterloaded(loaded):
163 154 extensions.wrapfunction(dispatch, '_runcommand', pagecmd)
164 155 extensions.afterloaded('color', afterloaded)
165 156
166 157 def extsetup(ui):
167 158 commands.globalopts.append(
168 159 ('', 'pager', 'auto',
169 160 _("when to paginate (boolean, always, auto, or never)"),
170 161 _('TYPE')))
171 162
172 163 attended = ['annotate', 'cat', 'diff', 'export', 'glog', 'log', 'qdiff']
mercurial/util.py
@@ -1,3545 +1,3551 @@
1 1 # util.py - Mercurial utility functions and platform specific implementations
2 2 #
3 3 # Copyright 2005 K. Thananchayan <thananck@yahoo.com>
4 4 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
5 5 # Copyright 2006 Vadim Gelfer <vadim.gelfer@gmail.com>
6 6 #
7 7 # This software may be used and distributed according to the terms of the
8 8 # GNU General Public License version 2 or any later version.
9 9
10 10 """Mercurial utility functions and platform specific implementations.
11 11
12 12 This contains helper routines that are independent of the SCM core and
13 13 hide platform-specific details from the core.
14 14 """
15 15
16 16 from __future__ import absolute_import
17 17
18 18 import bz2
19 19 import calendar
20 20 import collections
21 21 import datetime
22 22 import errno
23 23 import gc
24 24 import hashlib
25 25 import imp
26 26 import os
27 27 import platform as pyplatform
28 28 import re as remod
29 29 import shutil
30 30 import signal
31 31 import socket
32 32 import stat
33 33 import string
34 34 import subprocess
35 35 import sys
36 36 import tempfile
37 37 import textwrap
38 38 import time
39 39 import traceback
40 40 import zlib
41 41
42 42 from . import (
43 43 encoding,
44 44 error,
45 45 i18n,
46 46 osutil,
47 47 parsers,
48 48 pycompat,
49 49 )
50 50
51 51 empty = pycompat.empty
52 52 httplib = pycompat.httplib
53 53 httpserver = pycompat.httpserver
54 54 pickle = pycompat.pickle
55 55 queue = pycompat.queue
56 56 socketserver = pycompat.socketserver
57 57 stderr = pycompat.stderr
58 58 stdin = pycompat.stdin
59 59 stdout = pycompat.stdout
60 60 stringio = pycompat.stringio
61 61 urlerr = pycompat.urlerr
62 62 urlparse = pycompat.urlparse
63 63 urlreq = pycompat.urlreq
64 64 xmlrpclib = pycompat.xmlrpclib
65 65
66 def isatty(fp):
67 try:
68 return fp.isatty()
69 except AttributeError:
70 return False
71
72 # glibc determines buffering on first write to stdout - if we replace a
73 # TTY-destined stdout with a pipe-destined stdout (e.g. pager), we want
74 # line buffering
75 if isatty(stdout):
76 stdout = os.fdopen(stdout.fileno(), 'wb', 1)
77
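The effect of the new hunk above can be reproduced outside Mercurial; a hedged, minimal sketch (Python 2, POSIX assumed): stdio-style buffering is decided on first use, so re-opening a tty-backed stdout line buffered before any write makes the choice stick even after the fd is later pointed at a pipe:

import os
import sys

if sys.stdout.isatty():
    # the final argument of 1 requests line buffering
    sys.stdout = os.fdopen(sys.stdout.fileno(), 'wb', 1)

# even if the underlying fd is later dup2()-ed onto a pager's pipe,
# each completed line is now flushed immediately instead of sitting
# in a block buffer until the program exits
sys.stdout.write('progress: step 1 done\n')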
66 78 if pycompat.osname == 'nt':
67 79 from . import windows as platform
68 stdout = platform.winstdout(pycompat.stdout)
80 stdout = platform.winstdout(stdout)
69 81 else:
70 82 from . import posix as platform
71 83
72 84 _ = i18n._
73 85
74 86 bindunixsocket = platform.bindunixsocket
75 87 cachestat = platform.cachestat
76 88 checkexec = platform.checkexec
77 89 checklink = platform.checklink
78 90 copymode = platform.copymode
79 91 executablepath = platform.executablepath
80 92 expandglobs = platform.expandglobs
81 93 explainexit = platform.explainexit
82 94 findexe = platform.findexe
83 95 gethgcmd = platform.gethgcmd
84 96 getuser = platform.getuser
85 97 getpid = os.getpid
86 98 groupmembers = platform.groupmembers
87 99 groupname = platform.groupname
88 100 hidewindow = platform.hidewindow
89 101 isexec = platform.isexec
90 102 isowner = platform.isowner
91 103 localpath = platform.localpath
92 104 lookupreg = platform.lookupreg
93 105 makedir = platform.makedir
94 106 nlinks = platform.nlinks
95 107 normpath = platform.normpath
96 108 normcase = platform.normcase
97 109 normcasespec = platform.normcasespec
98 110 normcasefallback = platform.normcasefallback
99 111 openhardlinks = platform.openhardlinks
100 112 oslink = platform.oslink
101 113 parsepatchoutput = platform.parsepatchoutput
102 114 pconvert = platform.pconvert
103 115 poll = platform.poll
104 116 popen = platform.popen
105 117 posixfile = platform.posixfile
106 118 quotecommand = platform.quotecommand
107 119 readpipe = platform.readpipe
108 120 rename = platform.rename
109 121 removedirs = platform.removedirs
110 122 samedevice = platform.samedevice
111 123 samefile = platform.samefile
112 124 samestat = platform.samestat
113 125 setbinary = platform.setbinary
114 126 setflags = platform.setflags
115 127 setsignalhandler = platform.setsignalhandler
116 128 shellquote = platform.shellquote
117 129 spawndetached = platform.spawndetached
118 130 split = platform.split
119 131 sshargs = platform.sshargs
120 132 statfiles = getattr(osutil, 'statfiles', platform.statfiles)
121 133 statisexec = platform.statisexec
122 134 statislink = platform.statislink
123 135 testpid = platform.testpid
124 136 umask = platform.umask
125 137 unlink = platform.unlink
126 138 unlinkpath = platform.unlinkpath
127 139 username = platform.username
128 140
129 141 # Python compatibility
130 142
131 143 _notset = object()
132 144
133 145 # disable Python's problematic floating point timestamps (issue4836)
134 146 # (Python hypocritically says you shouldn't change this behavior in
135 147 # libraries, and sure enough Mercurial is not a library.)
136 148 os.stat_float_times(False)
137 149
138 150 def safehasattr(thing, attr):
139 151 return getattr(thing, attr, _notset) is not _notset
140 152
141 153 def bitsfrom(container):
142 154 bits = 0
143 155 for bit in container:
144 156 bits |= bit
145 157 return bits
146 158
147 159 DIGESTS = {
148 160 'md5': hashlib.md5,
149 161 'sha1': hashlib.sha1,
150 162 'sha512': hashlib.sha512,
151 163 }
152 164 # List of digest types from strongest to weakest
153 165 DIGESTS_BY_STRENGTH = ['sha512', 'sha1', 'md5']
154 166
155 167 for k in DIGESTS_BY_STRENGTH:
156 168 assert k in DIGESTS
157 169
158 170 class digester(object):
159 171 """helper to compute digests.
160 172
161 173 This helper can be used to compute one or more digests given their name.
162 174
163 175 >>> d = digester(['md5', 'sha1'])
164 176 >>> d.update('foo')
165 177 >>> [k for k in sorted(d)]
166 178 ['md5', 'sha1']
167 179 >>> d['md5']
168 180 'acbd18db4cc2f85cedef654fccc4a4d8'
169 181 >>> d['sha1']
170 182 '0beec7b5ea3f0fdbc95d0dd47f3c5bc275da8a33'
171 183 >>> digester.preferred(['md5', 'sha1'])
172 184 'sha1'
173 185 """
174 186
175 187 def __init__(self, digests, s=''):
176 188 self._hashes = {}
177 189 for k in digests:
178 190 if k not in DIGESTS:
179 191 raise Abort(_('unknown digest type: %s') % k)
180 192 self._hashes[k] = DIGESTS[k]()
181 193 if s:
182 194 self.update(s)
183 195
184 196 def update(self, data):
185 197 for h in self._hashes.values():
186 198 h.update(data)
187 199
188 200 def __getitem__(self, key):
189 201 if key not in DIGESTS:
190 202 raise Abort(_('unknown digest type: %s') % key)
191 203 return self._hashes[key].hexdigest()
192 204
193 205 def __iter__(self):
194 206 return iter(self._hashes)
195 207
196 208 @staticmethod
197 209 def preferred(supported):
198 210 """returns the strongest digest type in both supported and DIGESTS."""
199 211
200 212 for k in DIGESTS_BY_STRENGTH:
201 213 if k in supported:
202 214 return k
203 215 return None
204 216
205 217 class digestchecker(object):
206 218 """file handle wrapper that additionally checks content against a given
207 219 size and digests.
208 220
209 221 d = digestchecker(fh, size, {'md5': '...'})
210 222
211 223 When multiple digests are given, all of them are validated.
212 224 """
213 225
214 226 def __init__(self, fh, size, digests):
215 227 self._fh = fh
216 228 self._size = size
217 229 self._got = 0
218 230 self._digests = dict(digests)
219 231 self._digester = digester(self._digests.keys())
220 232
221 233 def read(self, length=-1):
222 234 content = self._fh.read(length)
223 235 self._digester.update(content)
224 236 self._got += len(content)
225 237 return content
226 238
227 239 def validate(self):
228 240 if self._size != self._got:
229 241 raise Abort(_('size mismatch: expected %d, got %d') %
230 242 (self._size, self._got))
231 243 for k, v in self._digests.items():
232 244 if v != self._digester[k]:
233 245 # i18n: first parameter is a digest name
234 246 raise Abort(_('%s mismatch: expected %s, got %s') %
235 247 (k, v, self._digester[k]))
236 248
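A short usage sketch for the two classes above (the payload and expected digest are made up for illustration):

import hashlib

# digester: one pass over the data, several digests out
d = digester(['md5', 'sha1'])
d.update('payload')
sha = d['sha1']                       # hex digest string

# digestchecker: wrap a file handle, then verify size and digests
expected = {'sha1': hashlib.sha1('payload').hexdigest()}
fh = stringio('payload')
wrapped = digestchecker(fh, len('payload'), expected)
while wrapped.read(4096):
    pass
wrapped.validate()                    # raises Abort on any mismatch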
237 249 try:
238 250 buffer = buffer
239 251 except NameError:
240 252 if not pycompat.ispy3:
241 253 def buffer(sliceable, offset=0, length=None):
242 254 if length is not None:
243 255 return sliceable[offset:offset + length]
244 256 return sliceable[offset:]
245 257 else:
246 258 def buffer(sliceable, offset=0, length=None):
247 259 if length is not None:
248 260 return memoryview(sliceable)[offset:offset + length]
249 261 return memoryview(sliceable)[offset:]
250 262
251 263 closefds = pycompat.osname == 'posix'
252 264
253 265 _chunksize = 4096
254 266
255 267 class bufferedinputpipe(object):
256 268 """a manually buffered input pipe
257 269
258 270 Python will not let us use buffered IO and lazy reading with 'polling' at
259 271 the same time. We cannot probe the buffer state and select will not detect
260 272 that data are ready to read if they are already buffered.
261 273
262 274 This class lets us work around that by implementing its own buffering
263 275 (allowing efficient readline) while offering a way to know if the buffer is
264 276 empty from the output (allowing collaboration of the buffer with polling).
265 277
266 278 This class lives in the 'util' module because it makes use of the 'os'
267 279 module from the python stdlib.
268 280 """
269 281
270 282 def __init__(self, input):
271 283 self._input = input
272 284 self._buffer = []
273 285 self._eof = False
274 286 self._lenbuf = 0
275 287
276 288 @property
277 289 def hasbuffer(self):
278 290 """True if any data is currently buffered
279 291
280 292 This will be used externally as a pre-step for polling IO. If there is
281 293 already buffered data then no polling should be put in place."""
282 294 return bool(self._buffer)
283 295
284 296 @property
285 297 def closed(self):
286 298 return self._input.closed
287 299
288 300 def fileno(self):
289 301 return self._input.fileno()
290 302
291 303 def close(self):
292 304 return self._input.close()
293 305
294 306 def read(self, size):
295 307 while (not self._eof) and (self._lenbuf < size):
296 308 self._fillbuffer()
297 309 return self._frombuffer(size)
298 310
299 311 def readline(self, *args, **kwargs):
300 312 if 1 < len(self._buffer):
301 313 # this should not happen because both read and readline end with a
302 314 # _frombuffer call that collapses it.
303 315 self._buffer = [''.join(self._buffer)]
304 316 self._lenbuf = len(self._buffer[0])
305 317 lfi = -1
306 318 if self._buffer:
307 319 lfi = self._buffer[-1].find('\n')
308 320 while (not self._eof) and lfi < 0:
309 321 self._fillbuffer()
310 322 if self._buffer:
311 323 lfi = self._buffer[-1].find('\n')
312 324 size = lfi + 1
313 325 if lfi < 0: # end of file
314 326 size = self._lenbuf
315 327 elif 1 < len(self._buffer):
316 328 # we need to take previous chunks into account
317 329 size += self._lenbuf - len(self._buffer[-1])
318 330 return self._frombuffer(size)
319 331
320 332 def _frombuffer(self, size):
321 333 """return at most 'size' data from the buffer
322 334
323 335 The data are removed from the buffer."""
324 336 if size == 0 or not self._buffer:
325 337 return ''
326 338 buf = self._buffer[0]
327 339 if 1 < len(self._buffer):
328 340 buf = ''.join(self._buffer)
329 341
330 342 data = buf[:size]
331 343 buf = buf[len(data):]
332 344 if buf:
333 345 self._buffer = [buf]
334 346 self._lenbuf = len(buf)
335 347 else:
336 348 self._buffer = []
337 349 self._lenbuf = 0
338 350 return data
339 351
340 352 def _fillbuffer(self):
341 353 """read data to the buffer"""
342 354 data = os.read(self._input.fileno(), _chunksize)
343 355 if not data:
344 356 self._eof = True
345 357 else:
346 358 self._lenbuf += len(data)
347 359 self._buffer.append(data)
348 360
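A hedged sketch of the intended collaboration with polling: consult hasbuffer first and only block in poll() when nothing is buffered (the 'cat' child process is illustrative):

import subprocess

p = subprocess.Popen(['cat'], stdin=subprocess.PIPE, stdout=subprocess.PIPE)
p.stdin.write('one\ntwo\n')
p.stdin.close()

pipe = bufferedinputpipe(p.stdout)
lines = []
while True:
    if not pipe.hasbuffer:
        poll([pipe.fileno()])         # block only when the buffer is empty
    line = pipe.readline()
    if not line:
        break                         # os.read() returned '': EOF
    lines.append(line)
assert lines == ['one\n', 'two\n']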
349 361 def popen2(cmd, env=None, newlines=False):
350 362 # Setting bufsize to -1 lets the system decide the buffer size.
351 363 # The default for bufsize is 0, meaning unbuffered. This leads to
352 364 # poor performance on Mac OS X: http://bugs.python.org/issue4194
353 365 p = subprocess.Popen(cmd, shell=True, bufsize=-1,
354 366 close_fds=closefds,
355 367 stdin=subprocess.PIPE, stdout=subprocess.PIPE,
356 368 universal_newlines=newlines,
357 369 env=env)
358 370 return p.stdin, p.stdout
359 371
360 372 def popen3(cmd, env=None, newlines=False):
361 373 stdin, stdout, stderr, p = popen4(cmd, env, newlines)
362 374 return stdin, stdout, stderr
363 375
364 376 def popen4(cmd, env=None, newlines=False, bufsize=-1):
365 377 p = subprocess.Popen(cmd, shell=True, bufsize=bufsize,
366 378 close_fds=closefds,
367 379 stdin=subprocess.PIPE, stdout=subprocess.PIPE,
368 380 stderr=subprocess.PIPE,
369 381 universal_newlines=newlines,
370 382 env=env)
371 383 return p.stdin, p.stdout, p.stderr, p
372 384
373 385 def version():
374 386 """Return version information if available."""
375 387 try:
376 388 from . import __version__
377 389 return __version__.version
378 390 except ImportError:
379 391 return 'unknown'
380 392
381 393 def versiontuple(v=None, n=4):
382 394 """Parses a Mercurial version string into an N-tuple.
383 395
384 396 The version string to be parsed is specified with the ``v`` argument.
385 397 If it isn't defined, the current Mercurial version string will be parsed.
386 398
387 399 ``n`` can be 2, 3, or 4. Here is how some version strings map to
388 400 returned values:
389 401
390 402 >>> v = '3.6.1+190-df9b73d2d444'
391 403 >>> versiontuple(v, 2)
392 404 (3, 6)
393 405 >>> versiontuple(v, 3)
394 406 (3, 6, 1)
395 407 >>> versiontuple(v, 4)
396 408 (3, 6, 1, '190-df9b73d2d444')
397 409
398 410 >>> versiontuple('3.6.1+190-df9b73d2d444+20151118')
399 411 (3, 6, 1, '190-df9b73d2d444+20151118')
400 412
401 413 >>> v = '3.6'
402 414 >>> versiontuple(v, 2)
403 415 (3, 6)
404 416 >>> versiontuple(v, 3)
405 417 (3, 6, None)
406 418 >>> versiontuple(v, 4)
407 419 (3, 6, None, None)
408 420
409 421 >>> v = '3.9-rc'
410 422 >>> versiontuple(v, 2)
411 423 (3, 9)
412 424 >>> versiontuple(v, 3)
413 425 (3, 9, None)
414 426 >>> versiontuple(v, 4)
415 427 (3, 9, None, 'rc')
416 428
417 429 >>> v = '3.9-rc+2-02a8fea4289b'
418 430 >>> versiontuple(v, 2)
419 431 (3, 9)
420 432 >>> versiontuple(v, 3)
421 433 (3, 9, None)
422 434 >>> versiontuple(v, 4)
423 435 (3, 9, None, 'rc+2-02a8fea4289b')
424 436 """
425 437 if not v:
426 438 v = version()
427 439 parts = remod.split('[\+-]', v, 1)
428 440 if len(parts) == 1:
429 441 vparts, extra = parts[0], None
430 442 else:
431 443 vparts, extra = parts
432 444
433 445 vints = []
434 446 for i in vparts.split('.'):
435 447 try:
436 448 vints.append(int(i))
437 449 except ValueError:
438 450 break
439 451 # (3, 6) -> (3, 6, None)
440 452 while len(vints) < 3:
441 453 vints.append(None)
442 454
443 455 if n == 2:
444 456 return (vints[0], vints[1])
445 457 if n == 3:
446 458 return (vints[0], vints[1], vints[2])
447 459 if n == 4:
448 460 return (vints[0], vints[1], vints[2], extra)
449 461
450 462 # used by parsedate
451 463 defaultdateformats = (
452 464 '%Y-%m-%dT%H:%M:%S', # the 'real' ISO8601
453 465 '%Y-%m-%dT%H:%M', # without seconds
454 466 '%Y-%m-%dT%H%M%S', # another awful but legal variant without :
455 467 '%Y-%m-%dT%H%M', # without seconds
456 468 '%Y-%m-%d %H:%M:%S', # our common legal variant
457 469 '%Y-%m-%d %H:%M', # without seconds
458 470 '%Y-%m-%d %H%M%S', # without :
459 471 '%Y-%m-%d %H%M', # without seconds
460 472 '%Y-%m-%d %I:%M:%S%p',
461 473 '%Y-%m-%d %H:%M',
462 474 '%Y-%m-%d %I:%M%p',
463 475 '%Y-%m-%d',
464 476 '%m-%d',
465 477 '%m/%d',
466 478 '%m/%d/%y',
467 479 '%m/%d/%Y',
468 480 '%a %b %d %H:%M:%S %Y',
469 481 '%a %b %d %I:%M:%S%p %Y',
470 482 '%a, %d %b %Y %H:%M:%S', # GNU coreutils "/bin/date --rfc-2822"
471 483 '%b %d %H:%M:%S %Y',
472 484 '%b %d %I:%M:%S%p %Y',
473 485 '%b %d %H:%M:%S',
474 486 '%b %d %I:%M:%S%p',
475 487 '%b %d %H:%M',
476 488 '%b %d %I:%M%p',
477 489 '%b %d %Y',
478 490 '%b %d',
479 491 '%H:%M:%S',
480 492 '%I:%M:%S%p',
481 493 '%H:%M',
482 494 '%I:%M%p',
483 495 )
484 496
485 497 extendeddateformats = defaultdateformats + (
486 498 "%Y",
487 499 "%Y-%m",
488 500 "%b",
489 501 "%b %Y",
490 502 )
491 503
492 504 def cachefunc(func):
493 505 '''cache the result of function calls'''
494 506 # XXX doesn't handle keyword args
495 507 if func.__code__.co_argcount == 0:
496 508 cache = []
497 509 def f():
498 510 if len(cache) == 0:
499 511 cache.append(func())
500 512 return cache[0]
501 513 return f
502 514 cache = {}
503 515 if func.__code__.co_argcount == 1:
504 516 # we gain a small amount of time because
505 517 # we don't need to pack/unpack the list
506 518 def f(arg):
507 519 if arg not in cache:
508 520 cache[arg] = func(arg)
509 521 return cache[arg]
510 522 else:
511 523 def f(*args):
512 524 if args not in cache:
513 525 cache[args] = func(*args)
514 526 return cache[args]
515 527
516 528 return f
517 529
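Usage is plain memoization; a small illustrative example:

calls = []
def square(x):
    calls.append(x)
    return x * x

square = cachefunc(square)
assert square(3) == 9
assert square(3) == 9                 # second call served from the cache
assert calls == [3]                   # the wrapped function ran only once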
518 530 class sortdict(dict):
519 531 '''a simple sorted dictionary'''
520 532 def __init__(self, data=None):
521 533 self._list = []
522 534 if data:
523 535 self.update(data)
524 536 def copy(self):
525 537 return sortdict(self)
526 538 def __setitem__(self, key, val):
527 539 if key in self:
528 540 self._list.remove(key)
529 541 self._list.append(key)
530 542 dict.__setitem__(self, key, val)
531 543 def __iter__(self):
532 544 return self._list.__iter__()
533 545 def update(self, src):
534 546 if isinstance(src, dict):
535 547 src = src.iteritems()
536 548 for k, v in src:
537 549 self[k] = v
538 550 def clear(self):
539 551 dict.clear(self)
540 552 self._list = []
541 553 def items(self):
542 554 return [(k, self[k]) for k in self._list]
543 555 def __delitem__(self, key):
544 556 dict.__delitem__(self, key)
545 557 self._list.remove(key)
546 558 def pop(self, key, *args, **kwargs):
547 559 dict.pop(self, key, *args, **kwargs)
548 560 try:
549 561 self._list.remove(key)
550 562 except ValueError:
551 563 pass
552 564 def keys(self):
553 565 return self._list[:]
554 566 def iterkeys(self):
555 567 return self._list.__iter__()
556 568 def iteritems(self):
557 569 for k in self._list:
558 570 yield k, self[k]
559 571 def insert(self, index, key, val):
560 572 self._list.insert(index, key)
561 573 dict.__setitem__(self, key, val)
562 574 def __repr__(self):
563 575 if not self:
564 576 return '%s()' % self.__class__.__name__
565 577 return '%s(%r)' % (self.__class__.__name__, self.items())
566 578
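Note that reassigning an existing key moves it to the end of the iteration order, unlike collections.OrderedDict; a quick illustration:

d = sortdict()
d['b'] = 1
d['a'] = 2
d['b'] = 3                            # 'b' is re-appended, not kept in place
assert d.keys() == ['a', 'b']
assert d.items() == [('a', 2), ('b', 3)]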
567 579 class _lrucachenode(object):
568 580 """A node in a doubly linked list.
569 581
570 582 Holds a reference to nodes on either side as well as a key-value
571 583 pair for the dictionary entry.
572 584 """
573 585 __slots__ = (u'next', u'prev', u'key', u'value')
574 586
575 587 def __init__(self):
576 588 self.next = None
577 589 self.prev = None
578 590
579 591 self.key = _notset
580 592 self.value = None
581 593
582 594 def markempty(self):
583 595 """Mark the node as emptied."""
584 596 self.key = _notset
585 597
586 598 class lrucachedict(object):
587 599 """Dict that caches most recent accesses and sets.
588 600
589 601 The dict consists of an actual backing dict - indexed by original
590 602 key - and a doubly linked circular list defining the order of entries in
591 603 the cache.
592 604
593 605 The head node is the newest entry in the cache. If the cache is full,
594 606 we recycle head.prev and make it the new head. Cache accesses result in
595 607 the node being moved to before the existing head and being marked as the
596 608 new head node.
597 609 """
598 610 def __init__(self, max):
599 611 self._cache = {}
600 612
601 613 self._head = head = _lrucachenode()
602 614 head.prev = head
603 615 head.next = head
604 616 self._size = 1
605 617 self._capacity = max
606 618
607 619 def __len__(self):
608 620 return len(self._cache)
609 621
610 622 def __contains__(self, k):
611 623 return k in self._cache
612 624
613 625 def __iter__(self):
614 626 # We don't have to iterate in cache order, but why not.
615 627 n = self._head
616 628 for i in range(len(self._cache)):
617 629 yield n.key
618 630 n = n.next
619 631
620 632 def __getitem__(self, k):
621 633 node = self._cache[k]
622 634 self._movetohead(node)
623 635 return node.value
624 636
625 637 def __setitem__(self, k, v):
626 638 node = self._cache.get(k)
627 639 # Replace existing value and mark as newest.
628 640 if node is not None:
629 641 node.value = v
630 642 self._movetohead(node)
631 643 return
632 644
633 645 if self._size < self._capacity:
634 646 node = self._addcapacity()
635 647 else:
636 648 # Grab the last/oldest item.
637 649 node = self._head.prev
638 650
639 651 # At capacity. Kill the old entry.
640 652 if node.key is not _notset:
641 653 del self._cache[node.key]
642 654
643 655 node.key = k
644 656 node.value = v
645 657 self._cache[k] = node
646 658 # And mark it as newest entry. No need to adjust order since it
647 659 # is already self._head.prev.
648 660 self._head = node
649 661
650 662 def __delitem__(self, k):
651 663 node = self._cache.pop(k)
652 664 node.markempty()
653 665
654 666 # Temporarily mark as newest item before re-adjusting head to make
655 667 # this node the oldest item.
656 668 self._movetohead(node)
657 669 self._head = node.next
658 670
659 671 # Additional dict methods.
660 672
661 673 def get(self, k, default=None):
662 674 try:
663 675 return self._cache[k].value
664 676 except KeyError:
665 677 return default
666 678
667 679 def clear(self):
668 680 n = self._head
669 681 while n.key is not _notset:
670 682 n.markempty()
671 683 n = n.next
672 684
673 685 self._cache.clear()
674 686
675 687 def copy(self):
676 688 result = lrucachedict(self._capacity)
677 689 n = self._head.prev
678 690 # Iterate in oldest-to-newest order, so the copy has the right ordering
679 691 for i in range(len(self._cache)):
680 692 result[n.key] = n.value
681 693 n = n.prev
682 694 return result
683 695
684 696 def _movetohead(self, node):
685 697 """Mark a node as the newest, making it the new head.
686 698
687 699 When a node is accessed, it becomes the freshest entry in the LRU
688 700 list, which is denoted by self._head.
689 701
690 702 Visually, let's make ``N`` the new head node (* denotes head):
691 703
692 704 previous/oldest <-> head <-> next/next newest
693 705
694 706 ----<->--- A* ---<->-----
695 707 | |
696 708 E <-> D <-> N <-> C <-> B
697 709
698 710 To:
699 711
700 712 ----<->--- N* ---<->-----
701 713 | |
702 714 E <-> D <-> C <-> B <-> A
703 715
704 716 This requires the following moves:
705 717
706 718 C.next = D (node.prev.next = node.next)
707 719 D.prev = C (node.next.prev = node.prev)
708 720 E.next = N (head.prev.next = node)
709 721 N.prev = E (node.prev = head.prev)
710 722 N.next = A (node.next = head)
711 723 A.prev = N (head.prev = node)
712 724 """
713 725 head = self._head
714 726 # C.next = D
715 727 node.prev.next = node.next
716 728 # D.prev = C
717 729 node.next.prev = node.prev
718 730 # N.prev = E
719 731 node.prev = head.prev
720 732 # N.next = A
721 733 # It is tempting to do just "head" here, however if node is
722 734 # adjacent to head, this will do bad things.
723 735 node.next = head.prev.next
724 736 # E.next = N
725 737 node.next.prev = node
726 738 # A.prev = N
727 739 node.prev.next = node
728 740
729 741 self._head = node
730 742
731 743 def _addcapacity(self):
732 744 """Add a node to the circular linked list.
733 745
734 746 The new node is inserted before the head node.
735 747 """
736 748 head = self._head
737 749 node = _lrucachenode()
738 750 head.prev.next = node
739 751 node.prev = head.prev
740 752 node.next = head
741 753 head.prev = node
742 754 self._size += 1
743 755 return node
744 756
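A quick illustration of the eviction behaviour described in the class docstring:

cache = lrucachedict(2)
cache['a'] = 1
cache['b'] = 2
cache['a']                            # access makes 'a' the newest entry
cache['c'] = 3                        # evicts 'b', the oldest entry
assert 'b' not in cache
assert 'a' in cache and 'c' in cache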
745 757 def lrucachefunc(func):
746 758 '''cache most recent results of function calls'''
747 759 cache = {}
748 760 order = collections.deque()
749 761 if func.__code__.co_argcount == 1:
750 762 def f(arg):
751 763 if arg not in cache:
752 764 if len(cache) > 20:
753 765 del cache[order.popleft()]
754 766 cache[arg] = func(arg)
755 767 else:
756 768 order.remove(arg)
757 769 order.append(arg)
758 770 return cache[arg]
759 771 else:
760 772 def f(*args):
761 773 if args not in cache:
762 774 if len(cache) > 20:
763 775 del cache[order.popleft()]
764 776 cache[args] = func(*args)
765 777 else:
766 778 order.remove(args)
767 779 order.append(args)
768 780 return cache[args]
769 781
770 782 return f
771 783
772 784 class propertycache(object):
773 785 def __init__(self, func):
774 786 self.func = func
775 787 self.name = func.__name__
776 788 def __get__(self, obj, type=None):
777 789 result = self.func(obj)
778 790 self.cachevalue(obj, result)
779 791 return result
780 792
781 793 def cachevalue(self, obj, value):
782 794 # __dict__ assignment required to bypass __setattr__ (eg: repoview)
783 795 obj.__dict__[self.name] = value
784 796
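Because cachevalue() stores the result in the instance __dict__, the descriptor only fires on the first access; a small illustrative example:

class config(object):
    loads = 0
    @propertycache
    def data(self):
        self.loads += 1               # expensive work happens only once
        return {'key': 'value'}

c = config()
c.data
c.data                                # now served straight from c.__dict__
assert c.loads == 1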
785 797 def pipefilter(s, cmd):
786 798 '''filter string S through command CMD, returning its output'''
787 799 p = subprocess.Popen(cmd, shell=True, close_fds=closefds,
788 800 stdin=subprocess.PIPE, stdout=subprocess.PIPE)
789 801 pout, perr = p.communicate(s)
790 802 return pout
791 803
792 804 def tempfilter(s, cmd):
793 805 '''filter string S through a pair of temporary files with CMD.
794 806 CMD is used as a template to create the real command to be run,
795 807 with the strings INFILE and OUTFILE replaced by the real names of
796 808 the temporary files generated.'''
797 809 inname, outname = None, None
798 810 try:
799 811 infd, inname = tempfile.mkstemp(prefix='hg-filter-in-')
800 812 fp = os.fdopen(infd, 'wb')
801 813 fp.write(s)
802 814 fp.close()
803 815 outfd, outname = tempfile.mkstemp(prefix='hg-filter-out-')
804 816 os.close(outfd)
805 817 cmd = cmd.replace('INFILE', inname)
806 818 cmd = cmd.replace('OUTFILE', outname)
807 819 code = os.system(cmd)
808 820 if pycompat.sysplatform == 'OpenVMS' and code & 1:
809 821 code = 0
810 822 if code:
811 823 raise Abort(_("command '%s' failed: %s") %
812 824 (cmd, explainexit(code)))
813 825 return readfile(outname)
814 826 finally:
815 827 try:
816 828 if inname:
817 829 os.unlink(inname)
818 830 except OSError:
819 831 pass
820 832 try:
821 833 if outname:
822 834 os.unlink(outname)
823 835 except OSError:
824 836 pass
825 837
826 838 filtertable = {
827 839 'tempfile:': tempfilter,
828 840 'pipe:': pipefilter,
829 841 }
830 842
831 843 def filter(s, cmd):
832 844 "filter a string through a command that transforms its input to its output"
833 845 for name, fn in filtertable.iteritems():
834 846 if cmd.startswith(name):
835 847 return fn(s, cmd[len(name):].lstrip())
836 848 return pipefilter(s, cmd)
837 849
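A hedged example of the two filter styles (POSIX shell commands assumed; 'tr' and 'sed' are illustrative):

# pipe: feed the string to the command's stdin, read its stdout
assert filter('abc', 'pipe: tr a-z A-Z') == 'ABC'

# tempfile: INFILE/OUTFILE in the template are replaced with real paths
out = filter('abc\n', 'tempfile: sed s/abc/xyz/ INFILE > OUTFILE')
# out == 'xyz\n'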
838 850 def binary(s):
839 851 """return true if a string is binary data"""
840 852 return bool(s and '\0' in s)
841 853
842 854 def increasingchunks(source, min=1024, max=65536):
843 855 '''return no less than min bytes per chunk while data remains,
844 856 doubling min after each chunk until it reaches max'''
845 857 def log2(x):
846 858 if not x:
847 859 return 0
848 860 i = 0
849 861 while x:
850 862 x >>= 1
851 863 i += 1
852 864 return i - 1
853 865
854 866 buf = []
855 867 blen = 0
856 868 for chunk in source:
857 869 buf.append(chunk)
858 870 blen += len(chunk)
859 871 if blen >= min:
860 872 if min < max:
861 873 min = min << 1
862 874 nmin = 1 << log2(blen)
863 875 if nmin > min:
864 876 min = nmin
865 877 if min > max:
866 878 min = max
867 879 yield ''.join(buf)
868 880 blen = 0
869 881 buf = []
870 882 if buf:
871 883 yield ''.join(buf)
872 884
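A concrete trace of the doubling behaviour (chunk sizes worked out by hand from the code above):

chunks = ['x' * 500] * 20
sizes = [len(c) for c in increasingchunks(chunks, min=1024, max=4096)]
# min doubles 1024 -> 2048 -> 4096 after each yield, so the 500-byte
# inputs are coalesced into progressively larger output chunks
assert sizes == [1500, 2500, 4500, 1500]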
873 885 Abort = error.Abort
874 886
875 887 def always(fn):
876 888 return True
877 889
878 890 def never(fn):
879 891 return False
880 892
881 893 def nogc(func):
882 894 """disable garbage collector
883 895
884 896 Python's garbage collector triggers a GC each time a certain number of
885 897 container objects (the number being defined by gc.get_threshold()) are
886 898 allocated even when marked not to be tracked by the collector. Tracking has
887 899 no effect on when GCs are triggered, only on what objects the GC looks
888 900 into. As a workaround, disable GC while building complex (huge)
889 901 containers.
890 902
891 903 This garbage collector issue has been fixed in 2.7.
892 904 """
893 905 if sys.version_info >= (2, 7):
894 906 return func
895 907 def wrapper(*args, **kwargs):
896 908 gcenabled = gc.isenabled()
897 909 gc.disable()
898 910 try:
899 911 return func(*args, **kwargs)
900 912 finally:
901 913 if gcenabled:
902 914 gc.enable()
903 915 return wrapper
904 916
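Intended use is as a decorator around container-heavy builders; a minimal sketch (on Python >= 2.7 the decorator is a no-op, as the code above shows):

@nogc
def buildindex(n):
    # the collector stays disabled while this allocates many containers
    return dict((i, [i]) for i in xrange(n))

index = buildindex(100000)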
905 917 def pathto(root, n1, n2):
906 918 '''return the relative path from one place to another.
907 919 root should use os.sep to separate directories
908 920 n1 should use os.sep to separate directories
909 921 n2 should use "/" to separate directories
910 922 returns an os.sep-separated path.
911 923
912 924 If n1 is a relative path, it's assumed it's
913 925 relative to root.
914 926 n2 should always be relative to root.
915 927 '''
916 928 if not n1:
917 929 return localpath(n2)
918 930 if os.path.isabs(n1):
919 931 if os.path.splitdrive(root)[0] != os.path.splitdrive(n1)[0]:
920 932 return os.path.join(root, localpath(n2))
921 933 n2 = '/'.join((pconvert(root), n2))
922 934 a, b = splitpath(n1), n2.split('/')
923 935 a.reverse()
924 936 b.reverse()
925 937 while a and b and a[-1] == b[-1]:
926 938 a.pop()
927 939 b.pop()
928 940 b.reverse()
929 941 return pycompat.ossep.join((['..'] * len(a)) + b) or '.'
930 942
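A quick concrete case (POSIX separators assumed): walking from root/a/b up two levels and back down into x/y:

assert pathto('/repo', 'a/b', 'x/y') == '../../x/y'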
931 943 def mainfrozen():
932 944 """return True if we are a frozen executable.
933 945
934 946 The code supports py2exe (most common, Windows only) and tools/freeze
935 947 (portable, not much used).
936 948 """
937 949 return (safehasattr(sys, "frozen") or # new py2exe
938 950 safehasattr(sys, "importers") or # old py2exe
939 951 imp.is_frozen(u"__main__")) # tools/freeze
940 952
941 953 # the location of data files matching the source code
942 954 if mainfrozen() and getattr(sys, 'frozen', None) != 'macosx_app':
943 955 # executable version (py2exe) doesn't support __file__
944 956 datapath = os.path.dirname(pycompat.sysexecutable)
945 957 else:
946 958 datapath = os.path.dirname(__file__)
947 959
948 960 if not isinstance(datapath, bytes):
949 961 datapath = pycompat.fsencode(datapath)
950 962
951 963 i18n.setdatapath(datapath)
952 964
953 965 _hgexecutable = None
954 966
955 967 def hgexecutable():
956 968 """return location of the 'hg' executable.
957 969
958 970 Defaults to $HG or 'hg' in the search path.
959 971 """
960 972 if _hgexecutable is None:
961 973 hg = encoding.environ.get('HG')
962 974 mainmod = sys.modules['__main__']
963 975 if hg:
964 976 _sethgexecutable(hg)
965 977 elif mainfrozen():
966 978 if getattr(sys, 'frozen', None) == 'macosx_app':
967 979 # Env variable set by py2app
968 980 _sethgexecutable(encoding.environ['EXECUTABLEPATH'])
969 981 else:
970 982 _sethgexecutable(pycompat.sysexecutable)
971 983 elif os.path.basename(getattr(mainmod, '__file__', '')) == 'hg':
972 984 _sethgexecutable(mainmod.__file__)
973 985 else:
974 986 exe = findexe('hg') or os.path.basename(sys.argv[0])
975 987 _sethgexecutable(exe)
976 988 return _hgexecutable
977 989
978 990 def _sethgexecutable(path):
979 991 """set location of the 'hg' executable"""
980 992 global _hgexecutable
981 993 _hgexecutable = path
982 994
983 995 def _isstdout(f):
984 996 fileno = getattr(f, 'fileno', None)
985 997 return fileno and fileno() == sys.__stdout__.fileno()
986 998
987 999 def shellenviron(environ=None):
988 1000 """return environ with optional override, useful for shelling out"""
989 1001 def py2shell(val):
990 1002 'convert python object into string that is useful to shell'
991 1003 if val is None or val is False:
992 1004 return '0'
993 1005 if val is True:
994 1006 return '1'
995 1007 return str(val)
996 1008 env = dict(encoding.environ)
997 1009 if environ:
998 1010 env.update((k, py2shell(v)) for k, v in environ.iteritems())
999 1011 env['HG'] = hgexecutable()
1000 1012 return env
1001 1013
1002 1014 def system(cmd, environ=None, cwd=None, onerr=None, errprefix=None, out=None):
1003 1015 '''enhanced shell command execution.
1004 1016 run with environment maybe modified, maybe in different dir.
1005 1017
1006 1018 if command fails and onerr is None, return status, else raise onerr
1007 1019 object as exception.
1008 1020
1009 1021 if out is specified, it is assumed to be a file-like object that has a
1010 1022 write() method. stdout and stderr will be redirected to out.'''
1011 1023 try:
1012 1024 stdout.flush()
1013 1025 except Exception:
1014 1026 pass
1015 1027 origcmd = cmd
1016 1028 cmd = quotecommand(cmd)
1017 1029 if pycompat.sysplatform == 'plan9' and (sys.version_info[0] == 2
1018 1030 and sys.version_info[1] < 7):
1019 1031 # subprocess kludge to work around issues in half-baked Python
1020 1032 # ports, notably bichued/python:
1021 1033 if not cwd is None:
1022 1034 os.chdir(cwd)
1023 1035 rc = os.system(cmd)
1024 1036 else:
1025 1037 env = shellenviron(environ)
1026 1038 if out is None or _isstdout(out):
1027 1039 rc = subprocess.call(cmd, shell=True, close_fds=closefds,
1028 1040 env=env, cwd=cwd)
1029 1041 else:
1030 1042 proc = subprocess.Popen(cmd, shell=True, close_fds=closefds,
1031 1043 env=env, cwd=cwd, stdout=subprocess.PIPE,
1032 1044 stderr=subprocess.STDOUT)
1033 1045 for line in iter(proc.stdout.readline, ''):
1034 1046 out.write(line)
1035 1047 proc.wait()
1036 1048 rc = proc.returncode
1037 1049 if pycompat.sysplatform == 'OpenVMS' and rc & 1:
1038 1050 rc = 0
1039 1051 if rc and onerr:
1040 1052 errmsg = '%s %s' % (os.path.basename(origcmd.split(None, 1)[0]),
1041 1053 explainexit(rc)[0])
1042 1054 if errprefix:
1043 1055 errmsg = '%s: %s' % (errprefix, errmsg)
1044 1056 raise onerr(errmsg)
1045 1057 return rc
1046 1058
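A hedged example of the 'out' redirection described in the docstring ('echo' assumed available in the shell):

buf = stringio()
rc = system('echo hi', out=buf)       # stdout is captured via out.write()
assert rc == 0
assert buf.getvalue() == 'hi\n'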
1047 1059 def checksignature(func):
1048 1060 '''wrap a function with code to check for calling errors'''
1049 1061 def check(*args, **kwargs):
1050 1062 try:
1051 1063 return func(*args, **kwargs)
1052 1064 except TypeError:
1053 1065 if len(traceback.extract_tb(sys.exc_info()[2])) == 1:
1054 1066 raise error.SignatureError
1055 1067 raise
1056 1068
1057 1069 return check
1058 1070
1059 1071 def copyfile(src, dest, hardlink=False, copystat=False, checkambig=False):
1060 1072 '''copy a file, preserving mode and optionally other stat info like
1061 1073 atime/mtime
1062 1074
1063 1075 checkambig argument is used with filestat, and is useful only if
1064 1076 destination file is guarded by any lock (e.g. repo.lock or
1065 1077 repo.wlock).
1066 1078
1067 1079 copystat and checkambig should be exclusive.
1068 1080 '''
1069 1081 assert not (copystat and checkambig)
1070 1082 oldstat = None
1071 1083 if os.path.lexists(dest):
1072 1084 if checkambig:
1073 1085 oldstat = checkambig and filestat(dest)
1074 1086 unlink(dest)
1075 1087 # hardlinks are problematic on CIFS, quietly ignore this flag
1076 1088 # until we find a way to work around it cleanly (issue4546)
1077 1089 if False and hardlink:
1078 1090 try:
1079 1091 oslink(src, dest)
1080 1092 return
1081 1093 except (IOError, OSError):
1082 1094 pass # fall back to normal copy
1083 1095 if os.path.islink(src):
1084 1096 os.symlink(os.readlink(src), dest)
1085 1097 # copytime is ignored for symlinks, but in general copytime isn't needed
1086 1098 # for them anyway
1087 1099 else:
1088 1100 try:
1089 1101 shutil.copyfile(src, dest)
1090 1102 if copystat:
1091 1103 # copystat also copies mode
1092 1104 shutil.copystat(src, dest)
1093 1105 else:
1094 1106 shutil.copymode(src, dest)
1095 1107 if oldstat and oldstat.stat:
1096 1108 newstat = filestat(dest)
1097 1109 if newstat.isambig(oldstat):
1098 1110 # stat of copied file is ambiguous to original one
1099 1111 advanced = (oldstat.stat.st_mtime + 1) & 0x7fffffff
1100 1112 os.utime(dest, (advanced, advanced))
1101 1113 except shutil.Error as inst:
1102 1114 raise Abort(str(inst))
1103 1115
1104 1116 def copyfiles(src, dst, hardlink=None, progress=lambda t, pos: None):
1105 1117 """Copy a directory tree using hardlinks if possible."""
1106 1118 num = 0
1107 1119
1108 1120 if hardlink is None:
1109 1121 hardlink = (os.stat(src).st_dev ==
1110 1122 os.stat(os.path.dirname(dst)).st_dev)
1111 1123 if hardlink:
1112 1124 topic = _('linking')
1113 1125 else:
1114 1126 topic = _('copying')
1115 1127
1116 1128 if os.path.isdir(src):
1117 1129 os.mkdir(dst)
1118 1130 for name, kind in osutil.listdir(src):
1119 1131 srcname = os.path.join(src, name)
1120 1132 dstname = os.path.join(dst, name)
1121 1133 def nprog(t, pos):
1122 1134 if pos is not None:
1123 1135 return progress(t, pos + num)
1124 1136 hardlink, n = copyfiles(srcname, dstname, hardlink, progress=nprog)
1125 1137 num += n
1126 1138 else:
1127 1139 if hardlink:
1128 1140 try:
1129 1141 oslink(src, dst)
1130 1142 except (IOError, OSError):
1131 1143 hardlink = False
1132 1144 shutil.copy(src, dst)
1133 1145 else:
1134 1146 shutil.copy(src, dst)
1135 1147 num += 1
1136 1148 progress(topic, num)
1137 1149 progress(topic, None)
1138 1150
1139 1151 return hardlink, num
1140 1152
1141 1153 _winreservednames = '''con prn aux nul
1142 1154 com1 com2 com3 com4 com5 com6 com7 com8 com9
1143 1155 lpt1 lpt2 lpt3 lpt4 lpt5 lpt6 lpt7 lpt8 lpt9'''.split()
1144 1156 _winreservedchars = ':*?"<>|'
1145 1157 def checkwinfilename(path):
1146 1158 r'''Check that the base-relative path is a valid filename on Windows.
1147 1159 Returns None if the path is ok, or a UI string describing the problem.
1148 1160
1149 1161 >>> checkwinfilename("just/a/normal/path")
1150 1162 >>> checkwinfilename("foo/bar/con.xml")
1151 1163 "filename contains 'con', which is reserved on Windows"
1152 1164 >>> checkwinfilename("foo/con.xml/bar")
1153 1165 "filename contains 'con', which is reserved on Windows"
1154 1166 >>> checkwinfilename("foo/bar/xml.con")
1155 1167 >>> checkwinfilename("foo/bar/AUX/bla.txt")
1156 1168 "filename contains 'AUX', which is reserved on Windows"
1157 1169 >>> checkwinfilename("foo/bar/bla:.txt")
1158 1170 "filename contains ':', which is reserved on Windows"
1159 1171 >>> checkwinfilename("foo/bar/b\07la.txt")
1160 1172 "filename contains '\\x07', which is invalid on Windows"
1161 1173 >>> checkwinfilename("foo/bar/bla ")
1162 1174 "filename ends with ' ', which is not allowed on Windows"
1163 1175 >>> checkwinfilename("../bar")
1164 1176 >>> checkwinfilename("foo\\")
1165 1177 "filename ends with '\\', which is invalid on Windows"
1166 1178 >>> checkwinfilename("foo\\/bar")
1167 1179 "directory name ends with '\\', which is invalid on Windows"
1168 1180 '''
1169 1181 if path.endswith('\\'):
1170 1182 return _("filename ends with '\\', which is invalid on Windows")
1171 1183 if '\\/' in path:
1172 1184 return _("directory name ends with '\\', which is invalid on Windows")
1173 1185 for n in path.replace('\\', '/').split('/'):
1174 1186 if not n:
1175 1187 continue
1176 1188 for c in n:
1177 1189 if c in _winreservedchars:
1178 1190 return _("filename contains '%s', which is reserved "
1179 1191 "on Windows") % c
1180 1192 if ord(c) <= 31:
1181 1193 return _("filename contains %r, which is invalid "
1182 1194 "on Windows") % c
1183 1195 base = n.split('.')[0]
1184 1196 if base and base.lower() in _winreservednames:
1185 1197 return _("filename contains '%s', which is reserved "
1186 1198 "on Windows") % base
1187 1199 t = n[-1]
1188 1200 if t in '. ' and n not in '..':
1189 1201 return _("filename ends with '%s', which is not allowed "
1190 1202 "on Windows") % t
1191 1203
1192 1204 if pycompat.osname == 'nt':
1193 1205 checkosfilename = checkwinfilename
1194 1206 else:
1195 1207 checkosfilename = platform.checkosfilename
1196 1208
1197 1209 def makelock(info, pathname):
1198 1210 try:
1199 1211 return os.symlink(info, pathname)
1200 1212 except OSError as why:
1201 1213 if why.errno == errno.EEXIST:
1202 1214 raise
1203 1215 except AttributeError: # no symlink in os
1204 1216 pass
1205 1217
1206 1218 ld = os.open(pathname, os.O_CREAT | os.O_WRONLY | os.O_EXCL)
1207 1219 os.write(ld, info)
1208 1220 os.close(ld)
1209 1221
1210 1222 def readlock(pathname):
1211 1223 try:
1212 1224 return os.readlink(pathname)
1213 1225 except OSError as why:
1214 1226 if why.errno not in (errno.EINVAL, errno.ENOSYS):
1215 1227 raise
1216 1228 except AttributeError: # no symlink in os
1217 1229 pass
1218 1230 fp = posixfile(pathname)
1219 1231 r = fp.read()
1220 1232 fp.close()
1221 1233 return r
1222 1234
1223 1235 def fstat(fp):
1224 1236 '''stat file object that may not have fileno method.'''
1225 1237 try:
1226 1238 return os.fstat(fp.fileno())
1227 1239 except AttributeError:
1228 1240 return os.stat(fp.name)
1229 1241
1230 1242 # File system features
1231 1243
1232 1244 def fscasesensitive(path):
1233 1245 """
1234 1246 Return true if the given path is on a case-sensitive filesystem
1235 1247
1236 1248 Requires a path (like /foo/.hg) ending with a foldable final
1237 1249 directory component.
1238 1250 """
1239 1251 s1 = os.lstat(path)
1240 1252 d, b = os.path.split(path)
1241 1253 b2 = b.upper()
1242 1254 if b == b2:
1243 1255 b2 = b.lower()
1244 1256 if b == b2:
1245 1257 return True # no evidence against case sensitivity
1246 1258 p2 = os.path.join(d, b2)
1247 1259 try:
1248 1260 s2 = os.lstat(p2)
1249 1261 if s2 == s1:
1250 1262 return False
1251 1263 return True
1252 1264 except OSError:
1253 1265 return True
1254 1266
1255 1267 try:
1256 1268 import re2
1257 1269 _re2 = None
1258 1270 except ImportError:
1259 1271 _re2 = False
1260 1272
1261 1273 class _re(object):
1262 1274 def _checkre2(self):
1263 1275 global _re2
1264 1276 try:
1265 1277 # check if match works, see issue3964
1266 1278 _re2 = bool(re2.match(r'\[([^\[]+)\]', '[ui]'))
1267 1279 except ImportError:
1268 1280 _re2 = False
1269 1281
1270 1282 def compile(self, pat, flags=0):
1271 1283 '''Compile a regular expression, using re2 if possible
1272 1284
1273 1285 For best performance, use only re2-compatible regexp features. The
1274 1286 only flags from the re module that are re2-compatible are
1275 1287 IGNORECASE and MULTILINE.'''
1276 1288 if _re2 is None:
1277 1289 self._checkre2()
1278 1290 if _re2 and (flags & ~(remod.IGNORECASE | remod.MULTILINE)) == 0:
1279 1291 if flags & remod.IGNORECASE:
1280 1292 pat = '(?i)' + pat
1281 1293 if flags & remod.MULTILINE:
1282 1294 pat = '(?m)' + pat
1283 1295 try:
1284 1296 return re2.compile(pat)
1285 1297 except re2.error:
1286 1298 pass
1287 1299 return remod.compile(pat, flags)
1288 1300
1289 1301 @propertycache
1290 1302 def escape(self):
1291 1303 '''Return the version of escape corresponding to self.compile.
1292 1304
1293 1305 This is imperfect because whether re2 or re is used for a particular
1294 1306 function depends on the flags, etc, but it's the best we can do.
1295 1307 '''
1296 1308 global _re2
1297 1309 if _re2 is None:
1298 1310 self._checkre2()
1299 1311 if _re2:
1300 1312 return re2.escape
1301 1313 else:
1302 1314 return remod.escape
1303 1315
1304 1316 re = _re()
1305 1317
1306 1318 _fspathcache = {}
1307 1319 def fspath(name, root):
1308 1320 '''Get name in the case stored in the filesystem
1309 1321
1310 1322 The name should be relative to root, and be normcase-ed for efficiency.
1311 1323
1312 1324 Note that this function is unnecessary, and should not be
1313 1325 called, for case-sensitive filesystems (simply because it's expensive).
1314 1326
1315 1327 The root should be normcase-ed, too.
1316 1328 '''
1317 1329 def _makefspathcacheentry(dir):
1318 1330 return dict((normcase(n), n) for n in os.listdir(dir))
1319 1331
1320 1332 seps = pycompat.ossep
1321 1333 if pycompat.osaltsep:
1322 1334 seps = seps + pycompat.osaltsep
1323 1335 # Protect backslashes. This gets silly very quickly.
1324 1336 seps = seps.replace('\\', '\\\\')
1325 1337 pattern = remod.compile(r'([^%s]+)|([%s]+)' % (seps, seps))
1326 1338 dir = os.path.normpath(root)
1327 1339 result = []
1328 1340 for part, sep in pattern.findall(name):
1329 1341 if sep:
1330 1342 result.append(sep)
1331 1343 continue
1332 1344
1333 1345 if dir not in _fspathcache:
1334 1346 _fspathcache[dir] = _makefspathcacheentry(dir)
1335 1347 contents = _fspathcache[dir]
1336 1348
1337 1349 found = contents.get(part)
1338 1350 if not found:
1339 1351 # retry "once per directory" per "dirstate.walk" which
1340 1352 # may take place for each patch of "hg qpush", for example
1341 1353 _fspathcache[dir] = contents = _makefspathcacheentry(dir)
1342 1354 found = contents.get(part)
1343 1355
1344 1356 result.append(found or part)
1345 1357 dir = os.path.join(dir, part)
1346 1358
1347 1359 return ''.join(result)
1348 1360
1349 1361 def checknlink(testfile):
1350 1362 '''check whether hardlink count reporting works properly'''
1351 1363
1352 1364 # testfile may be open, so we need a separate file for checking to
1353 1365 # work around issue2543 (or testfile may get lost on Samba shares)
1354 1366 f1 = testfile + ".hgtmp1"
1355 1367 if os.path.lexists(f1):
1356 1368 return False
1357 1369 try:
1358 1370 posixfile(f1, 'w').close()
1359 1371 except IOError:
1360 1372 try:
1361 1373 os.unlink(f1)
1362 1374 except OSError:
1363 1375 pass
1364 1376 return False
1365 1377
1366 1378 f2 = testfile + ".hgtmp2"
1367 1379 fd = None
1368 1380 try:
1369 1381 oslink(f1, f2)
1370 1382 # nlinks() may behave differently for files on Windows shares if
1371 1383 # the file is open.
1372 1384 fd = posixfile(f2)
1373 1385 return nlinks(f2) > 1
1374 1386 except OSError:
1375 1387 return False
1376 1388 finally:
1377 1389 if fd is not None:
1378 1390 fd.close()
1379 1391 for f in (f1, f2):
1380 1392 try:
1381 1393 os.unlink(f)
1382 1394 except OSError:
1383 1395 pass
1384 1396
1385 1397 def endswithsep(path):
1386 1398 '''Check path ends with os.sep or os.altsep.'''
1387 1399 return (path.endswith(pycompat.ossep)
1388 1400 or pycompat.osaltsep and path.endswith(pycompat.osaltsep))
1389 1401
1390 1402 def splitpath(path):
1391 1403 '''Split path by os.sep.
1392 1404 Note that this function does not use os.altsep because this is
1393 1405 an alternative to a simple "xxx.split(os.sep)".
1394 1406 It is recommended to use os.path.normpath() before using this
1395 1407 function if needed.'''
1396 1408 return path.split(pycompat.ossep)
1397 1409
1398 1410 def gui():
1399 1411 '''Are we running in a GUI?'''
1400 1412 if pycompat.sysplatform == 'darwin':
1401 1413 if 'SSH_CONNECTION' in encoding.environ:
1402 1414 # handle SSH access to a box where the user is logged in
1403 1415 return False
1404 1416 elif getattr(osutil, 'isgui', None):
1405 1417 # check if a CoreGraphics session is available
1406 1418 return osutil.isgui()
1407 1419 else:
1408 1420 # pure build; use a safe default
1409 1421 return True
1410 1422 else:
1411 1423 return pycompat.osname == "nt" or encoding.environ.get("DISPLAY")
1412 1424
1413 1425 def mktempcopy(name, emptyok=False, createmode=None):
1414 1426 """Create a temporary file with the same contents from name
1415 1427
1416 1428 The permission bits are copied from the original file.
1417 1429
1418 1430 If the temporary file is going to be truncated immediately, you
1419 1431 can use emptyok=True as an optimization.
1420 1432
1421 1433 Returns the name of the temporary file.
1422 1434 """
1423 1435 d, fn = os.path.split(name)
1424 1436 fd, temp = tempfile.mkstemp(prefix='.%s-' % fn, dir=d)
1425 1437 os.close(fd)
1426 1438 # Temporary files are created with mode 0600, which is usually not
1427 1439 # what we want. If the original file already exists, just copy
1428 1440 # its mode. Otherwise, manually obey umask.
1429 1441 copymode(name, temp, createmode)
1430 1442 if emptyok:
1431 1443 return temp
1432 1444 try:
1433 1445 try:
1434 1446 ifp = posixfile(name, "rb")
1435 1447 except IOError as inst:
1436 1448 if inst.errno == errno.ENOENT:
1437 1449 return temp
1438 1450 if not getattr(inst, 'filename', None):
1439 1451 inst.filename = name
1440 1452 raise
1441 1453 ofp = posixfile(temp, "wb")
1442 1454 for chunk in filechunkiter(ifp):
1443 1455 ofp.write(chunk)
1444 1456 ifp.close()
1445 1457 ofp.close()
1446 1458 except: # re-raises
1447 1459 try: os.unlink(temp)
1448 1460 except OSError: pass
1449 1461 raise
1450 1462 return temp
1451 1463
1452 1464 class filestat(object):
1453 1465 """help to exactly detect change of a file
1454 1466
1455 1467 'stat' attribute is the result of 'os.stat()' if the specified
1456 1468 'path' exists. Otherwise, it is None. This avoids a preparatory
1457 1469 'exists()' check on the caller's side of this class.
1458 1470 """
1459 1471 def __init__(self, path):
1460 1472 try:
1461 1473 self.stat = os.stat(path)
1462 1474 except OSError as err:
1463 1475 if err.errno != errno.ENOENT:
1464 1476 raise
1465 1477 self.stat = None
1466 1478
1467 1479 __hash__ = object.__hash__
1468 1480
1469 1481 def __eq__(self, old):
1470 1482 try:
1471 1483 # if ambiguity between stat of new and old file is
1472 1484 # avoided, comparison of size, ctime and mtime is enough
1473 1485 # to exactly detect change of a file regardless of platform
1474 1486 return (self.stat.st_size == old.stat.st_size and
1475 1487 self.stat.st_ctime == old.stat.st_ctime and
1476 1488 self.stat.st_mtime == old.stat.st_mtime)
1477 1489 except AttributeError:
1478 1490 return False
1479 1491
1480 1492 def isambig(self, old):
1481 1493 """Examine whether new (= self) stat is ambiguous against old one
1482 1494
1483 1495 "S[N]" below means stat of a file at N-th change:
1484 1496
1485 1497 - S[n-1].ctime < S[n].ctime: can detect change of a file
1486 1498 - S[n-1].ctime == S[n].ctime
1487 1499 - S[n-1].ctime < S[n].mtime: means natural advancing (*1)
1488 1500 - S[n-1].ctime == S[n].mtime: is ambiguous (*2)
1489 1501 - S[n-1].ctime > S[n].mtime: never occurs naturally (don't care)
1490 1502 - S[n-1].ctime > S[n].ctime: never occurs naturally (don't care)
1491 1503
1492 1504 Case (*2) above means that a file was changed twice or more
1493 1505 within the same second (= S[n-1].ctime), so comparison of
1494 1506 timestamps is ambiguous.
1495 1507
1496 1508 The basic idea to avoid such ambiguity is to "advance mtime by
1497 1509 1 sec, if the timestamp is ambiguous".
1498 1510
1499 1511 But advancing mtime only in case (*2) doesn't work as
1500 1512 expected, because naturally advanced S[n].mtime in case (*1)
1501 1513 might be equal to manually advanced S[n-1 or earlier].mtime.
1502 1514
1503 1515 Therefore, all "S[n-1].ctime == S[n].ctime" cases should be
1504 1516 treated as ambiguous regardless of mtime, to avoid overlooking
1505 1517 conflicts between such mtimes.
1506 1518
1507 1519 Advancing mtime "if isambig(oldstat)" ensures "S[n-1].mtime !=
1508 1520 S[n].mtime", even if size of a file isn't changed.
1509 1521 """
1510 1522 try:
1511 1523 return (self.stat.st_ctime == old.stat.st_ctime)
1512 1524 except AttributeError:
1513 1525 return False
1514 1526
1515 1527 def avoidambig(self, path, old):
1516 1528 """Change file stat of specified path to avoid ambiguity
1517 1529
1518 1530 'old' should be previous filestat of 'path'.
1519 1531
1520 1532     This skips avoiding ambiguity if the process doesn't have
1521 1533     appropriate privileges for 'path'.
1522 1534 """
1523 1535 advanced = (old.stat.st_mtime + 1) & 0x7fffffff
1524 1536 try:
1525 1537 os.utime(path, (advanced, advanced))
1526 1538 except OSError as inst:
1527 1539 if inst.errno == errno.EPERM:
1528 1540 # utime() on the file created by another user causes EPERM,
1529 1541 # if a process doesn't have appropriate privileges
1530 1542 return
1531 1543 raise
1532 1544
1533 1545 def __ne__(self, other):
1534 1546 return not self == other
1535 1547
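# A minimal usage sketch for the ambiguity handling above (the path is
# hypothetical, and per the docstrings this only makes sense while the
# file is guarded by a lock):
#
#     oldstat = filestat('some/cache/file')
#     # ... rewrite 'some/cache/file' in place ...
#     newstat = filestat('some/cache/file')
#     if newstat.isambig(oldstat):
#         newstat.avoidambig('some/cache/file', oldstat)
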
1536 1548 class atomictempfile(object):
1537 1549 '''writable file object that atomically updates a file
1538 1550
1539 1551 All writes will go to a temporary copy of the original file. Call
1540 1552 close() when you are done writing, and atomictempfile will rename
1541 1553 the temporary copy to the original name, making the changes
1542 1554 visible. If the object is destroyed without being closed, all your
1543 1555 writes are discarded.
1544 1556
1545 1557     The checkambig argument of the constructor is used with filestat,
1546 1558     and is useful only if the target file is guarded by a lock
1547 1559     (e.g. repo.lock or repo.wlock).
1548 1560 '''
1549 1561 def __init__(self, name, mode='w+b', createmode=None, checkambig=False):
1550 1562 self.__name = name # permanent name
1551 1563 self._tempname = mktempcopy(name, emptyok=('w' in mode),
1552 1564 createmode=createmode)
1553 1565 self._fp = posixfile(self._tempname, mode)
1554 1566 self._checkambig = checkambig
1555 1567
1556 1568 # delegated methods
1557 1569 self.read = self._fp.read
1558 1570 self.write = self._fp.write
1559 1571 self.seek = self._fp.seek
1560 1572 self.tell = self._fp.tell
1561 1573 self.fileno = self._fp.fileno
1562 1574
1563 1575 def close(self):
1564 1576 if not self._fp.closed:
1565 1577 self._fp.close()
1566 1578 filename = localpath(self.__name)
1567 1579 oldstat = self._checkambig and filestat(filename)
1568 1580 if oldstat and oldstat.stat:
1569 1581 rename(self._tempname, filename)
1570 1582 newstat = filestat(filename)
1571 1583 if newstat.isambig(oldstat):
1572 1584 # stat of changed file is ambiguous to original one
1573 1585 advanced = (oldstat.stat.st_mtime + 1) & 0x7fffffff
1574 1586 os.utime(filename, (advanced, advanced))
1575 1587 else:
1576 1588 rename(self._tempname, filename)
1577 1589
1578 1590 def discard(self):
1579 1591 if not self._fp.closed:
1580 1592 try:
1581 1593 os.unlink(self._tempname)
1582 1594 except OSError:
1583 1595 pass
1584 1596 self._fp.close()
1585 1597
1586 1598 def __del__(self):
1587 1599 if safehasattr(self, '_fp'): # constructor actually did something
1588 1600 self.discard()
1589 1601
1590 1602 def __enter__(self):
1591 1603 return self
1592 1604
1593 1605 def __exit__(self, exctype, excvalue, traceback):
1594 1606 if exctype is not None:
1595 1607 self.discard()
1596 1608 else:
1597 1609 self.close()
1598 1610
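# A hedged usage sketch: writes go to a temporary copy and only become
# visible on a clean exit from the 'with' block (the filename and
# payload are illustrative only):
#
#     with atomictempfile('data/state', 'wb') as fp:
#         fp.write('new contents')
#     # an exception inside the block triggers discard(), leaving the
#     # original file untouched
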
1599 1611 def makedirs(name, mode=None, notindexed=False):
1600 1612 """recursive directory creation with parent mode inheritance
1601 1613
1602 1614 Newly created directories are marked as "not to be indexed by
1603 1615 the content indexing service", if ``notindexed`` is specified
1604 1616 for "write" mode access.
1605 1617 """
1606 1618 try:
1607 1619 makedir(name, notindexed)
1608 1620 except OSError as err:
1609 1621 if err.errno == errno.EEXIST:
1610 1622 return
1611 1623 if err.errno != errno.ENOENT or not name:
1612 1624 raise
1613 1625 parent = os.path.dirname(os.path.abspath(name))
1614 1626 if parent == name:
1615 1627 raise
1616 1628 makedirs(parent, mode, notindexed)
1617 1629 try:
1618 1630 makedir(name, notindexed)
1619 1631 except OSError as err:
1620 1632 # Catch EEXIST to handle races
1621 1633 if err.errno == errno.EEXIST:
1622 1634 return
1623 1635 raise
1624 1636 if mode is not None:
1625 1637 os.chmod(name, mode)
1626 1638
1627 1639 def readfile(path):
1628 1640 with open(path, 'rb') as fp:
1629 1641 return fp.read()
1630 1642
1631 1643 def writefile(path, text):
1632 1644 with open(path, 'wb') as fp:
1633 1645 fp.write(text)
1634 1646
1635 1647 def appendfile(path, text):
1636 1648 with open(path, 'ab') as fp:
1637 1649 fp.write(text)
1638 1650
1639 1651 class chunkbuffer(object):
1640 1652     """Allow arbitrarily sized reads of data to be made efficiently
1641 1653     from an iterator over chunks of arbitrary size."""
1642 1654
1643 1655 def __init__(self, in_iter):
1644 1656         """in_iter is the iterator that's iterating over the input
1645 1657         chunks."""
1646 1658 def splitbig(chunks):
1647 1659 for chunk in chunks:
1648 1660 if len(chunk) > 2**20:
1649 1661 pos = 0
1650 1662 while pos < len(chunk):
1651 1663 end = pos + 2 ** 18
1652 1664 yield chunk[pos:end]
1653 1665 pos = end
1654 1666 else:
1655 1667 yield chunk
1656 1668 self.iter = splitbig(in_iter)
1657 1669 self._queue = collections.deque()
1658 1670 self._chunkoffset = 0
1659 1671
1660 1672 def read(self, l=None):
1661 1673 """Read L bytes of data from the iterator of chunks of data.
1662 1674 Returns less than L bytes if the iterator runs dry.
1663 1675
1664 1676         If the size parameter is omitted, read everything."""
1665 1677 if l is None:
1666 1678 return ''.join(self.iter)
1667 1679
1668 1680 left = l
1669 1681 buf = []
1670 1682 queue = self._queue
1671 1683 while left > 0:
1672 1684 # refill the queue
1673 1685 if not queue:
1674 1686 target = 2**18
1675 1687 for chunk in self.iter:
1676 1688 queue.append(chunk)
1677 1689 target -= len(chunk)
1678 1690 if target <= 0:
1679 1691 break
1680 1692 if not queue:
1681 1693 break
1682 1694
1683 1695 # The easy way to do this would be to queue.popleft(), modify the
1684 1696 # chunk (if necessary), then queue.appendleft(). However, for cases
1685 1697 # where we read partial chunk content, this incurs 2 dequeue
1686 1698 # mutations and creates a new str for the remaining chunk in the
1687 1699 # queue. Our code below avoids this overhead.
1688 1700
1689 1701 chunk = queue[0]
1690 1702 chunkl = len(chunk)
1691 1703 offset = self._chunkoffset
1692 1704
1693 1705 # Use full chunk.
1694 1706 if offset == 0 and left >= chunkl:
1695 1707 left -= chunkl
1696 1708 queue.popleft()
1697 1709 buf.append(chunk)
1698 1710 # self._chunkoffset remains at 0.
1699 1711 continue
1700 1712
1701 1713 chunkremaining = chunkl - offset
1702 1714
1703 1715 # Use all of unconsumed part of chunk.
1704 1716 if left >= chunkremaining:
1705 1717 left -= chunkremaining
1706 1718 queue.popleft()
1707 1719 # offset == 0 is enabled by block above, so this won't merely
1708 1720 # copy via ``chunk[0:]``.
1709 1721 buf.append(chunk[offset:])
1710 1722 self._chunkoffset = 0
1711 1723
1712 1724 # Partial chunk needed.
1713 1725 else:
1714 1726 buf.append(chunk[offset:offset + left])
1715 1727 self._chunkoffset += left
1716 1728 left -= chunkremaining
1717 1729
1718 1730 return ''.join(buf)
1719 1731
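# Sketch of the intended use: wrap an iterator of arbitrarily sized
# chunks and consume it in fixed-size reads (the data is illustrative):
#
#     buf = chunkbuffer(iter(['abc', 'defg', 'h']))
#     buf.read(4)   # -> 'abcd'
#     buf.read(10)  # -> 'efgh' (the iterator ran dry, so fewer bytes)
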
1720 1732 def filechunkiter(f, size=131072, limit=None):
1721 1733     """Create a generator that yields the file's data, size bytes
1722 1734     (default 131072) at a time, up to an optional limit (the default
1723 1735     is to read all data). A chunk may be shorter than size bytes if
1724 1736     it is the last chunk in the file, or if the file is a socket or
1725 1737     some other type of file that sometimes returns less data than
1726 1738     requested."""
1727 1739 assert size >= 0
1728 1740 assert limit is None or limit >= 0
1729 1741 while True:
1730 1742 if limit is None:
1731 1743 nbytes = size
1732 1744 else:
1733 1745 nbytes = min(limit, size)
1734 1746 s = nbytes and f.read(nbytes)
1735 1747 if not s:
1736 1748 break
1737 1749 if limit:
1738 1750 limit -= len(s)
1739 1751 yield s
1740 1752
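# The two helpers above compose naturally: filechunkiter turns a file
# into bounded chunks, and chunkbuffer re-chunks any iterator. A hedged
# sketch with a hypothetical path:
#
#     fp = posixfile('some/big/file', 'rb')
#     buf = chunkbuffer(filechunkiter(fp, size=65536))
#     header = buf.read(512)
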
1741 1753 def makedate(timestamp=None):
1742 1754 '''Return a unix timestamp (or the current time) as a (unixtime,
1743 1755 offset) tuple based off the local timezone.'''
1744 1756 if timestamp is None:
1745 1757 timestamp = time.time()
1746 1758 if timestamp < 0:
1747 1759 hint = _("check your clock")
1748 1760 raise Abort(_("negative timestamp: %d") % timestamp, hint=hint)
1749 1761 delta = (datetime.datetime.utcfromtimestamp(timestamp) -
1750 1762 datetime.datetime.fromtimestamp(timestamp))
1751 1763 tz = delta.days * 86400 + delta.seconds
1752 1764 return timestamp, tz
1753 1765
1754 1766 def datestr(date=None, format='%a %b %d %H:%M:%S %Y %1%2'):
1755 1767 """represent a (unixtime, offset) tuple as a localized time.
1756 1768 unixtime is seconds since the epoch, and offset is the time zone's
1757 1769 number of seconds away from UTC.
1758 1770
1759 1771 >>> datestr((0, 0))
1760 1772 'Thu Jan 01 00:00:00 1970 +0000'
1761 1773 >>> datestr((42, 0))
1762 1774 'Thu Jan 01 00:00:42 1970 +0000'
1763 1775 >>> datestr((-42, 0))
1764 1776 'Wed Dec 31 23:59:18 1969 +0000'
1765 1777 >>> datestr((0x7fffffff, 0))
1766 1778 'Tue Jan 19 03:14:07 2038 +0000'
1767 1779 >>> datestr((-0x80000000, 0))
1768 1780 'Fri Dec 13 20:45:52 1901 +0000'
1769 1781 """
1770 1782 t, tz = date or makedate()
1771 1783 if "%1" in format or "%2" in format or "%z" in format:
1772 1784 sign = (tz > 0) and "-" or "+"
1773 1785 minutes = abs(tz) // 60
1774 1786 q, r = divmod(minutes, 60)
1775 1787 format = format.replace("%z", "%1%2")
1776 1788 format = format.replace("%1", "%c%02d" % (sign, q))
1777 1789 format = format.replace("%2", "%02d" % r)
1778 1790 d = t - tz
1779 1791 if d > 0x7fffffff:
1780 1792 d = 0x7fffffff
1781 1793 elif d < -0x80000000:
1782 1794 d = -0x80000000
1783 1795 # Never use time.gmtime() and datetime.datetime.fromtimestamp()
1784 1796 # because they use the gmtime() system call which is buggy on Windows
1785 1797 # for negative values.
1786 1798 t = datetime.datetime(1970, 1, 1) + datetime.timedelta(seconds=d)
1787 1799 s = t.strftime(format)
1788 1800 return s
1789 1801
1790 1802 def shortdate(date=None):
1791 1803     """turn a (timestamp, tzoff) tuple into an ISO 8601 date."""
1792 1804 return datestr(date, format='%Y-%m-%d')
1793 1805
1794 1806 def parsetimezone(s):
1795 1807 """find a trailing timezone, if any, in string, and return a
1796 1808 (offset, remainder) pair"""
1797 1809
1798 1810 if s.endswith("GMT") or s.endswith("UTC"):
1799 1811 return 0, s[:-3].rstrip()
1800 1812
1801 1813 # Unix-style timezones [+-]hhmm
1802 1814 if len(s) >= 5 and s[-5] in "+-" and s[-4:].isdigit():
1803 1815 sign = (s[-5] == "+") and 1 or -1
1804 1816 hours = int(s[-4:-2])
1805 1817 minutes = int(s[-2:])
1806 1818 return -sign * (hours * 60 + minutes) * 60, s[:-5].rstrip()
1807 1819
1808 1820 # ISO8601 trailing Z
1809 1821 if s.endswith("Z") and s[-2:-1].isdigit():
1810 1822 return 0, s[:-1]
1811 1823
1812 1824 # ISO8601-style [+-]hh:mm
1813 1825 if (len(s) >= 6 and s[-6] in "+-" and s[-3] == ":" and
1814 1826 s[-5:-3].isdigit() and s[-2:].isdigit()):
1815 1827 sign = (s[-6] == "+") and 1 or -1
1816 1828 hours = int(s[-5:-3])
1817 1829 minutes = int(s[-2:])
1818 1830 return -sign * (hours * 60 + minutes) * 60, s[:-6]
1819 1831
1820 1832 return None, s
1821 1833
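# Illustrative results; note the sign convention implied by strdate
# below (unixtime = localunixtime + offset), so zones east of UTC yield
# negative offsets:
#
#     parsetimezone('12:00:00 +0200')  # -> (-7200, '12:00:00')
#     parsetimezone('12:00:00 GMT')    # -> (0, '12:00:00')
#     parsetimezone('12:00:00')        # -> (None, '12:00:00')
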
1822 1834 def strdate(string, format, defaults=[]):
1823 1835 """parse a localized time string and return a (unixtime, offset) tuple.
1824 1836 if the string cannot be parsed, ValueError is raised."""
1825 1837 # NOTE: unixtime = localunixtime + offset
1826 1838 offset, date = parsetimezone(string)
1827 1839
1828 1840 # add missing elements from defaults
1829 1841 usenow = False # default to using biased defaults
1830 1842 for part in ("S", "M", "HI", "d", "mb", "yY"): # decreasing specificity
1831 1843 found = [True for p in part if ("%"+p) in format]
1832 1844 if not found:
1833 1845 date += "@" + defaults[part][usenow]
1834 1846 format += "@%" + part[0]
1835 1847 else:
1836 1848 # We've found a specific time element, less specific time
1837 1849             # We've found a specific time element; less specific time
1838 1850             # elements are relative to today.
1839 1851
1840 1852 timetuple = time.strptime(date, format)
1841 1853 localunixtime = int(calendar.timegm(timetuple))
1842 1854 if offset is None:
1843 1855 # local timezone
1844 1856 unixtime = int(time.mktime(timetuple))
1845 1857 offset = unixtime - localunixtime
1846 1858 else:
1847 1859 unixtime = localunixtime + offset
1848 1860 return unixtime, offset
1849 1861
1850 1862 def parsedate(date, formats=None, bias=None):
1851 1863 """parse a localized date/time and return a (unixtime, offset) tuple.
1852 1864
1853 1865 The date may be a "unixtime offset" string or in one of the specified
1854 1866 formats. If the date already is a (unixtime, offset) tuple, it is returned.
1855 1867
1856 1868 >>> parsedate(' today ') == parsedate(\
1857 1869 datetime.date.today().strftime('%b %d'))
1858 1870 True
1859 1871 >>> parsedate( 'yesterday ') == parsedate((datetime.date.today() -\
1860 1872 datetime.timedelta(days=1)\
1861 1873 ).strftime('%b %d'))
1862 1874 True
1863 1875 >>> now, tz = makedate()
1864 1876 >>> strnow, strtz = parsedate('now')
1865 1877 >>> (strnow - now) < 1
1866 1878 True
1867 1879 >>> tz == strtz
1868 1880 True
1869 1881 """
1870 1882 if bias is None:
1871 1883 bias = {}
1872 1884 if not date:
1873 1885 return 0, 0
1874 1886 if isinstance(date, tuple) and len(date) == 2:
1875 1887 return date
1876 1888 if not formats:
1877 1889 formats = defaultdateformats
1878 1890 date = date.strip()
1879 1891
1880 1892 if date == 'now' or date == _('now'):
1881 1893 return makedate()
1882 1894 if date == 'today' or date == _('today'):
1883 1895 date = datetime.date.today().strftime('%b %d')
1884 1896 elif date == 'yesterday' or date == _('yesterday'):
1885 1897 date = (datetime.date.today() -
1886 1898 datetime.timedelta(days=1)).strftime('%b %d')
1887 1899
1888 1900 try:
1889 1901 when, offset = map(int, date.split(' '))
1890 1902 except ValueError:
1891 1903 # fill out defaults
1892 1904 now = makedate()
1893 1905 defaults = {}
1894 1906 for part in ("d", "mb", "yY", "HI", "M", "S"):
1895 1907 # this piece is for rounding the specific end of unknowns
1896 1908 b = bias.get(part)
1897 1909 if b is None:
1898 1910 if part[0] in "HMS":
1899 1911 b = "00"
1900 1912 else:
1901 1913 b = "0"
1902 1914
1903 1915 # this piece is for matching the generic end to today's date
1904 1916 n = datestr(now, "%" + part[0])
1905 1917
1906 1918 defaults[part] = (b, n)
1907 1919
1908 1920 for format in formats:
1909 1921 try:
1910 1922 when, offset = strdate(date, format, defaults)
1911 1923 except (ValueError, OverflowError):
1912 1924 pass
1913 1925 else:
1914 1926 break
1915 1927 else:
1916 1928 raise Abort(_('invalid date: %r') % date)
1917 1929 # validate explicit (probably user-specified) date and
1918 1930 # time zone offset. values must fit in signed 32 bits for
1919 1931 # current 32-bit linux runtimes. timezones go from UTC-12
1920 1932 # to UTC+14
1921 1933 if when < -0x80000000 or when > 0x7fffffff:
1922 1934 raise Abort(_('date exceeds 32 bits: %d') % when)
1923 1935 if offset < -50400 or offset > 43200:
1924 1936 raise Abort(_('impossible time zone offset: %d') % offset)
1925 1937 return when, offset
1926 1938
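# A sketch of the fast path: a "unixtime offset" string bypasses the
# format table entirely (values are illustrative):
#
#     parsedate('1000000000 -7200')  # -> (1000000000, -7200)
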
1927 1939 def matchdate(date):
1928 1940 """Return a function that matches a given date match specifier
1929 1941
1930 1942 Formats include:
1931 1943
1932 1944 '{date}' match a given date to the accuracy provided
1933 1945
1934 1946 '<{date}' on or before a given date
1935 1947
1936 1948 '>{date}' on or after a given date
1937 1949
1938 1950 >>> p1 = parsedate("10:29:59")
1939 1951 >>> p2 = parsedate("10:30:00")
1940 1952 >>> p3 = parsedate("10:30:59")
1941 1953 >>> p4 = parsedate("10:31:00")
1942 1954 >>> p5 = parsedate("Sep 15 10:30:00 1999")
1943 1955 >>> f = matchdate("10:30")
1944 1956 >>> f(p1[0])
1945 1957 False
1946 1958 >>> f(p2[0])
1947 1959 True
1948 1960 >>> f(p3[0])
1949 1961 True
1950 1962 >>> f(p4[0])
1951 1963 False
1952 1964 >>> f(p5[0])
1953 1965 False
1954 1966 """
1955 1967
1956 1968 def lower(date):
1957 1969 d = {'mb': "1", 'd': "1"}
1958 1970 return parsedate(date, extendeddateformats, d)[0]
1959 1971
1960 1972 def upper(date):
1961 1973 d = {'mb': "12", 'HI': "23", 'M': "59", 'S': "59"}
1962 1974 for days in ("31", "30", "29"):
1963 1975 try:
1964 1976 d["d"] = days
1965 1977 return parsedate(date, extendeddateformats, d)[0]
1966 1978 except Abort:
1967 1979 pass
1968 1980 d["d"] = "28"
1969 1981 return parsedate(date, extendeddateformats, d)[0]
1970 1982
1971 1983 date = date.strip()
1972 1984
1973 1985 if not date:
1974 1986 raise Abort(_("dates cannot consist entirely of whitespace"))
1975 1987 elif date[0] == "<":
1976 1988 if not date[1:]:
1977 1989 raise Abort(_("invalid day spec, use '<DATE'"))
1978 1990 when = upper(date[1:])
1979 1991 return lambda x: x <= when
1980 1992 elif date[0] == ">":
1981 1993 if not date[1:]:
1982 1994 raise Abort(_("invalid day spec, use '>DATE'"))
1983 1995 when = lower(date[1:])
1984 1996 return lambda x: x >= when
1985 1997 elif date[0] == "-":
1986 1998 try:
1987 1999 days = int(date[1:])
1988 2000 except ValueError:
1989 2001 raise Abort(_("invalid day spec: %s") % date[1:])
1990 2002 if days < 0:
1991 2003 raise Abort(_("%s must be nonnegative (see 'hg help dates')")
1992 2004 % date[1:])
1993 2005 when = makedate()[0] - days * 3600 * 24
1994 2006 return lambda x: x >= when
1995 2007 elif " to " in date:
1996 2008 a, b = date.split(" to ")
1997 2009 start, stop = lower(a), upper(b)
1998 2010 return lambda x: x >= start and x <= stop
1999 2011 else:
2000 2012 start, stop = lower(date), upper(date)
2001 2013 return lambda x: x >= start and x <= stop
2002 2014
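# One more illustrative form beyond the doctests above: '-N' matches
# anything from N days ago up to now:
#
#     f = matchdate('-7')
#     f(makedate()[0])  # -> True; "now" is within the last 7 days
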
2003 2015 def stringmatcher(pattern, casesensitive=True):
2004 2016 """
2005 2017 accepts a string, possibly starting with 're:' or 'literal:' prefix.
2006 2018 returns the matcher name, pattern, and matcher function.
2007 2019 missing or unknown prefixes are treated as literal matches.
2008 2020
2009 2021 helper for tests:
2010 2022 >>> def test(pattern, *tests):
2011 2023 ... kind, pattern, matcher = stringmatcher(pattern)
2012 2024 ... return (kind, pattern, [bool(matcher(t)) for t in tests])
2013 2025 >>> def itest(pattern, *tests):
2014 2026 ... kind, pattern, matcher = stringmatcher(pattern, casesensitive=False)
2015 2027 ... return (kind, pattern, [bool(matcher(t)) for t in tests])
2016 2028
2017 2029 exact matching (no prefix):
2018 2030 >>> test('abcdefg', 'abc', 'def', 'abcdefg')
2019 2031 ('literal', 'abcdefg', [False, False, True])
2020 2032
2021 2033 regex matching ('re:' prefix)
2022 2034 >>> test('re:a.+b', 'nomatch', 'fooadef', 'fooadefbar')
2023 2035 ('re', 'a.+b', [False, False, True])
2024 2036
2025 2037 force exact matches ('literal:' prefix)
2026 2038 >>> test('literal:re:foobar', 'foobar', 're:foobar')
2027 2039 ('literal', 're:foobar', [False, True])
2028 2040
2029 2041 unknown prefixes are ignored and treated as literals
2030 2042 >>> test('foo:bar', 'foo', 'bar', 'foo:bar')
2031 2043 ('literal', 'foo:bar', [False, False, True])
2032 2044
2033 2045 case insensitive regex matches
2034 2046 >>> itest('re:A.+b', 'nomatch', 'fooadef', 'fooadefBar')
2035 2047 ('re', 'A.+b', [False, False, True])
2036 2048
2037 2049 case insensitive literal matches
2038 2050 >>> itest('ABCDEFG', 'abc', 'def', 'abcdefg')
2039 2051 ('literal', 'ABCDEFG', [False, False, True])
2040 2052 """
2041 2053 if pattern.startswith('re:'):
2042 2054 pattern = pattern[3:]
2043 2055 try:
2044 2056 flags = 0
2045 2057 if not casesensitive:
2046 2058 flags = remod.I
2047 2059 regex = remod.compile(pattern, flags)
2048 2060 except remod.error as e:
2049 2061 raise error.ParseError(_('invalid regular expression: %s')
2050 2062 % e)
2051 2063 return 're', pattern, regex.search
2052 2064 elif pattern.startswith('literal:'):
2053 2065 pattern = pattern[8:]
2054 2066
2055 2067 match = pattern.__eq__
2056 2068
2057 2069 if not casesensitive:
2058 2070 ipat = encoding.lower(pattern)
2059 2071 match = lambda s: ipat == encoding.lower(s)
2060 2072 return 'literal', pattern, match
2061 2073
2062 2074 def shortuser(user):
2063 2075 """Return a short representation of a user name or email address."""
2064 2076 f = user.find('@')
2065 2077 if f >= 0:
2066 2078 user = user[:f]
2067 2079 f = user.find('<')
2068 2080 if f >= 0:
2069 2081 user = user[f + 1:]
2070 2082 f = user.find(' ')
2071 2083 if f >= 0:
2072 2084 user = user[:f]
2073 2085 f = user.find('.')
2074 2086 if f >= 0:
2075 2087 user = user[:f]
2076 2088 return user
2077 2089
2078 2090 def emailuser(user):
2079 2091 """Return the user portion of an email address."""
2080 2092 f = user.find('@')
2081 2093 if f >= 0:
2082 2094 user = user[:f]
2083 2095 f = user.find('<')
2084 2096 if f >= 0:
2085 2097 user = user[f + 1:]
2086 2098 return user
2087 2099
2088 2100 def email(author):
2089 2101 '''get email of author.'''
2090 2102 r = author.find('>')
2091 2103 if r == -1:
2092 2104 r = None
2093 2105 return author[author.find('<') + 1:r]
2094 2106
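# Illustrative results for the three user-string helpers above (the
# address is made up):
#
#     author = 'Joe User <joe.user@example.com>'
#     email(author)      # -> 'joe.user@example.com'
#     emailuser(author)  # -> 'joe.user'
#     shortuser(author)  # -> 'joe'
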
2095 2107 def ellipsis(text, maxlength=400):
2096 2108 """Trim string to at most maxlength (default: 400) columns in display."""
2097 2109 return encoding.trim(text, maxlength, ellipsis='...')
2098 2110
2099 2111 def unitcountfn(*unittable):
2100 2112 '''return a function that renders a readable count of some quantity'''
2101 2113
2102 2114 def go(count):
2103 2115 for multiplier, divisor, format in unittable:
2104 2116 if count >= divisor * multiplier:
2105 2117 return format % (count / float(divisor))
2106 2118 return unittable[-1][2] % count
2107 2119
2108 2120 return go
2109 2121
2110 2122 bytecount = unitcountfn(
2111 2123 (100, 1 << 30, _('%.0f GB')),
2112 2124 (10, 1 << 30, _('%.1f GB')),
2113 2125 (1, 1 << 30, _('%.2f GB')),
2114 2126 (100, 1 << 20, _('%.0f MB')),
2115 2127 (10, 1 << 20, _('%.1f MB')),
2116 2128 (1, 1 << 20, _('%.2f MB')),
2117 2129 (100, 1 << 10, _('%.0f KB')),
2118 2130 (10, 1 << 10, _('%.1f KB')),
2119 2131 (1, 1 << 10, _('%.2f KB')),
2120 2132 (1, 1, _('%.0f bytes')),
2121 2133 )
2122 2134
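# Illustrative outputs of the table above (values chosen to hit
# different rows):
#
#     bytecount(100000)    # -> '97.7 KB'
#     bytecount(12345678)  # -> '11.8 MB'
#     bytecount(5)         # -> '5 bytes'
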
2123 2135 def uirepr(s):
2124 2136 # Avoid double backslash in Windows path repr()
2125 2137 return repr(s).replace('\\\\', '\\')
2126 2138
2127 2139 # delay import of textwrap
2128 2140 def MBTextWrapper(**kwargs):
2129 2141 class tw(textwrap.TextWrapper):
2130 2142 """
2131 2143 Extend TextWrapper for width-awareness.
2132 2144
2133 2145         Neither the number of 'bytes' in any encoding nor the number of
2134 2146         'characters' is appropriate for calculating the terminal columns
2135 2147         occupied by a string.
2136 2148 
2137 2149         The original TextWrapper uses the built-in 'len()' directly, so
2138 2150         overriding is needed to use the width information of each character.
2139 2151 
2140 2152         In addition, characters classified as 'ambiguous' width are treated
2141 2153         as wide in East Asian locales, but as narrow elsewhere. This requires
2142 2154         a user decision to determine the width of such characters.
2143 2155 """
2144 2156 def _cutdown(self, ucstr, space_left):
2145 2157 l = 0
2146 2158 colwidth = encoding.ucolwidth
2147 2159 for i in xrange(len(ucstr)):
2148 2160 l += colwidth(ucstr[i])
2149 2161 if space_left < l:
2150 2162 return (ucstr[:i], ucstr[i:])
2151 2163 return ucstr, ''
2152 2164
2153 2165 # overriding of base class
2154 2166 def _handle_long_word(self, reversed_chunks, cur_line, cur_len, width):
2155 2167 space_left = max(width - cur_len, 1)
2156 2168
2157 2169 if self.break_long_words:
2158 2170 cut, res = self._cutdown(reversed_chunks[-1], space_left)
2159 2171 cur_line.append(cut)
2160 2172 reversed_chunks[-1] = res
2161 2173 elif not cur_line:
2162 2174 cur_line.append(reversed_chunks.pop())
2163 2175
2164 2176 # this overriding code is imported from TextWrapper of Python 2.6
2165 2177 # to calculate columns of string by 'encoding.ucolwidth()'
2166 2178 def _wrap_chunks(self, chunks):
2167 2179 colwidth = encoding.ucolwidth
2168 2180
2169 2181 lines = []
2170 2182 if self.width <= 0:
2171 2183 raise ValueError("invalid width %r (must be > 0)" % self.width)
2172 2184
2173 2185 # Arrange in reverse order so items can be efficiently popped
2174 2186         # from a stack of chunks.
2175 2187 chunks.reverse()
2176 2188
2177 2189 while chunks:
2178 2190
2179 2191 # Start the list of chunks that will make up the current line.
2180 2192 # cur_len is just the length of all the chunks in cur_line.
2181 2193 cur_line = []
2182 2194 cur_len = 0
2183 2195
2184 2196 # Figure out which static string will prefix this line.
2185 2197 if lines:
2186 2198 indent = self.subsequent_indent
2187 2199 else:
2188 2200 indent = self.initial_indent
2189 2201
2190 2202 # Maximum width for this line.
2191 2203 width = self.width - len(indent)
2192 2204
2193 2205 # First chunk on line is whitespace -- drop it, unless this
2194 2206 # is the very beginning of the text (i.e. no lines started yet).
2195 2207 if self.drop_whitespace and chunks[-1].strip() == '' and lines:
2196 2208 del chunks[-1]
2197 2209
2198 2210 while chunks:
2199 2211 l = colwidth(chunks[-1])
2200 2212
2201 2213 # Can at least squeeze this chunk onto the current line.
2202 2214 if cur_len + l <= width:
2203 2215 cur_line.append(chunks.pop())
2204 2216 cur_len += l
2205 2217
2206 2218 # Nope, this line is full.
2207 2219 else:
2208 2220 break
2209 2221
2210 2222 # The current line is full, and the next chunk is too big to
2211 2223 # fit on *any* line (not just this one).
2212 2224 if chunks and colwidth(chunks[-1]) > width:
2213 2225 self._handle_long_word(chunks, cur_line, cur_len, width)
2214 2226
2215 2227 # If the last chunk on this line is all whitespace, drop it.
2216 2228 if (self.drop_whitespace and
2217 2229 cur_line and cur_line[-1].strip() == ''):
2218 2230 del cur_line[-1]
2219 2231
2220 2232 # Convert current line back to a string and store it in list
2221 2233 # of all lines (return value).
2222 2234 if cur_line:
2223 2235 lines.append(indent + ''.join(cur_line))
2224 2236
2225 2237 return lines
2226 2238
2227 2239 global MBTextWrapper
2228 2240 MBTextWrapper = tw
2229 2241 return tw(**kwargs)
2230 2242
2231 2243 def wrap(line, width, initindent='', hangindent=''):
2232 2244 maxindent = max(len(hangindent), len(initindent))
2233 2245 if width <= maxindent:
2234 2246 # adjust for weird terminal size
2235 2247 width = max(78, maxindent + 1)
2236 2248 line = line.decode(encoding.encoding, encoding.encodingmode)
2237 2249 initindent = initindent.decode(encoding.encoding, encoding.encodingmode)
2238 2250 hangindent = hangindent.decode(encoding.encoding, encoding.encodingmode)
2239 2251 wrapper = MBTextWrapper(width=width,
2240 2252 initial_indent=initindent,
2241 2253 subsequent_indent=hangindent)
2242 2254 return wrapper.fill(line).encode(encoding.encoding)
2243 2255
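# A hedged sketch of the entry point above (byte strings in the local
# encoding, per this module's conventions):
#
#     wrap('one two three four', width=9)
#     # -> 'one two\nthree\nfour', wrapped on display columns
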
2244 2256 if (pyplatform.python_implementation() == 'CPython' and
2245 2257 sys.version_info < (3, 0)):
2246 2258 # There is an issue in CPython that some IO methods do not handle EINTR
2247 2259 # correctly. The following table shows what CPython version (and functions)
2248 2260 # are affected (buggy: has the EINTR bug, okay: otherwise):
2249 2261 #
2250 2262 # | < 2.7.4 | 2.7.4 to 2.7.12 | >= 3.0
2251 2263 # --------------------------------------------------
2252 2264 # fp.__iter__ | buggy | buggy | okay
2253 2265 # fp.read* | buggy | okay [1] | okay
2254 2266 #
2255 2267 # [1]: fixed by changeset 67dc99a989cd in the cpython hg repo.
2256 2268 #
2257 2269 # Here we work around the EINTR issue for fileobj.__iter__. Other methods
2258 2270 # like "read*" are ignored for now, as Python < 2.7.4 is a minority.
2259 2271 #
2260 2272 # Although we can work around the EINTR issue for fp.__iter__, it is slower:
2261 2273 # "for x in fp" is 4x faster than "for x in iter(fp.readline, '')" in
2262 2274 # CPython 2, because CPython 2 maintains an internal readahead buffer for
2263 2275 # fp.__iter__ but not other fp.read* methods.
2264 2276 #
2265 2277 # On modern systems like Linux, the "read" syscall cannot be interrupted
2266 2278 # when reading "fast" files like on-disk files. So the EINTR issue only
2267 2279 # affects things like pipes, sockets, ttys etc. We treat "normal" (S_ISREG)
2268 2280 # files approximately as "fast" files and use the fast (unsafe) code path,
2269 2281 # to minimize the performance impact.
2270 2282 if sys.version_info >= (2, 7, 4):
2271 2283 # fp.readline deals with EINTR correctly, use it as a workaround.
2272 2284 def _safeiterfile(fp):
2273 2285 return iter(fp.readline, '')
2274 2286 else:
2275 2287 # fp.read* are broken too, manually deal with EINTR in a stupid way.
2276 2288 # note: this may block longer than necessary because of bufsize.
2277 2289 def _safeiterfile(fp, bufsize=4096):
2278 2290 fd = fp.fileno()
2279 2291 line = ''
2280 2292 while True:
2281 2293 try:
2282 2294 buf = os.read(fd, bufsize)
2283 2295 except OSError as ex:
2284 2296 # os.read only raises EINTR before any data is read
2285 2297 if ex.errno == errno.EINTR:
2286 2298 continue
2287 2299 else:
2288 2300 raise
2289 2301 line += buf
2290 2302 if '\n' in buf:
2291 2303 splitted = line.splitlines(True)
2292 2304 line = ''
2293 2305 for l in splitted:
2294 2306 if l[-1] == '\n':
2295 2307 yield l
2296 2308 else:
2297 2309 line = l
2298 2310 if not buf:
2299 2311 break
2300 2312 if line:
2301 2313 yield line
2302 2314
2303 2315 def iterfile(fp):
2304 2316 fastpath = True
2305 2317 if type(fp) is file:
2306 2318 fastpath = stat.S_ISREG(os.fstat(fp.fileno()).st_mode)
2307 2319 if fastpath:
2308 2320 return fp
2309 2321 else:
2310 2322 return _safeiterfile(fp)
2311 2323 else:
2312 2324 # PyPy and CPython 3 do not have the EINTR issue thus no workaround needed.
2313 2325 def iterfile(fp):
2314 2326 return fp
2315 2327
2316 2328 def iterlines(iterator):
2317 2329 for chunk in iterator:
2318 2330 for line in chunk.splitlines():
2319 2331 yield line
2320 2332
2321 2333 def expandpath(path):
2322 2334 return os.path.expanduser(os.path.expandvars(path))
2323 2335
2324 2336 def hgcmd():
2325 2337 """Return the command used to execute current hg
2326 2338
2327 2339 This is different from hgexecutable() because on Windows we want
2328 2340 to avoid things opening new shell windows like batch files, so we
2329 2341 get either the python call or current executable.
2330 2342 """
2331 2343 if mainfrozen():
2332 2344 if getattr(sys, 'frozen', None) == 'macosx_app':
2333 2345 # Env variable set by py2app
2334 2346 return [encoding.environ['EXECUTABLEPATH']]
2335 2347 else:
2336 2348 return [pycompat.sysexecutable]
2337 2349 return gethgcmd()
2338 2350
2339 2351 def rundetached(args, condfn):
2340 2352 """Execute the argument list in a detached process.
2341 2353
2342 2354 condfn is a callable which is called repeatedly and should return
2343 2355 True once the child process is known to have started successfully.
2344 2356 At this point, the child process PID is returned. If the child
2345 2357 process fails to start or finishes before condfn() evaluates to
2346 2358 True, return -1.
2347 2359 """
2348 2360 # Windows case is easier because the child process is either
2349 2361 # successfully starting and validating the condition or exiting
2350 2362 # on failure. We just poll on its PID. On Unix, if the child
2351 2363 # process fails to start, it will be left in a zombie state until
2352 2364     # the parent waits on it, which we cannot do since we expect a long
2353 2365 # running process on success. Instead we listen for SIGCHLD telling
2354 2366 # us our child process terminated.
2355 2367 terminated = set()
2356 2368 def handler(signum, frame):
2357 2369 terminated.add(os.wait())
2358 2370 prevhandler = None
2359 2371 SIGCHLD = getattr(signal, 'SIGCHLD', None)
2360 2372 if SIGCHLD is not None:
2361 2373 prevhandler = signal.signal(SIGCHLD, handler)
2362 2374 try:
2363 2375 pid = spawndetached(args)
2364 2376 while not condfn():
2365 2377 if ((pid in terminated or not testpid(pid))
2366 2378 and not condfn()):
2367 2379 return -1
2368 2380 time.sleep(0.1)
2369 2381 return pid
2370 2382 finally:
2371 2383 if prevhandler is not None:
2372 2384 signal.signal(signal.SIGCHLD, prevhandler)
2373 2385
2374 2386 def interpolate(prefix, mapping, s, fn=None, escape_prefix=False):
2375 2387 """Return the result of interpolating items in the mapping into string s.
2376 2388
2377 2389 prefix is a single character string, or a two character string with
2378 2390 a backslash as the first character if the prefix needs to be escaped in
2379 2391 a regular expression.
2380 2392
2381 2393 fn is an optional function that will be applied to the replacement text
2382 2394 just before replacement.
2383 2395
2384 2396 escape_prefix is an optional flag that allows using doubled prefix for
2385 2397 its escaping.
2386 2398 """
2387 2399 fn = fn or (lambda s: s)
2388 2400 patterns = '|'.join(mapping.keys())
2389 2401 if escape_prefix:
2390 2402 patterns += '|' + prefix
2391 2403 if len(prefix) > 1:
2392 2404 prefix_char = prefix[1:]
2393 2405 else:
2394 2406 prefix_char = prefix
2395 2407 mapping[prefix_char] = prefix_char
2396 2408 r = remod.compile(r'%s(%s)' % (prefix, patterns))
2397 2409 return r.sub(lambda x: fn(mapping[x.group()[1:]]), s)
2398 2410
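# A hedged sketch (the mapping and input string are made up):
#
#     interpolate('%', {'user': 'joe'}, 'hello %user')  # -> 'hello joe'
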
2399 2411 def getport(port):
2400 2412 """Return the port for a given network service.
2401 2413
2402 2414 If port is an integer, it's returned as is. If it's a string, it's
2403 2415 looked up using socket.getservbyname(). If there's no matching
2404 2416 service, error.Abort is raised.
2405 2417 """
2406 2418 try:
2407 2419 return int(port)
2408 2420 except ValueError:
2409 2421 pass
2410 2422
2411 2423 try:
2412 2424 return socket.getservbyname(port)
2413 2425 except socket.error:
2414 2426 raise Abort(_("no port number associated with service '%s'") % port)
2415 2427
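# Illustrative behavior; the name lookup goes through the platform's
# services database, so results can vary by system:
#
#     getport(8080)    # -> 8080
#     getport('8080')  # -> 8080
#     getport('http')  # -> 80 on typical systems
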
2416 2428 _booleans = {'1': True, 'yes': True, 'true': True, 'on': True, 'always': True,
2417 2429 '0': False, 'no': False, 'false': False, 'off': False,
2418 2430 'never': False}
2419 2431
2420 2432 def parsebool(s):
2421 2433 """Parse s into a boolean.
2422 2434
2423 2435 If s is not a valid boolean, returns None.
2424 2436 """
2425 2437 return _booleans.get(s.lower(), None)
2426 2438
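# Illustrative results (matching is case-insensitive):
#
#     parsebool('on')     # -> True
#     parsebool('Never')  # -> False
#     parsebool('maybe')  # -> None
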
2427 2439 _hextochr = dict((a + b, chr(int(a + b, 16)))
2428 2440 for a in string.hexdigits for b in string.hexdigits)
2429 2441
2430 2442 class url(object):
2431 2443 r"""Reliable URL parser.
2432 2444
2433 2445 This parses URLs and provides attributes for the following
2434 2446 components:
2435 2447
2436 2448 <scheme>://<user>:<passwd>@<host>:<port>/<path>?<query>#<fragment>
2437 2449
2438 2450 Missing components are set to None. The only exception is
2439 2451 fragment, which is set to '' if present but empty.
2440 2452
2441 2453 If parsefragment is False, fragment is included in query. If
2442 2454 parsequery is False, query is included in path. If both are
2443 2455 False, both fragment and query are included in path.
2444 2456
2445 2457 See http://www.ietf.org/rfc/rfc2396.txt for more information.
2446 2458
2447 2459 Note that for backward compatibility reasons, bundle URLs do not
2448 2460 take host names. That means 'bundle://../' has a path of '../'.
2449 2461
2450 2462 Examples:
2451 2463
2452 2464 >>> url('http://www.ietf.org/rfc/rfc2396.txt')
2453 2465 <url scheme: 'http', host: 'www.ietf.org', path: 'rfc/rfc2396.txt'>
2454 2466 >>> url('ssh://[::1]:2200//home/joe/repo')
2455 2467 <url scheme: 'ssh', host: '[::1]', port: '2200', path: '/home/joe/repo'>
2456 2468 >>> url('file:///home/joe/repo')
2457 2469 <url scheme: 'file', path: '/home/joe/repo'>
2458 2470 >>> url('file:///c:/temp/foo/')
2459 2471 <url scheme: 'file', path: 'c:/temp/foo/'>
2460 2472 >>> url('bundle:foo')
2461 2473 <url scheme: 'bundle', path: 'foo'>
2462 2474 >>> url('bundle://../foo')
2463 2475 <url scheme: 'bundle', path: '../foo'>
2464 2476 >>> url(r'c:\foo\bar')
2465 2477 <url path: 'c:\\foo\\bar'>
2466 2478 >>> url(r'\\blah\blah\blah')
2467 2479 <url path: '\\\\blah\\blah\\blah'>
2468 2480 >>> url(r'\\blah\blah\blah#baz')
2469 2481 <url path: '\\\\blah\\blah\\blah', fragment: 'baz'>
2470 2482 >>> url(r'file:///C:\users\me')
2471 2483 <url scheme: 'file', path: 'C:\\users\\me'>
2472 2484
2473 2485 Authentication credentials:
2474 2486
2475 2487 >>> url('ssh://joe:xyz@x/repo')
2476 2488 <url scheme: 'ssh', user: 'joe', passwd: 'xyz', host: 'x', path: 'repo'>
2477 2489 >>> url('ssh://joe@x/repo')
2478 2490 <url scheme: 'ssh', user: 'joe', host: 'x', path: 'repo'>
2479 2491
2480 2492 Query strings and fragments:
2481 2493
2482 2494 >>> url('http://host/a?b#c')
2483 2495 <url scheme: 'http', host: 'host', path: 'a', query: 'b', fragment: 'c'>
2484 2496 >>> url('http://host/a?b#c', parsequery=False, parsefragment=False)
2485 2497 <url scheme: 'http', host: 'host', path: 'a?b#c'>
2486 2498
2487 2499 Empty path:
2488 2500
2489 2501 >>> url('')
2490 2502 <url path: ''>
2491 2503 >>> url('#a')
2492 2504 <url path: '', fragment: 'a'>
2493 2505 >>> url('http://host/')
2494 2506 <url scheme: 'http', host: 'host', path: ''>
2495 2507 >>> url('http://host/#a')
2496 2508 <url scheme: 'http', host: 'host', path: '', fragment: 'a'>
2497 2509
2498 2510 Only scheme:
2499 2511
2500 2512 >>> url('http:')
2501 2513 <url scheme: 'http'>
2502 2514 """
2503 2515
2504 2516 _safechars = "!~*'()+"
2505 2517 _safepchars = "/!~*'()+:\\"
2506 2518 _matchscheme = remod.compile('^[a-zA-Z0-9+.\\-]+:').match
2507 2519
2508 2520 def __init__(self, path, parsequery=True, parsefragment=True):
2509 2521 # We slowly chomp away at path until we have only the path left
2510 2522 self.scheme = self.user = self.passwd = self.host = None
2511 2523 self.port = self.path = self.query = self.fragment = None
2512 2524 self._localpath = True
2513 2525 self._hostport = ''
2514 2526 self._origpath = path
2515 2527
2516 2528 if parsefragment and '#' in path:
2517 2529 path, self.fragment = path.split('#', 1)
2518 2530
2519 2531 # special case for Windows drive letters and UNC paths
2520 2532 if hasdriveletter(path) or path.startswith('\\\\'):
2521 2533 self.path = path
2522 2534 return
2523 2535
2524 2536 # For compatibility reasons, we can't handle bundle paths as
2525 2537 # normal URLS
2526 2538 if path.startswith('bundle:'):
2527 2539 self.scheme = 'bundle'
2528 2540 path = path[7:]
2529 2541 if path.startswith('//'):
2530 2542 path = path[2:]
2531 2543 self.path = path
2532 2544 return
2533 2545
2534 2546 if self._matchscheme(path):
2535 2547 parts = path.split(':', 1)
2536 2548 if parts[0]:
2537 2549 self.scheme, path = parts
2538 2550 self._localpath = False
2539 2551
2540 2552 if not path:
2541 2553 path = None
2542 2554 if self._localpath:
2543 2555 self.path = ''
2544 2556 return
2545 2557 else:
2546 2558 if self._localpath:
2547 2559 self.path = path
2548 2560 return
2549 2561
2550 2562 if parsequery and '?' in path:
2551 2563 path, self.query = path.split('?', 1)
2552 2564 if not path:
2553 2565 path = None
2554 2566 if not self.query:
2555 2567 self.query = None
2556 2568
2557 2569 # // is required to specify a host/authority
2558 2570 if path and path.startswith('//'):
2559 2571 parts = path[2:].split('/', 1)
2560 2572 if len(parts) > 1:
2561 2573 self.host, path = parts
2562 2574 else:
2563 2575 self.host = parts[0]
2564 2576 path = None
2565 2577 if not self.host:
2566 2578 self.host = None
2567 2579 # path of file:///d is /d
2568 2580 # path of file:///d:/ is d:/, not /d:/
2569 2581 if path and not hasdriveletter(path):
2570 2582 path = '/' + path
2571 2583
2572 2584 if self.host and '@' in self.host:
2573 2585 self.user, self.host = self.host.rsplit('@', 1)
2574 2586 if ':' in self.user:
2575 2587 self.user, self.passwd = self.user.split(':', 1)
2576 2588 if not self.host:
2577 2589 self.host = None
2578 2590
2579 2591 # Don't split on colons in IPv6 addresses without ports
2580 2592 if (self.host and ':' in self.host and
2581 2593 not (self.host.startswith('[') and self.host.endswith(']'))):
2582 2594 self._hostport = self.host
2583 2595 self.host, self.port = self.host.rsplit(':', 1)
2584 2596 if not self.host:
2585 2597 self.host = None
2586 2598
2587 2599 if (self.host and self.scheme == 'file' and
2588 2600 self.host not in ('localhost', '127.0.0.1', '[::1]')):
2589 2601 raise Abort(_('file:// URLs can only refer to localhost'))
2590 2602
2591 2603 self.path = path
2592 2604
2593 2605 # leave the query string escaped
2594 2606 for a in ('user', 'passwd', 'host', 'port',
2595 2607 'path', 'fragment'):
2596 2608 v = getattr(self, a)
2597 2609 if v is not None:
2598 2610 setattr(self, a, pycompat.urlunquote(v))
2599 2611
2600 2612 def __repr__(self):
2601 2613 attrs = []
2602 2614 for a in ('scheme', 'user', 'passwd', 'host', 'port', 'path',
2603 2615 'query', 'fragment'):
2604 2616 v = getattr(self, a)
2605 2617 if v is not None:
2606 2618 attrs.append('%s: %r' % (a, v))
2607 2619 return '<url %s>' % ', '.join(attrs)
2608 2620
2609 2621 def __str__(self):
2610 2622 r"""Join the URL's components back into a URL string.
2611 2623
2612 2624 Examples:
2613 2625
2614 2626 >>> str(url('http://user:pw@host:80/c:/bob?fo:oo#ba:ar'))
2615 2627 'http://user:pw@host:80/c:/bob?fo:oo#ba:ar'
2616 2628 >>> str(url('http://user:pw@host:80/?foo=bar&baz=42'))
2617 2629 'http://user:pw@host:80/?foo=bar&baz=42'
2618 2630 >>> str(url('http://user:pw@host:80/?foo=bar%3dbaz'))
2619 2631 'http://user:pw@host:80/?foo=bar%3dbaz'
2620 2632 >>> str(url('ssh://user:pw@[::1]:2200//home/joe#'))
2621 2633 'ssh://user:pw@[::1]:2200//home/joe#'
2622 2634 >>> str(url('http://localhost:80//'))
2623 2635 'http://localhost:80//'
2624 2636 >>> str(url('http://localhost:80/'))
2625 2637 'http://localhost:80/'
2626 2638 >>> str(url('http://localhost:80'))
2627 2639 'http://localhost:80/'
2628 2640 >>> str(url('bundle:foo'))
2629 2641 'bundle:foo'
2630 2642 >>> str(url('bundle://../foo'))
2631 2643 'bundle:../foo'
2632 2644 >>> str(url('path'))
2633 2645 'path'
2634 2646 >>> str(url('file:///tmp/foo/bar'))
2635 2647 'file:///tmp/foo/bar'
2636 2648 >>> str(url('file:///c:/tmp/foo/bar'))
2637 2649 'file:///c:/tmp/foo/bar'
2638 2650 >>> print url(r'bundle:foo\bar')
2639 2651 bundle:foo\bar
2640 2652 >>> print url(r'file:///D:\data\hg')
2641 2653 file:///D:\data\hg
2642 2654 """
2643 2655 if self._localpath:
2644 2656 s = self.path
2645 2657 if self.scheme == 'bundle':
2646 2658 s = 'bundle:' + s
2647 2659 if self.fragment:
2648 2660 s += '#' + self.fragment
2649 2661 return s
2650 2662
2651 2663 s = self.scheme + ':'
2652 2664 if self.user or self.passwd or self.host:
2653 2665 s += '//'
2654 2666 elif self.scheme and (not self.path or self.path.startswith('/')
2655 2667 or hasdriveletter(self.path)):
2656 2668 s += '//'
2657 2669 if hasdriveletter(self.path):
2658 2670 s += '/'
2659 2671 if self.user:
2660 2672 s += urlreq.quote(self.user, safe=self._safechars)
2661 2673 if self.passwd:
2662 2674 s += ':' + urlreq.quote(self.passwd, safe=self._safechars)
2663 2675 if self.user or self.passwd:
2664 2676 s += '@'
2665 2677 if self.host:
2666 2678 if not (self.host.startswith('[') and self.host.endswith(']')):
2667 2679 s += urlreq.quote(self.host)
2668 2680 else:
2669 2681 s += self.host
2670 2682 if self.port:
2671 2683 s += ':' + urlreq.quote(self.port)
2672 2684 if self.host:
2673 2685 s += '/'
2674 2686 if self.path:
2675 2687 # TODO: similar to the query string, we should not unescape the
2676 2688 # path when we store it, the path might contain '%2f' = '/',
2677 2689 # which we should *not* escape.
2678 2690 s += urlreq.quote(self.path, safe=self._safepchars)
2679 2691 if self.query:
2680 2692 # we store the query in escaped form.
2681 2693 s += '?' + self.query
2682 2694 if self.fragment is not None:
2683 2695 s += '#' + urlreq.quote(self.fragment, safe=self._safepchars)
2684 2696 return s
2685 2697
2686 2698 def authinfo(self):
2687 2699 user, passwd = self.user, self.passwd
2688 2700 try:
2689 2701 self.user, self.passwd = None, None
2690 2702 s = str(self)
2691 2703 finally:
2692 2704 self.user, self.passwd = user, passwd
2693 2705 if not self.user:
2694 2706 return (s, None)
2695 2707 # authinfo[1] is passed to urllib2 password manager, and its
2696 2708 # URIs must not contain credentials. The host is passed in the
2697 2709 # URIs list because Python < 2.4.3 uses only that to search for
2698 2710 # a password.
2699 2711 return (s, (None, (s, self.host),
2700 2712 self.user, self.passwd or ''))
2701 2713
2702 2714 def isabs(self):
2703 2715 if self.scheme and self.scheme != 'file':
2704 2716 return True # remote URL
2705 2717 if hasdriveletter(self.path):
2706 2718 return True # absolute for our purposes - can't be joined()
2707 2719 if self.path.startswith(r'\\'):
2708 2720 return True # Windows UNC path
2709 2721 if self.path.startswith('/'):
2710 2722 return True # POSIX-style
2711 2723 return False
2712 2724
2713 2725 def localpath(self):
2714 2726 if self.scheme == 'file' or self.scheme == 'bundle':
2715 2727 path = self.path or '/'
2716 2728 # For Windows, we need to promote hosts containing drive
2717 2729 # letters to paths with drive letters.
2718 2730 if hasdriveletter(self._hostport):
2719 2731 path = self._hostport + '/' + self.path
2720 2732 elif (self.host is not None and self.path
2721 2733 and not hasdriveletter(path)):
2722 2734 path = '/' + path
2723 2735 return path
2724 2736 return self._origpath
2725 2737
2726 2738 def islocal(self):
2727 2739 '''whether localpath will return something that posixfile can open'''
2728 2740 return (not self.scheme or self.scheme == 'file'
2729 2741 or self.scheme == 'bundle')
2730 2742
2731 2743 def hasscheme(path):
2732 2744 return bool(url(path).scheme)
2733 2745
2734 2746 def hasdriveletter(path):
2735 2747 return path and path[1:2] == ':' and path[0:1].isalpha()
2736 2748
2737 2749 def urllocalpath(path):
2738 2750 return url(path, parsequery=False, parsefragment=False).localpath()
2739 2751
2740 2752 def hidepassword(u):
2741 2753     '''hide user credentials in a url string'''
2742 2754 u = url(u)
2743 2755 if u.passwd:
2744 2756 u.passwd = '***'
2745 2757 return str(u)
2746 2758
2747 2759 def removeauth(u):
2748 2760 '''remove all authentication information from a url string'''
2749 2761 u = url(u)
2750 2762 u.user = u.passwd = None
2751 2763 return str(u)
2752 2764
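# Illustrative results for the two helpers above (the URL is made up):
#
#     hidepassword('http://joe:secret@host/repo')
#     # -> 'http://joe:***@host/repo'
#     removeauth('http://joe:secret@host/repo')
#     # -> 'http://host/repo'
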
2753 def isatty(fp):
2754 try:
2755 return fp.isatty()
2756 except AttributeError:
2757 return False
2758
2759 2765 timecount = unitcountfn(
2760 2766 (1, 1e3, _('%.0f s')),
2761 2767 (100, 1, _('%.1f s')),
2762 2768 (10, 1, _('%.2f s')),
2763 2769 (1, 1, _('%.3f s')),
2764 2770 (100, 0.001, _('%.1f ms')),
2765 2771 (10, 0.001, _('%.2f ms')),
2766 2772 (1, 0.001, _('%.3f ms')),
2767 2773 (100, 0.000001, _('%.1f us')),
2768 2774 (10, 0.000001, _('%.2f us')),
2769 2775 (1, 0.000001, _('%.3f us')),
2770 2776 (100, 0.000000001, _('%.1f ns')),
2771 2777 (10, 0.000000001, _('%.2f ns')),
2772 2778 (1, 0.000000001, _('%.3f ns')),
2773 2779 )
2774 2780
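# Illustrative outputs of the table above:
#
#     timecount(250)     # -> '250.0 s'
#     timecount(0.0042)  # -> '4.200 ms'
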
2775 2781 _timenesting = [0]
2776 2782
2777 2783 def timed(func):
2778 2784 '''Report the execution time of a function call to stderr.
2779 2785
2780 2786 During development, use as a decorator when you need to measure
2781 2787 the cost of a function, e.g. as follows:
2782 2788
2783 2789 @util.timed
2784 2790 def foo(a, b, c):
2785 2791 pass
2786 2792 '''
2787 2793
2788 2794 def wrapper(*args, **kwargs):
2789 2795 start = time.time()
2790 2796 indent = 2
2791 2797 _timenesting[0] += indent
2792 2798 try:
2793 2799 return func(*args, **kwargs)
2794 2800 finally:
2795 2801 elapsed = time.time() - start
2796 2802 _timenesting[0] -= indent
2797 2803 stderr.write('%s%s: %s\n' %
2798 2804 (' ' * _timenesting[0], func.__name__,
2799 2805 timecount(elapsed)))
2800 2806 return wrapper
2801 2807
2802 2808 _sizeunits = (('m', 2**20), ('k', 2**10), ('g', 2**30),
2803 2809 ('kb', 2**10), ('mb', 2**20), ('gb', 2**30), ('b', 1))
2804 2810
2805 2811 def sizetoint(s):
2806 2812 '''Convert a space specifier to a byte count.
2807 2813
2808 2814 >>> sizetoint('30')
2809 2815 30
2810 2816 >>> sizetoint('2.2kb')
2811 2817 2252
2812 2818 >>> sizetoint('6M')
2813 2819 6291456
2814 2820 '''
2815 2821 t = s.strip().lower()
2816 2822 try:
2817 2823 for k, u in _sizeunits:
2818 2824 if t.endswith(k):
2819 2825 return int(float(t[:-len(k)]) * u)
2820 2826 return int(t)
2821 2827 except ValueError:
2822 2828 raise error.ParseError(_("couldn't parse size: %s") % s)
2823 2829
2824 2830 class hooks(object):
2825 2831 '''A collection of hook functions that can be used to extend a
2826 2832 function's behavior. Hooks are called in lexicographic order,
2827 2833 based on the names of their sources.'''
2828 2834
2829 2835 def __init__(self):
2830 2836 self._hooks = []
2831 2837
2832 2838 def add(self, source, hook):
2833 2839 self._hooks.append((source, hook))
2834 2840
2835 2841 def __call__(self, *args):
2836 2842 self._hooks.sort(key=lambda x: x[0])
2837 2843 results = []
2838 2844 for source, hook in self._hooks:
2839 2845 results.append(hook(*args))
2840 2846 return results
2841 2847
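# A minimal usage sketch (the source names and hooks are illustrative):
#
#     h = hooks()
#     h.add('ext-a', lambda x: x + 1)
#     h.add('ext-b', lambda x: x * 2)
#     h(3)  # -> [4, 6], called in lexicographic source order
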
2842 2848 def getstackframes(skip=0, line=' %-*s in %s\n', fileline='%s:%s'):
2843 2849 '''Yields lines for a nicely formatted stacktrace.
2844 2850 Skips the 'skip' last entries.
2845 2851 Each file+linenumber is formatted according to fileline.
2846 2852 Each line is formatted according to line.
2847 2853 If line is None, it yields:
2848 2854 length of longest filepath+line number,
2849 2855 filepath+linenumber,
2850 2856 function
2851 2857
2852 2858     Not to be used in production code, but very convenient while developing.
2853 2859 '''
2854 2860 entries = [(fileline % (fn, ln), func)
2855 2861 for fn, ln, func, _text in traceback.extract_stack()[:-skip - 1]]
2856 2862 if entries:
2857 2863 fnmax = max(len(entry[0]) for entry in entries)
2858 2864 for fnln, func in entries:
2859 2865 if line is None:
2860 2866 yield (fnmax, fnln, func)
2861 2867 else:
2862 2868 yield line % (fnmax, fnln, func)
2863 2869
2864 2870 def debugstacktrace(msg='stacktrace', skip=0, f=stderr, otherf=stdout):
2865 2871 '''Writes a message to f (stderr) with a nicely formatted stacktrace.
2866 2872 Skips the 'skip' last entries. By default it will flush stdout first.
2867 2873 It can be used everywhere and intentionally does not require an ui object.
2868 2874     Not to be used in production code, but very convenient while developing.
2869 2875 '''
2870 2876 if otherf:
2871 2877 otherf.flush()
2872 2878 f.write('%s at:\n' % msg)
2873 2879 for line in getstackframes(skip + 1):
2874 2880 f.write(line)
2875 2881 f.flush()
2876 2882
2877 2883 class dirs(object):
2878 2884 '''a multiset of directory names from a dirstate or manifest'''
2879 2885
2880 2886 def __init__(self, map, skip=None):
2881 2887 self._dirs = {}
2882 2888 addpath = self.addpath
2883 2889 if safehasattr(map, 'iteritems') and skip is not None:
2884 2890 for f, s in map.iteritems():
2885 2891 if s[0] != skip:
2886 2892 addpath(f)
2887 2893 else:
2888 2894 for f in map:
2889 2895 addpath(f)
2890 2896
2891 2897 def addpath(self, path):
2892 2898 dirs = self._dirs
2893 2899 for base in finddirs(path):
2894 2900 if base in dirs:
2895 2901 dirs[base] += 1
2896 2902 return
2897 2903 dirs[base] = 1
2898 2904
2899 2905 def delpath(self, path):
2900 2906 dirs = self._dirs
2901 2907 for base in finddirs(path):
2902 2908 if dirs[base] > 1:
2903 2909 dirs[base] -= 1
2904 2910 return
2905 2911 del dirs[base]
2906 2912
2907 2913 def __iter__(self):
2908 2914 return self._dirs.iterkeys()
2909 2915
2910 2916 def __contains__(self, d):
2911 2917 return d in self._dirs
2912 2918
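# An illustrative sketch of the pure-Python version above (paths are
# made up; the C implementation below behaves the same way):
#
#     d = dirs(['a/b/x', 'a/c/y'])
#     'a' in d      # -> True
#     'a/b' in d    # -> True
#     'a/b/x' in d  # -> False; files themselves are not directories
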
2913 2919 if safehasattr(parsers, 'dirs'):
2914 2920 dirs = parsers.dirs
2915 2921
2916 2922 def finddirs(path):
2917 2923 pos = path.rfind('/')
2918 2924 while pos != -1:
2919 2925 yield path[:pos]
2920 2926 pos = path.rfind('/', 0, pos)
2921 2927
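# Illustrative results:
#
#     list(finddirs('a/b/c'))  # -> ['a/b', 'a']
#     list(finddirs('file'))   # -> []
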
2922 2928 class ctxmanager(object):
2923 2929 '''A context manager for use in 'with' blocks to allow multiple
2924 2930 contexts to be entered at once. This is both safer and more
2925 2931 flexible than contextlib.nested.
2926 2932
2927 2933 Once Mercurial supports Python 2.7+, this will become mostly
2928 2934 unnecessary.
2929 2935 '''
2930 2936
2931 2937 def __init__(self, *args):
2932 2938 '''Accepts a list of no-argument functions that return context
2933 2939         managers. These will be invoked when enter() is called.'''
2934 2940 self._pending = args
2935 2941 self._atexit = []
2936 2942
2937 2943 def __enter__(self):
2938 2944 return self
2939 2945
2940 2946 def enter(self):
2941 2947 '''Create and enter context managers in the order in which they were
2942 2948 passed to the constructor.'''
2943 2949 values = []
2944 2950 for func in self._pending:
2945 2951 obj = func()
2946 2952 values.append(obj.__enter__())
2947 2953 self._atexit.append(obj.__exit__)
2948 2954 del self._pending
2949 2955 return values
2950 2956
2951 2957 def atexit(self, func, *args, **kwargs):
2952 2958 '''Add a function to call when this context manager exits. The
2953 2959 ordering of multiple atexit calls is unspecified, save that
2954 2960 they will happen before any __exit__ functions.'''
2955 2961 def wrapper(exc_type, exc_val, exc_tb):
2956 2962 func(*args, **kwargs)
2957 2963 self._atexit.append(wrapper)
2958 2964 return func
2959 2965
2960 2966 def __exit__(self, exc_type, exc_val, exc_tb):
2961 2967 '''Context managers are exited in the reverse order from which
2962 2968 they were created.'''
2963 2969 received = exc_type is not None
2964 2970 suppressed = False
2965 2971 pending = None
2966 2972 self._atexit.reverse()
2967 2973 for exitfunc in self._atexit:
2968 2974 try:
2969 2975 if exitfunc(exc_type, exc_val, exc_tb):
2970 2976 suppressed = True
2971 2977 exc_type = None
2972 2978 exc_val = None
2973 2979 exc_tb = None
2974 2980 except BaseException:
2976 2982                 exc_type, exc_val, exc_tb = pending = sys.exc_info()
2977 2983 del self._atexit
2978 2984 if pending:
2979 2985 raise exc_val
2980 2986 return received and suppressed
2981 2987
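# A hedged usage sketch (the file names are made up):
#
#     with ctxmanager(lambda: open('a'), lambda: open('b')) as c:
#         fa, fb = c.enter()
#         # ... use fa and fb ...
#     # both files are closed on exit, in reverse order
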
2982 2988 # compression code
2983 2989
2984 2990 SERVERROLE = 'server'
2985 2991 CLIENTROLE = 'client'
2986 2992
2987 2993 compewireprotosupport = collections.namedtuple(u'compenginewireprotosupport',
2988 2994 (u'name', u'serverpriority',
2989 2995 u'clientpriority'))
2990 2996
2991 2997 class compressormanager(object):
2992 2998 """Holds registrations of various compression engines.
2993 2999
2994 3000 This class essentially abstracts the differences between compression
2995 3001 engines to allow new compression formats to be added easily, possibly from
2996 3002 extensions.
2997 3003
2998 3004 Compressors are registered against the global instance by calling its
2999 3005 ``register()`` method.
3000 3006 """
3001 3007 def __init__(self):
3002 3008 self._engines = {}
3003 3009 # Bundle spec human name to engine name.
3004 3010 self._bundlenames = {}
3005 3011 # Internal bundle identifier to engine name.
3006 3012 self._bundletypes = {}
3007 3013 # Revlog header to engine name.
3008 3014 self._revlogheaders = {}
3009 3015 # Wire proto identifier to engine name.
3010 3016 self._wiretypes = {}
3011 3017
3012 3018 def __getitem__(self, key):
3013 3019 return self._engines[key]
3014 3020
3015 3021 def __contains__(self, key):
3016 3022 return key in self._engines
3017 3023
3018 3024 def __iter__(self):
3019 3025 return iter(self._engines.keys())
3020 3026
3021 3027 def register(self, engine):
3022 3028 """Register a compression engine with the manager.
3023 3029
3024 3030 The argument must be a ``compressionengine`` instance.
3025 3031 """
3026 3032 if not isinstance(engine, compressionengine):
3027 3033 raise ValueError(_('argument must be a compressionengine'))
3028 3034
3029 3035 name = engine.name()
3030 3036
3031 3037 if name in self._engines:
3032 3038 raise error.Abort(_('compression engine %s already registered') %
3033 3039 name)
3034 3040
3035 3041 bundleinfo = engine.bundletype()
3036 3042 if bundleinfo:
3037 3043 bundlename, bundletype = bundleinfo
3038 3044
3039 3045 if bundlename in self._bundlenames:
3040 3046 raise error.Abort(_('bundle name %s already registered') %
3041 3047 bundlename)
3042 3048 if bundletype in self._bundletypes:
3043 3049 raise error.Abort(_('bundle type %s already registered by %s') %
3044 3050 (bundletype, self._bundletypes[bundletype]))
3045 3051
3046 3052 # No external facing name declared.
3047 3053 if bundlename:
3048 3054 self._bundlenames[bundlename] = name
3049 3055
3050 3056 self._bundletypes[bundletype] = name
3051 3057
3052 3058 wiresupport = engine.wireprotosupport()
3053 3059 if wiresupport:
3054 3060 wiretype = wiresupport.name
3055 3061 if wiretype in self._wiretypes:
3056 3062 raise error.Abort(_('wire protocol compression %s already '
3057 3063 'registered by %s') %
3058 3064 (wiretype, self._wiretypes[wiretype]))
3059 3065
3060 3066 self._wiretypes[wiretype] = name
3061 3067
3062 3068 revlogheader = engine.revlogheader()
3063 3069 if revlogheader and revlogheader in self._revlogheaders:
3064 3070 raise error.Abort(_('revlog header %s already registered by %s') %
3065 3071 (revlogheader, self._revlogheaders[revlogheader]))
3066 3072
3067 3073 if revlogheader:
3068 3074 self._revlogheaders[revlogheader] = name
3069 3075
3070 3076 self._engines[name] = engine
3071 3077
3072 3078 @property
3073 3079 def supportedbundlenames(self):
3074 3080 return set(self._bundlenames.keys())
3075 3081
3076 3082 @property
3077 3083 def supportedbundletypes(self):
3078 3084 return set(self._bundletypes.keys())
3079 3085
3080 3086 def forbundlename(self, bundlename):
3081 3087 """Obtain a compression engine registered to a bundle name.
3082 3088
3083 3089 Will raise KeyError if the bundle name isn't registered.
3084 3090
3085 3091 Will abort if the engine is known but not available.
3086 3092 """
3087 3093 engine = self._engines[self._bundlenames[bundlename]]
3088 3094 if not engine.available():
3089 3095 raise error.Abort(_('compression engine %s could not be loaded') %
3090 3096 engine.name())
3091 3097 return engine
3092 3098
3093 3099 def forbundletype(self, bundletype):
3094 3100 """Obtain a compression engine registered to a bundle type.
3095 3101
3096 3102 Will raise KeyError if the bundle type isn't registered.
3097 3103
3098 3104 Will abort if the engine is known but not available.
3099 3105 """
3100 3106 engine = self._engines[self._bundletypes[bundletype]]
3101 3107 if not engine.available():
3102 3108 raise error.Abort(_('compression engine %s could not be loaded') %
3103 3109 engine.name())
3104 3110 return engine
3105 3111
3106 3112 def supportedwireengines(self, role, onlyavailable=True):
3107 3113 """Obtain compression engines that support the wire protocol.
3108 3114
3109 3115 Returns a list of engines in prioritized order, most desired first.
3110 3116
3111 3117 If ``onlyavailable`` is set, filter out engines that can't be
3112 3118 loaded.
3113 3119 """
3114 3120 assert role in (SERVERROLE, CLIENTROLE)
3115 3121
3116 3122 attr = 'serverpriority' if role == SERVERROLE else 'clientpriority'
3117 3123
3118 3124 engines = [self._engines[e] for e in self._wiretypes.values()]
3119 3125 if onlyavailable:
3120 3126 engines = [e for e in engines if e.available()]
3121 3127
3122 3128 def getkey(e):
3123 3129 # Sort first by priority, highest first. In case of tie, sort
3124 3130 # alphabetically. This is arbitrary, but ensures output is
3125 3131 # stable.
3126 3132 w = e.wireprotosupport()
3127 3133 return -1 * getattr(w, attr), w.name
3128 3134
3129 3135 return list(sorted(engines, key=getkey))
3130 3136
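# Illustrative sketch (not part of the original file): given the priorities
# declared by the engines registered below (zstd 50/50, zlib 20/20,
# bzip2 0/0, none 0/10), a hypothetical caller asking for the server-side
# ordering would see:
#
#   server = compengines.supportedwireengines(SERVERROLE)
#   [e.name() for e in server]   # ['zstd', 'zlib', 'bzip2', 'none']
#
# Ties are broken alphabetically by wire protocol name, so the output is
# stable across runs.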
3131 3137 def forwiretype(self, wiretype):
3132 3138 engine = self._engines[self._wiretypes[wiretype]]
3133 3139 if not engine.available():
3134 3140 raise error.Abort(_('compression engine %s could not be loaded') %
3135 3141 engine.name())
3136 3142 return engine
3137 3143
3138 3144 def forrevlogheader(self, header):
3139 3145 """Obtain a compression engine registered to a revlog header.
3140 3146
3141 3147 Will raise KeyError if the revlog header value isn't registered.
3142 3148 """
3143 3149 return self._engines[self._revlogheaders[header]]
3144 3150
3145 3151 compengines = compressormanager()
3146 3152
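# Illustrative sketch (not part of the original file): typical lookups
# against the global manager. The engines referenced here are registered
# further down in this file.
#
#   compengines.supportedbundlenames         # e.g. set(['gzip', 'bzip2',
#                                            #           'none', 'zstd'])
#   gz = compengines.forbundlename('gzip')   # user-facing bundle spec name
#   gz = compengines.forbundletype('GZ')     # internal bundle identifier
#
# Both lookups raise KeyError for unknown names and abort if the engine is
# registered but unavailable (e.g. zstd without its C extension).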
3147 3153 class compressionengine(object):
3148 3154 """Base class for compression engines.
3149 3155
3150 3156 Compression engines must implement the interface defined by this class.
3151 3157 """
3152 3158 def name(self):
3153 3159 """Returns the name of the compression engine.
3154 3160
3155 3161 This is the key the engine is registered under.
3156 3162
3157 3163 This method must be implemented.
3158 3164 """
3159 3165 raise NotImplementedError()
3160 3166
3161 3167 def available(self):
3162 3168 """Whether the compression engine is available.
3163 3169
3164 3170 The intent of this method is to allow optional compression engines
3165 3171 that may not be available in all installations (such as engines relying
3166 3172 on C extensions that may not be present).
3167 3173 """
3168 3174 return True
3169 3175
3170 3176 def bundletype(self):
3171 3177 """Describes bundle identifiers for this engine.
3172 3178
3173 3179 If this compression engine isn't supported for bundles, returns None.
3174 3180
3175 3181 If this engine can be used for bundles, returns a 2-tuple of strings of
3176 3182 the user-facing "bundle spec" compression name and an internal
3177 3183 identifier used to denote the compression format within bundles. To
3178 3184 exclude the name from external usage, set the first element to ``None``.
3179 3185
3180 3186 If bundle compression is supported, the class must also implement
3181 3187 ``compressstream`` and ``decompressorreader``.
3182 3188 """
3183 3189 return None
3184 3190
3185 3191 def wireprotosupport(self):
3186 3192 """Declare support for this compression format on the wire protocol.
3187 3193
3188 3194 If this compression engine isn't supported for compressing wire
3189 3195 protocol payloads, returns None.
3190 3196
3191 3197 Otherwise, returns ``compenginewireprotosupport`` with the following
3192 3198 fields:
3193 3199
3194 3200 * String format identifier
3195 3201 * Integer priority for the server
3196 3202 * Integer priority for the client
3197 3203
3198 3204 The integer priorities are used to order the advertisement of format
3199 3205 support by server and client. The highest integer is advertised
3200 3206 first. Integers with non-positive values aren't advertised.
3201 3207
3202 3208 The priority values are somewhat arbitrary and only used for default
3203 3209 ordering. The relative order can be changed via config options.
3204 3210
3205 3211 If wire protocol compression is supported, the class must also implement
3206 3212 ``compressstream`` and ``decompressorreader``.
3207 3213 """
3208 3214 return None
3209 3215
3210 3216 def revlogheader(self):
3211 3217 """Header added to revlog chunks that identifies this engine.
3212 3218
3213 3219 If this engine can be used to compress revlogs, this method should
3214 3220 return the bytes used to identify chunks compressed with this engine.
3215 3221 Else, the method should return ``None`` to indicate it does not
3216 3222 participate in revlog compression.
3217 3223 """
3218 3224 return None
3219 3225
3220 3226 def compressstream(self, it, opts=None):
3221 3227 """Compress an iterator of chunks.
3222 3228
3223 3229 The method receives an iterator (ideally a generator) of chunks of
3224 3230 bytes to be compressed. It returns an iterator (ideally a generator)
3225 3231 of chunks of bytes representing the compressed output.
3226 3232
3227 3233 Optionally accepts an argument defining how to perform compression.
3228 3234 Each engine treats this argument differently.
3229 3235 """
3230 3236 raise NotImplementedError()
3231 3237
3232 3238 def decompressorreader(self, fh):
3233 3239 """Perform decompression on a file object.
3234 3240
3235 3241 Argument is an object with a ``read(size)`` method that returns
3236 3242 compressed data. Return value is an object with a ``read(size)``
3237 3243 method that returns uncompressed data.
3238 3244 """
3239 3245 raise NotImplementedError()
3240 3246
3241 3247 def revlogcompressor(self, opts=None):
3242 3248 """Obtain an object that can be used to compress revlog entries.
3243 3249
3244 3250 The object has a ``compress(data)`` method that compresses binary
3245 3251 data. This method returns compressed binary data or ``None`` if
3246 3252 the data could not be compressed (too small, not compressible, etc).
3247 3253 The returned data should have a header uniquely identifying this
3248 3254 compression format so decompression can be routed to this engine.
3249 3255 This header should be identified by the ``revlogheader()`` return
3250 3256 value.
3251 3257
3252 3258 The object has a ``decompress(data)`` method that decompresses
3253 3259 data. The method will only be called if ``data`` begins with
3254 3260 ``revlogheader()``. The method should return the raw, uncompressed
3255 3261 data or raise a ``RevlogError``.
3256 3262
3257 3263 The object is reusable but is not thread safe.
3258 3264 """
3259 3265 raise NotImplementedError()
3260 3266
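# Illustrative sketch (not part of the original file): a minimal engine
# satisfying the interface above. The name, identifiers, and reuse of zlib
# here are made up for demonstration; only ``name()`` is strictly required,
# plus ``compressstream``/``decompressorreader`` once bundle support is
# declared via ``bundletype()``.
#
#   class _myengine(compressionengine):
#       def name(self):
#           return 'myengine'
#
#       def bundletype(self):
#           # (user-facing bundle spec name, internal bundle identifier)
#           return 'myengine', 'MY'
#
#       def compressstream(self, it, opts=None):
#           z = zlib.compressobj()
#           for chunk in it:
#               data = z.compress(chunk)
#               if data:
#                   yield data
#           yield z.flush()
#
#       def decompressorreader(self, fh):
#           def gen():
#               d = zlib.decompressobj()
#               for chunk in filechunkiter(fh):
#                   yield d.decompress(chunk)
#           return chunkbuffer(gen())
#
#   compengines.register(_myengine())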
3261 3267 class _zlibengine(compressionengine):
3262 3268 def name(self):
3263 3269 return 'zlib'
3264 3270
3265 3271 def bundletype(self):
3266 3272 return 'gzip', 'GZ'
3267 3273
3268 3274 def wireprotosupport(self):
3269 3275 return compewireprotosupport('zlib', 20, 20)
3270 3276
3271 3277 def revlogheader(self):
3272 3278 return 'x'
3273 3279
3274 3280 def compressstream(self, it, opts=None):
3275 3281 opts = opts or {}
3276 3282
3277 3283 z = zlib.compressobj(opts.get('level', -1))
3278 3284 for chunk in it:
3279 3285 data = z.compress(chunk)
3280 3286 # Not all calls to compress emit data. It is cheaper to inspect
3281 3287 # here than to feed empty chunks through the generator.
3282 3288 if data:
3283 3289 yield data
3284 3290
3285 3291 yield z.flush()
3286 3292
3287 3293 def decompressorreader(self, fh):
3288 3294 def gen():
3289 3295 d = zlib.decompressobj()
3290 3296 for chunk in filechunkiter(fh):
3291 3297 while chunk:
3292 3298 # Cap output chunk size to bound memory usage.
3293 3299 yield d.decompress(chunk, 2 ** 18)
3294 3300 chunk = d.unconsumed_tail
3295 3301
3296 3302 return chunkbuffer(gen())
3297 3303
3298 3304 class zlibrevlogcompressor(object):
3299 3305 def compress(self, data):
3300 3306 insize = len(data)
3301 3307 # Caller handles empty input case.
3302 3308 assert insize > 0
3303 3309
3304 3310 if insize < 44:
3305 3311 return None
3306 3312
3307 3313 elif insize <= 1000000:
3308 3314 compressed = zlib.compress(data)
3309 3315 if len(compressed) < insize:
3310 3316 return compressed
3311 3317 return None
3312 3318
3313 3319 # zlib makes an internal copy of the input buffer, doubling
3314 3320 # memory usage for large inputs. So do streaming compression
3315 3321 # on large inputs.
3316 3322 else:
3317 3323 z = zlib.compressobj()
3318 3324 parts = []
3319 3325 pos = 0
3320 3326 while pos < insize:
3321 3327 pos2 = pos + 2**20
3322 3328 parts.append(z.compress(data[pos:pos2]))
3323 3329 pos = pos2
3324 3330 parts.append(z.flush())
3325 3331
3326 3332 if sum(map(len, parts)) < insize:
3327 3333 return ''.join(parts)
3328 3334 return None
3329 3335
3330 3336 def decompress(self, data):
3331 3337 try:
3332 3338 return zlib.decompress(data)
3333 3339 except zlib.error as e:
3334 3340 raise error.RevlogError(_('revlog decompress error: %s') %
3335 3341 str(e))
3336 3342
3337 3343 def revlogcompressor(self, opts=None):
3338 3344 return self.zlibrevlogcompressor()
3339 3345
3340 3346 compengines.register(_zlibengine())
3341 3347
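# Illustrative sketch (not part of the original file): a compress/decompress
# round trip through the zlib engine just registered. ``io.BytesIO`` stands
# in for any object with a ``read(size)`` method.
#
#   import io
#   gz = compengines.forbundletype('GZ')
#   blob = ''.join(gz.compressstream(iter(['some ', 'payload'])))
#   reader = gz.decompressorreader(io.BytesIO(blob))
#   assert reader.read() == 'some payload'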
3342 3348 class _bz2engine(compressionengine):
3343 3349 def name(self):
3344 3350 return 'bz2'
3345 3351
3346 3352 def bundletype(self):
3347 3353 return 'bzip2', 'BZ'
3348 3354
3349 3355 # We declare a protocol name but don't advertise by default because
3350 3356 # it is slow.
3351 3357 def wireprotosupport(self):
3352 3358 return compewireprotosupport('bzip2', 0, 0)
3353 3359
3354 3360 def compressstream(self, it, opts=None):
3355 3361 opts = opts or {}
3356 3362 z = bz2.BZ2Compressor(opts.get('level', 9))
3357 3363 for chunk in it:
3358 3364 data = z.compress(chunk)
3359 3365 if data:
3360 3366 yield data
3361 3367
3362 3368 yield z.flush()
3363 3369
3364 3370 def decompressorreader(self, fh):
3365 3371 def gen():
3366 3372 d = bz2.BZ2Decompressor()
3367 3373 for chunk in filechunkiter(fh):
3368 3374 yield d.decompress(chunk)
3369 3375
3370 3376 return chunkbuffer(gen())
3371 3377
3372 3378 compengines.register(_bz2engine())
3373 3379
3374 3380 class _truncatedbz2engine(compressionengine):
3375 3381 def name(self):
3376 3382 return 'bz2truncated'
3377 3383
3378 3384 def bundletype(self):
3379 3385 return None, '_truncatedBZ'
3380 3386
3381 3387 # We don't implement compressstream because it is hackily handled elsewhere.
3382 3388
3383 3389 def decompressorreader(self, fh):
3384 3390 def gen():
3385 3391 # The input stream doesn't have the 'BZ' header. So add it back.
3386 3392 d = bz2.BZ2Decompressor()
3387 3393 d.decompress('BZ')
3388 3394 for chunk in filechunkiter(fh):
3389 3395 yield d.decompress(chunk)
3390 3396
3391 3397 return chunkbuffer(gen())
3392 3398
3393 3399 compengines.register(_truncatedbz2engine())
3394 3400
3395 3401 class _noopengine(compressionengine):
3396 3402 def name(self):
3397 3403 return 'none'
3398 3404
3399 3405 def bundletype(self):
3400 3406 return 'none', 'UN'
3401 3407
3402 3408 # Clients always support uncompressed payloads. Servers don't advertise
3403 3409 # them because, unless you are on a fast network, uncompressed payloads
3404 3410 # can easily saturate your network pipe.
3405 3411 def wireprotosupport(self):
3406 3412 return compewireprotosupport('none', 0, 10)
3407 3413
3408 3414 # We don't implement revlogheader because it is handled specially
3409 3415 # in the revlog class.
3410 3416
3411 3417 def compressstream(self, it, opts=None):
3412 3418 return it
3413 3419
3414 3420 def decompressorreader(self, fh):
3415 3421 return fh
3416 3422
3417 3423 class nooprevlogcompressor(object):
3418 3424 def compress(self, data):
3419 3425 return None
3420 3426
3421 3427 def revlogcompressor(self, opts=None):
3422 3428 return self.nooprevlogcompressor()
3423 3429
3424 3430 compengines.register(_noopengine())
3425 3431
3426 3432 class _zstdengine(compressionengine):
3427 3433 def name(self):
3428 3434 return 'zstd'
3429 3435
3430 3436 @propertycache
3431 3437 def _module(self):
3432 3438 # Not all installs have the zstd module available. So defer importing
3433 3439 # until first access.
3434 3440 try:
3435 3441 from . import zstd
3436 3442 # Force delayed import.
3437 3443 zstd.__version__
3438 3444 return zstd
3439 3445 except ImportError:
3440 3446 return None
3441 3447
3442 3448 def available(self):
3443 3449 return bool(self._module)
3444 3450
3445 3451 def bundletype(self):
3446 3452 return 'zstd', 'ZS'
3447 3453
3448 3454 def wireprotosupport(self):
3449 3455 return compewireprotosupport('zstd', 50, 50)
3450 3456
3451 3457 def revlogheader(self):
3452 3458 return '\x28'
3453 3459
3454 3460 def compressstream(self, it, opts=None):
3455 3461 opts = opts or {}
3456 3462 # zstd level 3 is almost always significantly faster than zlib
3457 3463 # while providing no worse compression. It strikes a good balance
3458 3464 # between speed and compression.
3459 3465 level = opts.get('level', 3)
3460 3466
3461 3467 zstd = self._module
3462 3468 z = zstd.ZstdCompressor(level=level).compressobj()
3463 3469 for chunk in it:
3464 3470 data = z.compress(chunk)
3465 3471 if data:
3466 3472 yield data
3467 3473
3468 3474 yield z.flush()
3469 3475
3470 3476 def decompressorreader(self, fh):
3471 3477 zstd = self._module
3472 3478 dctx = zstd.ZstdDecompressor()
3473 3479 return chunkbuffer(dctx.read_from(fh))
3474 3480
3475 3481 class zstdrevlogcompressor(object):
3476 3482 def __init__(self, zstd, level=3):
3477 3483 # Writing the content size adds a few bytes to the output. However,
3478 3484 # it allows decompression to be more optimal since we can
3479 3485 # pre-allocate a buffer to hold the result.
3480 3486 self._cctx = zstd.ZstdCompressor(level=level,
3481 3487 write_content_size=True)
3482 3488 self._dctx = zstd.ZstdDecompressor()
3483 3489 self._compinsize = zstd.COMPRESSION_RECOMMENDED_INPUT_SIZE
3484 3490 self._decompinsize = zstd.DECOMPRESSION_RECOMMENDED_INPUT_SIZE
3485 3491
3486 3492 def compress(self, data):
3487 3493 insize = len(data)
3488 3494 # Caller handles empty input case.
3489 3495 assert insize > 0
3490 3496
3491 3497 if insize < 50:
3492 3498 return None
3493 3499
3494 3500 elif insize <= 1000000:
3495 3501 compressed = self._cctx.compress(data)
3496 3502 if len(compressed) < insize:
3497 3503 return compressed
3498 3504 return None
3499 3505 else:
3500 3506 z = self._cctx.compressobj()
3501 3507 chunks = []
3502 3508 pos = 0
3503 3509 while pos < insize:
3504 3510 pos2 = pos + self._compinsize
3505 3511 chunk = z.compress(data[pos:pos2])
3506 3512 if chunk:
3507 3513 chunks.append(chunk)
3508 3514 pos = pos2
3509 3515 chunks.append(z.flush())
3510 3516
3511 3517 if sum(map(len, chunks)) < insize:
3512 3518 return ''.join(chunks)
3513 3519 return None
3514 3520
3515 3521 def decompress(self, data):
3516 3522 insize = len(data)
3517 3523
3518 3524 try:
3519 3525 # This was measured to be faster than other streaming
3520 3526 # decompressors.
3521 3527 dobj = self._dctx.decompressobj()
3522 3528 chunks = []
3523 3529 pos = 0
3524 3530 while pos < insize:
3525 3531 pos2 = pos + self._decompinsize
3526 3532 chunk = dobj.decompress(data[pos:pos2])
3527 3533 if chunk:
3528 3534 chunks.append(chunk)
3529 3535 pos = pos2
3530 3536 # Frame should be exhausted, so no finish() API.
3531 3537
3532 3538 return ''.join(chunks)
3533 3539 except Exception as e:
3534 3540 raise error.RevlogError(_('revlog decompress error: %s') %
3535 3541 str(e))
3536 3542
3537 3543 def revlogcompressor(self, opts=None):
3538 3544 opts = opts or {}
3539 3545 return self.zstdrevlogcompressor(self._module,
3540 3546 level=opts.get('level', 3))
3541 3547
3542 3548 compengines.register(_zstdengine())
3543 3549
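# Illustrative sketch (not part of the original file): using the zstd
# engine's revlog compressor, guarded on availability since the zstd C
# extension may be absent from an installation.
#
#   if compengines['zstd'].available():
#       c = compengines['zstd'].revlogcompressor()
#       data = 'x' * 4096
#       comp = c.compress(data)   # None if compression wouldn't shrink data
#       if comp is not None:
#           # zstd frames begin with '\x28', matching revlogheader() above,
#           # so the full compressed chunk routes back to this engine.
#           assert c.decompress(comp) == data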
3544 3550 # convenient shortcut
3545 3551 dst = debugstacktrace