##// END OF EJS Templates
util: reimplement lrucachedict...
Gregory Szorc -
r27371:45d996a5 default
parent child Browse files
Show More
@@ -1,2504 +1,2648 b''
1 # util.py - Mercurial utility functions and platform specific implementations
1 # util.py - Mercurial utility functions and platform specific implementations
2 #
2 #
3 # Copyright 2005 K. Thananchayan <thananck@yahoo.com>
3 # Copyright 2005 K. Thananchayan <thananck@yahoo.com>
4 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
5 # Copyright 2006 Vadim Gelfer <vadim.gelfer@gmail.com>
5 # Copyright 2006 Vadim Gelfer <vadim.gelfer@gmail.com>
6 #
6 #
7 # This software may be used and distributed according to the terms of the
7 # This software may be used and distributed according to the terms of the
8 # GNU General Public License version 2 or any later version.
8 # GNU General Public License version 2 or any later version.
9
9
10 """Mercurial utility functions and platform specific implementations.
10 """Mercurial utility functions and platform specific implementations.
11
11
12 This contains helper routines that are independent of the SCM core and
12 This contains helper routines that are independent of the SCM core and
13 hide platform-specific details from the core.
13 hide platform-specific details from the core.
14 """
14 """
15
15
16 from __future__ import absolute_import
16 from __future__ import absolute_import
17
17
18 import bz2
18 import bz2
19 import calendar
19 import calendar
20 import collections
20 import collections
21 import datetime
21 import datetime
22 import errno
22 import errno
23 import gc
23 import gc
24 import hashlib
24 import hashlib
25 import imp
25 import imp
26 import os
26 import os
27 import re as remod
27 import re as remod
28 import shutil
28 import shutil
29 import signal
29 import signal
30 import socket
30 import socket
31 import subprocess
31 import subprocess
32 import sys
32 import sys
33 import tempfile
33 import tempfile
34 import textwrap
34 import textwrap
35 import time
35 import time
36 import traceback
36 import traceback
37 import urllib
37 import urllib
38 import zlib
38 import zlib
39
39
40 from . import (
40 from . import (
41 encoding,
41 encoding,
42 error,
42 error,
43 i18n,
43 i18n,
44 osutil,
44 osutil,
45 parsers,
45 parsers,
46 )
46 )
47
47
48 if os.name == 'nt':
48 if os.name == 'nt':
49 from . import windows as platform
49 from . import windows as platform
50 else:
50 else:
51 from . import posix as platform
51 from . import posix as platform
52
52
# Hash constructors re-exported for convenience.
md5 = hashlib.md5
sha1 = hashlib.sha1
sha512 = hashlib.sha512
_ = i18n._

# Re-export the platform-specific implementations under stable names so the
# rest of the codebase never needs to know which platform module was loaded.
cachestat = platform.cachestat
checkexec = platform.checkexec
checklink = platform.checklink
copymode = platform.copymode
executablepath = platform.executablepath
expandglobs = platform.expandglobs
explainexit = platform.explainexit
findexe = platform.findexe
gethgcmd = platform.gethgcmd
getuser = platform.getuser
groupmembers = platform.groupmembers
groupname = platform.groupname
hidewindow = platform.hidewindow
isexec = platform.isexec
isowner = platform.isowner
localpath = platform.localpath
lookupreg = platform.lookupreg
makedir = platform.makedir
nlinks = platform.nlinks
normpath = platform.normpath
normcase = platform.normcase
normcasespec = platform.normcasespec
normcasefallback = platform.normcasefallback
openhardlinks = platform.openhardlinks
oslink = platform.oslink
parsepatchoutput = platform.parsepatchoutput
pconvert = platform.pconvert
poll = platform.poll
popen = platform.popen
posixfile = platform.posixfile
quotecommand = platform.quotecommand
readpipe = platform.readpipe
rename = platform.rename
removedirs = platform.removedirs
samedevice = platform.samedevice
samefile = platform.samefile
samestat = platform.samestat
setbinary = platform.setbinary
setflags = platform.setflags
setsignalhandler = platform.setsignalhandler
shellquote = platform.shellquote
spawndetached = platform.spawndetached
split = platform.split
sshargs = platform.sshargs
# Prefer the C implementation from osutil when it is available.
statfiles = getattr(osutil, 'statfiles', platform.statfiles)
statisexec = platform.statisexec
statislink = platform.statislink
termwidth = platform.termwidth
testpid = platform.testpid
umask = platform.umask
unlink = platform.unlink
unlinkpath = platform.unlinkpath
username = platform.username

# Python compatibility

# Unique sentinel used to distinguish "no value supplied" from None.
_notset = object()

# disable Python's problematic floating point timestamps (issue4836)
# (Python hypocritically says you shouldn't change this behavior in
# libraries, and sure enough Mercurial is not a library.)
os.stat_float_times(False)
120
120
def safehasattr(thing, attr):
    """Return True if ``thing`` has an attribute named ``attr``.

    Uses a private sentinel with getattr() so only a genuinely missing
    attribute reports False.
    """
    missing = _notset
    return getattr(thing, attr, missing) is not missing
123
123
# Registry of supported digest algorithms, keyed by name.
DIGESTS = {
    'md5': md5,
    'sha1': sha1,
    'sha512': sha512,
}
# List of digest types from strongest to weakest
DIGESTS_BY_STRENGTH = ['sha512', 'sha1', 'md5']

# Sanity check: every entry in the strength ordering must be registered.
for k in DIGESTS_BY_STRENGTH:
    assert k in DIGESTS
134
134
class digester(object):
    """helper to compute digests.

    This helper can be used to compute one or more digests given their name.

    >>> d = digester(['md5', 'sha1'])
    >>> d.update('foo')
    >>> [k for k in sorted(d)]
    ['md5', 'sha1']
    >>> d['md5']
    'acbd18db4cc2f85cedef654fccc4a4d8'
    >>> d['sha1']
    '0beec7b5ea3f0fdbc95d0dd47f3c5bc275da8a33'
    >>> digester.preferred(['md5', 'sha1'])
    'sha1'
    """

    def __init__(self, digests, s=''):
        self._hashes = {}
        for k in digests:
            if k not in DIGESTS:
                raise Abort(_('unknown digest type: %s') % k)
            self._hashes[k] = DIGESTS[k]()
        if s:
            self.update(s)

    def update(self, data):
        """Feed ``data`` into every underlying hash object."""
        for h in self._hashes.values():
            h.update(data)

    def __getitem__(self, key):
        if key not in DIGESTS:
            # Bug fix: report the requested 'key', not the stale loop
            # variable 'k' left over from __init__ (a NameError in a
            # fresh interpreter, or a wrong digest name in the message).
            raise Abort(_('unknown digest type: %s') % key)
        return self._hashes[key].hexdigest()

    def __iter__(self):
        return iter(self._hashes)

    @staticmethod
    def preferred(supported):
        """returns the strongest digest type in both supported and DIGESTS."""

        for k in DIGESTS_BY_STRENGTH:
            if k in supported:
                return k
        return None
181
181
class digestchecker(object):
    """file handle wrapper that additionally checks content against a given
    size and digests.

    d = digestchecker(fh, size, {'md5': '...'})

    When multiple digests are given, all of them are validated.
    """

    def __init__(self, fh, size, digests):
        self._fh = fh
        self._size = size
        self._got = 0
        self._digests = dict(digests)
        self._digester = digester(self._digests.keys())

    def read(self, length=-1):
        # Everything read flows through the digester so validate() can
        # later compare the observed digests against the expected ones.
        content = self._fh.read(length)
        self._digester.update(content)
        self._got += len(content)
        return content

    def validate(self):
        """Abort if the observed size or any digest disagrees."""
        if self._got != self._size:
            raise Abort(_('size mismatch: expected %d, got %d') %
                        (self._size, self._got))
        for k, v in self._digests.items():
            if v != self._digester[k]:
                # i18n: first parameter is a digest name
                raise Abort(_('%s mismatch: expected %s, got %s') %
                            (k, v, self._digester[k]))
213
213
# Ensure a 'buffer' callable exists on every interpreter: the builtin on
# Python 2, otherwise a shim.
try:
    buffer = buffer
except NameError:
    if sys.version_info[0] < 3:
        def buffer(sliceable, offset=0):
            """Slice-copy fallback for interpreters lacking buffer()."""
            return sliceable[offset:]
    else:
        def buffer(sliceable, offset=0):
            """Zero-copy view fallback built on memoryview."""
            return memoryview(sliceable)[offset:]
223
223
224 closefds = os.name == 'posix'
224 closefds = os.name == 'posix'
225
225
226 _chunksize = 4096
226 _chunksize = 4096
227
227
228 class bufferedinputpipe(object):
228 class bufferedinputpipe(object):
229 """a manually buffered input pipe
229 """a manually buffered input pipe
230
230
231 Python will not let us use buffered IO and lazy reading with 'polling' at
231 Python will not let us use buffered IO and lazy reading with 'polling' at
232 the same time. We cannot probe the buffer state and select will not detect
232 the same time. We cannot probe the buffer state and select will not detect
233 that data are ready to read if they are already buffered.
233 that data are ready to read if they are already buffered.
234
234
235 This class let us work around that by implementing its own buffering
235 This class let us work around that by implementing its own buffering
236 (allowing efficient readline) while offering a way to know if the buffer is
236 (allowing efficient readline) while offering a way to know if the buffer is
237 empty from the output (allowing collaboration of the buffer with polling).
237 empty from the output (allowing collaboration of the buffer with polling).
238
238
239 This class lives in the 'util' module because it makes use of the 'os'
239 This class lives in the 'util' module because it makes use of the 'os'
240 module from the python stdlib.
240 module from the python stdlib.
241 """
241 """
242
242
243 def __init__(self, input):
243 def __init__(self, input):
244 self._input = input
244 self._input = input
245 self._buffer = []
245 self._buffer = []
246 self._eof = False
246 self._eof = False
247 self._lenbuf = 0
247 self._lenbuf = 0
248
248
249 @property
249 @property
250 def hasbuffer(self):
250 def hasbuffer(self):
251 """True is any data is currently buffered
251 """True is any data is currently buffered
252
252
253 This will be used externally a pre-step for polling IO. If there is
253 This will be used externally a pre-step for polling IO. If there is
254 already data then no polling should be set in place."""
254 already data then no polling should be set in place."""
255 return bool(self._buffer)
255 return bool(self._buffer)
256
256
257 @property
257 @property
258 def closed(self):
258 def closed(self):
259 return self._input.closed
259 return self._input.closed
260
260
261 def fileno(self):
261 def fileno(self):
262 return self._input.fileno()
262 return self._input.fileno()
263
263
264 def close(self):
264 def close(self):
265 return self._input.close()
265 return self._input.close()
266
266
267 def read(self, size):
267 def read(self, size):
268 while (not self._eof) and (self._lenbuf < size):
268 while (not self._eof) and (self._lenbuf < size):
269 self._fillbuffer()
269 self._fillbuffer()
270 return self._frombuffer(size)
270 return self._frombuffer(size)
271
271
272 def readline(self, *args, **kwargs):
272 def readline(self, *args, **kwargs):
273 if 1 < len(self._buffer):
273 if 1 < len(self._buffer):
274 # this should not happen because both read and readline end with a
274 # this should not happen because both read and readline end with a
275 # _frombuffer call that collapse it.
275 # _frombuffer call that collapse it.
276 self._buffer = [''.join(self._buffer)]
276 self._buffer = [''.join(self._buffer)]
277 self._lenbuf = len(self._buffer[0])
277 self._lenbuf = len(self._buffer[0])
278 lfi = -1
278 lfi = -1
279 if self._buffer:
279 if self._buffer:
280 lfi = self._buffer[-1].find('\n')
280 lfi = self._buffer[-1].find('\n')
281 while (not self._eof) and lfi < 0:
281 while (not self._eof) and lfi < 0:
282 self._fillbuffer()
282 self._fillbuffer()
283 if self._buffer:
283 if self._buffer:
284 lfi = self._buffer[-1].find('\n')
284 lfi = self._buffer[-1].find('\n')
285 size = lfi + 1
285 size = lfi + 1
286 if lfi < 0: # end of file
286 if lfi < 0: # end of file
287 size = self._lenbuf
287 size = self._lenbuf
288 elif 1 < len(self._buffer):
288 elif 1 < len(self._buffer):
289 # we need to take previous chunks into account
289 # we need to take previous chunks into account
290 size += self._lenbuf - len(self._buffer[-1])
290 size += self._lenbuf - len(self._buffer[-1])
291 return self._frombuffer(size)
291 return self._frombuffer(size)
292
292
293 def _frombuffer(self, size):
293 def _frombuffer(self, size):
294 """return at most 'size' data from the buffer
294 """return at most 'size' data from the buffer
295
295
296 The data are removed from the buffer."""
296 The data are removed from the buffer."""
297 if size == 0 or not self._buffer:
297 if size == 0 or not self._buffer:
298 return ''
298 return ''
299 buf = self._buffer[0]
299 buf = self._buffer[0]
300 if 1 < len(self._buffer):
300 if 1 < len(self._buffer):
301 buf = ''.join(self._buffer)
301 buf = ''.join(self._buffer)
302
302
303 data = buf[:size]
303 data = buf[:size]
304 buf = buf[len(data):]
304 buf = buf[len(data):]
305 if buf:
305 if buf:
306 self._buffer = [buf]
306 self._buffer = [buf]
307 self._lenbuf = len(buf)
307 self._lenbuf = len(buf)
308 else:
308 else:
309 self._buffer = []
309 self._buffer = []
310 self._lenbuf = 0
310 self._lenbuf = 0
311 return data
311 return data
312
312
313 def _fillbuffer(self):
313 def _fillbuffer(self):
314 """read data to the buffer"""
314 """read data to the buffer"""
315 data = os.read(self._input.fileno(), _chunksize)
315 data = os.read(self._input.fileno(), _chunksize)
316 if not data:
316 if not data:
317 self._eof = True
317 self._eof = True
318 else:
318 else:
319 self._lenbuf += len(data)
319 self._lenbuf += len(data)
320 self._buffer.append(data)
320 self._buffer.append(data)
321
321
def popen2(cmd, env=None, newlines=False):
    """Run ``cmd`` through the shell; return its (stdin, stdout) pipes."""
    # Setting bufsize to -1 lets the system decide the buffer size.
    # The default for bufsize is 0, meaning unbuffered. This leads to
    # poor performance on Mac OS X: http://bugs.python.org/issue4194
    proc = subprocess.Popen(cmd, shell=True, bufsize=-1,
                            close_fds=closefds,
                            stdin=subprocess.PIPE, stdout=subprocess.PIPE,
                            universal_newlines=newlines,
                            env=env)
    return proc.stdin, proc.stdout
332
332
def popen3(cmd, env=None, newlines=False):
    """Like popen4, but drop the process handle from the return value."""
    stdin, stdout, stderr, _proc = popen4(cmd, env, newlines)
    return stdin, stdout, stderr
336
336
def popen4(cmd, env=None, newlines=False, bufsize=-1):
    """Run ``cmd`` through the shell; return (stdin, stdout, stderr, proc)."""
    proc = subprocess.Popen(cmd, shell=True, bufsize=bufsize,
                            close_fds=closefds,
                            stdin=subprocess.PIPE, stdout=subprocess.PIPE,
                            stderr=subprocess.PIPE,
                            universal_newlines=newlines,
                            env=env)
    return proc.stdin, proc.stdout, proc.stderr, proc
345
345
def version():
    """Return version information if available."""
    try:
        from . import __version__
    except ImportError:
        # __version__ is generated at build time; development checkouts
        # may not have it.
        return 'unknown'
    return __version__.version
353
353
def versiontuple(v=None, n=4):
    """Parses a Mercurial version string into an N-tuple.

    The version string to be parsed is specified with the ``v`` argument.
    If it isn't defined, the current Mercurial version string will be parsed.

    ``n`` can be 2, 3, or 4. Here is how some version strings map to
    returned values:

    >>> v = '3.6.1+190-df9b73d2d444'
    >>> versiontuple(v, 2)
    (3, 6)
    >>> versiontuple(v, 3)
    (3, 6, 1)
    >>> versiontuple(v, 4)
    (3, 6, 1, '190-df9b73d2d444')

    >>> versiontuple('3.6.1+190-df9b73d2d444+20151118')
    (3, 6, 1, '190-df9b73d2d444+20151118')

    >>> v = '3.6'
    >>> versiontuple(v, 2)
    (3, 6)
    >>> versiontuple(v, 3)
    (3, 6, None)
    >>> versiontuple(v, 4)
    (3, 6, None, None)
    """
    if not v:
        v = version()
    # Split off the '+extra' local-build suffix, if any.
    vparts, sep, extra = v.partition('+')
    if not sep:
        extra = None

    vints = []
    for part in vparts.split('.'):
        try:
            vints.append(int(part))
        except ValueError:
            break
    # (3, 6) -> (3, 6, None)
    while len(vints) < 3:
        vints.append(None)

    if n == 2:
        return (vints[0], vints[1])
    if n == 3:
        return (vints[0], vints[1], vints[2])
    if n == 4:
        return (vints[0], vints[1], vints[2], extra)
406
406
# used by parsedate
# Formats are tried in order; more specific (date + time) forms come first.
defaultdateformats = (
    '%Y-%m-%d %H:%M:%S',
    '%Y-%m-%d %I:%M:%S%p',
    '%Y-%m-%d %H:%M',
    '%Y-%m-%d %I:%M%p',
    '%Y-%m-%d',
    '%m-%d',
    '%m/%d',
    '%m/%d/%y',
    '%m/%d/%Y',
    '%a %b %d %H:%M:%S %Y',
    '%a %b %d %I:%M:%S%p %Y',
    '%a, %d %b %Y %H:%M:%S', # GNU coreutils "/bin/date --rfc-2822"
    '%b %d %H:%M:%S %Y',
    '%b %d %I:%M:%S%p %Y',
    '%b %d %H:%M:%S',
    '%b %d %I:%M:%S%p',
    '%b %d %H:%M',
    '%b %d %I:%M%p',
    '%b %d %Y',
    '%b %d',
    '%H:%M:%S',
    '%I:%M:%S%p',
    '%H:%M',
    '%I:%M%p',
)

# The default formats plus a few very loose year/month-only forms.
extendeddateformats = defaultdateformats + (
    "%Y",
    "%Y-%m",
    "%b",
    "%b %Y",
)
441
441
def cachefunc(func):
    '''cache the result of function calls'''
    # XXX doesn't handle keywords args
    # func.__code__ exists on Python 2.6+ and Python 3; the legacy
    # func.func_code spelling was removed in Python 3.
    argcount = func.__code__.co_argcount
    if argcount == 0:
        cache = []
        def f():
            if len(cache) == 0:
                cache.append(func())
            return cache[0]
        return f
    cache = {}
    if argcount == 1:
        # we gain a small amount of time because
        # we don't need to pack/unpack the list
        def f(arg):
            if arg not in cache:
                cache[arg] = func(arg)
            return cache[arg]
    else:
        def f(*args):
            if args not in cache:
                cache[args] = func(*args)
            return cache[args]

    return f
467
467
class sortdict(dict):
    '''a simple sorted dictionary'''
    def __init__(self, data=None):
        # _list tracks keys in insertion order; re-setting a key moves it
        # to the end (see __setitem__).
        self._list = []
        if data:
            self.update(data)
    def copy(self):
        return sortdict(self)
    def __setitem__(self, key, val):
        if key in self:
            self._list.remove(key)
        self._list.append(key)
        dict.__setitem__(self, key, val)
    def __iter__(self):
        return iter(self._list)
    def update(self, src):
        if isinstance(src, dict):
            src = src.iteritems()
        for k, v in src:
            self[k] = v
    def clear(self):
        dict.clear(self)
        self._list = []
    def items(self):
        return [(key, self[key]) for key in self._list]
    def __delitem__(self, key):
        dict.__delitem__(self, key)
        self._list.remove(key)
    def pop(self, key, *args, **kwargs):
        dict.pop(self, key, *args, **kwargs)
        try:
            self._list.remove(key)
        except ValueError:
            # key was absent (a default was returned); nothing to unlist
            pass
    def keys(self):
        return self._list
    def iterkeys(self):
        return iter(self._list)
    def iteritems(self):
        for key in self._list:
            yield key, self[key]
    def insert(self, index, key, val):
        self._list.insert(index, key)
        dict.__setitem__(self, key, val)
512
512
class _lrucachenode(object):
    """A node in a doubly linked list.

    Holds a reference to nodes on either side as well as a key-value
    pair for the dictionary entry.
    """
    # __slots__ keeps per-node overhead small; caches may hold many nodes.
    __slots__ = ('next', 'prev', 'key', 'value')

    def __init__(self):
        # Link pointers are wired up by the owning cache, not here.
        self.next = None
        self.prev = None

        # ``_notset`` marks a slot that holds no entry.
        self.key = _notset
        self.value = None

    def markempty(self):
        """Mark the node as emptied."""
        self.key = _notset
531
class lrucachedict(object):
    """Dict that caches most recent accesses and sets.

    The dict consists of an actual backing dict - indexed by original
    key - and a doubly linked circular list defining the order of entries in
    the cache.

    The head node is the newest entry in the cache. If the cache is full,
    we recycle head.prev and make it the new head. Cache accesses result in
    the node being moved to before the existing head and being marked as the
    new head node.

    NOTE: construction of this class doesn't scale well if the cache size
    is in the thousands. Avoid creating hundreds or thousands of instances
    with large capacities.
    """
    def __init__(self, max):
        self._cache = {}

        # Start with a single self-linked node; capacity grows lazily up
        # to ``max`` nodes as entries are inserted.
        self._head = head = _lrucachenode()
        head.prev = head
        head.next = head
        self._size = 1
        self._capacity = max

    def __len__(self):
        return len(self._cache)

    def __contains__(self, k):
        return k in self._cache

    def __iter__(self):
        # We don't have to iterate in cache order, but why not.
        n = self._head
        for i in range(len(self._cache)):
            yield n.key
            n = n.next

    def __getitem__(self, k):
        node = self._cache[k]
        self._movetohead(node)
        return node.value

    def __setitem__(self, k, v):
        node = self._cache.get(k)
        # Replace existing value and mark as newest.
        if node is not None:
            node.value = v
            self._movetohead(node)
            return

        if self._size < self._capacity:
            node = self._addcapacity()
        else:
            # Grab the last/oldest item.
            node = self._head.prev

        # At capacity. Kill the old entry.
        if node.key is not _notset:
            del self._cache[node.key]

        node.key = k
        node.value = v
        self._cache[k] = node
        # And mark it as newest entry. No need to adjust order since it
        # is already self._head.prev.
        self._head = node

    def __delitem__(self, k):
        node = self._cache.pop(k)
        node.markempty()

        # Temporarily mark as newest item before re-adjusting head to make
        # this node the oldest item.
        self._movetohead(node)
        self._head = node.next

    # Additional dict methods.

    def get(self, k, default=None):
        # Bug fix: return the stored value (refreshing recency), not the
        # internal _lrucachenode wrapper held in the backing dict.
        try:
            node = self._cache[k]
        except KeyError:
            return default
        self._movetohead(node)
        return node.value

    def clear(self):
        n = self._head
        while n.key is not _notset:
            n.markempty()
            n = n.next

        self._cache.clear()

    def _movetohead(self, node):
        """Mark a node as the newest, making it the new head.

        When a node is accessed, it becomes the freshest entry in the LRU
        list, which is denoted by self._head.

        Visually, let's make ``N`` the new head node (* denotes head):

            previous/oldest <-> head <-> next/next newest

            ----<->--- A* ---<->-----
            |                       |
            E <-> D <-> N <-> C <-> B

        To:

            ----<->--- N* ---<->-----
            |                       |
            E <-> D <-> C <-> B <-> A

        This requires the following moves:

           C.next = D  (node.prev.next = node.next)
           D.prev = C  (node.next.prev = node.prev)
           E.next = N  (head.prev.next = node)
           N.prev = E  (node.prev = head.prev)
           N.next = A  (node.next = head)
           A.prev = N  (head.prev = node)
        """
        head = self._head
        # C.next = D
        node.prev.next = node.next
        # D.prev = C
        node.next.prev = node.prev
        # N.prev = E
        node.prev = head.prev
        # N.next = A
        # It is tempting to do just "head" here, however if node is
        # adjacent to head, this will do bad things.
        node.next = head.prev.next
        # E.next = N
        node.next.prev = node
        # A.prev = N
        node.prev.next = node

        self._head = node

    def _addcapacity(self):
        """Add a node to the circular linked list.

        The new node is inserted before the head node.
        """
        head = self._head
        node = _lrucachenode()
        head.prev.next = node
        node.prev = head.prev
        node.next = head
        head.prev = node
        self._size += 1
        return node
541
685
def lrucachefunc(func):
    '''cache most recent results of function calls'''
    cache = {}
    order = collections.deque()
    # Single-argument callables get a slightly cheaper wrapper that keys
    # the cache on the bare argument instead of an args tuple.
    if func.func_code.co_argcount == 1:
        def f(arg):
            if arg in cache:
                # Cache hit: refresh recency.
                order.remove(arg)
            else:
                if len(cache) > 20:
                    # Evict the least recently used entry.
                    del cache[order.popleft()]
                cache[arg] = func(arg)
            order.append(arg)
            return cache[arg]
    else:
        def f(*args):
            if args in cache:
                order.remove(args)
            else:
                if len(cache) > 20:
                    del cache[order.popleft()]
                cache[args] = func(*args)
            order.append(args)
            return cache[args]

    return f
class propertycache(object):
    """Descriptor caching the wrapped function's result on the instance.

    On the first attribute access the function is invoked and its result
    stored in the instance ``__dict__`` under the same name, so later
    accesses bypass the descriptor entirely.
    """
    def __init__(self, func):
        self.func = func
        self.name = func.__name__

    def __get__(self, obj, type=None):
        value = self.func(obj)
        self.cachevalue(obj, value)
        return value

    def cachevalue(self, obj, value):
        # __dict__ assignment required to bypass __setattr__ (eg: repoview)
        obj.__dict__[self.name] = value
def pipefilter(s, cmd):
    '''filter string S through command CMD, returning its output'''
    proc = subprocess.Popen(cmd, shell=True, close_fds=closefds,
                            stdin=subprocess.PIPE,
                            stdout=subprocess.PIPE)
    stdout, stderr = proc.communicate(s)
    # stderr is not captured; only the command's stdout is returned.
    return stdout
def tempfilter(s, cmd):
    '''filter string S through a pair of temporary files with CMD.
    CMD is used as a template to create the real command to be run,
    with the strings INFILE and OUTFILE replaced by the real names of
    the temporary files generated.'''
    inname = outname = None
    try:
        # Write the input to a temporary file for the command to read.
        infd, inname = tempfile.mkstemp(prefix='hg-filter-in-')
        fp = os.fdopen(infd, 'wb')
        fp.write(s)
        fp.close()
        # Reserve an output file; the command will (re)write it.
        outfd, outname = tempfile.mkstemp(prefix='hg-filter-out-')
        os.close(outfd)
        cmd = cmd.replace('INFILE', inname)
        cmd = cmd.replace('OUTFILE', outname)
        code = os.system(cmd)
        if sys.platform == 'OpenVMS' and code & 1:
            code = 0
        if code:
            raise Abort(_("command '%s' failed: %s") %
                        (cmd, explainexit(code)))
        fp = open(outname, 'rb')
        r = fp.read()
        fp.close()
        return r
    finally:
        # Best-effort removal of both temporary files.
        try:
            if inname:
                os.unlink(inname)
        except OSError:
            pass
        try:
            if outname:
                os.unlink(outname)
        except OSError:
            pass
# Map of filter-spec prefixes to their implementations; the prefix is
# stripped from the command before it is handed to the implementation.
filtertable = {
    'tempfile:': tempfilter,
    'pipe:': pipefilter,
}
def filter(s, cmd):
    "filter a string through a command that transforms its input to its output"
    for prefix, handler in filtertable.iteritems():
        if cmd.startswith(prefix):
            return handler(s, cmd[len(prefix):].lstrip())
    # No recognized prefix: treat the whole command as a shell pipe.
    return pipefilter(s, cmd)
def binary(s):
    """return true if a string is binary data"""
    # Presence of a NUL byte is the heuristic; empty input is not binary.
    if not s:
        return False
    return '\0' in s
def increasingchunks(source, min=1024, max=65536):
    '''return no less than min bytes per chunk while data remains,
    doubling min after each chunk until it reaches max'''
    def log2(x):
        # Index of the highest set bit; 0 for x == 0.
        if not x:
            return 0
        bits = 0
        while x:
            x >>= 1
            bits += 1
        return bits - 1

    pending = []
    pendinglen = 0
    for piece in source:
        pending.append(piece)
        pendinglen += len(piece)
        if pendinglen >= min:
            if min < max:
                # Double the threshold, jump to the largest power of two
                # not exceeding what we are about to emit, and clamp at max.
                min = min << 1
                nmin = 1 << log2(pendinglen)
                if nmin > min:
                    min = nmin
                if min > max:
                    min = max
            yield ''.join(pending)
            pending = []
            pendinglen = 0
    # Flush whatever is left, even if below the threshold.
    if pending:
        yield ''.join(pending)
# Re-export the common abort exception under a short local name.
Abort = error.Abort
def always(fn):
    """Matcher predicate accepting every input."""
    return True
def never(fn):
    """Matcher predicate rejecting every input."""
    return False
def nogc(func):
    """disable garbage collector

    Python's garbage collector triggers a GC each time a certain number of
    container objects (the number being defined by gc.get_threshold()) are
    allocated even when marked not to be tracked by the collector. Tracking has
    no effect on when GCs are triggered, only on what objects the GC looks
    into. As a workaround, disable GC while building complex (huge)
    containers.

    This garbage collector issue have been fixed in 2.7.
    """
    def wrapper(*args, **kwargs):
        wasenabled = gc.isenabled()
        gc.disable()
        try:
            return func(*args, **kwargs)
        finally:
            # Only restore the collector if it was on when we started.
            if wasenabled:
                gc.enable()
    return wrapper
def pathto(root, n1, n2):
    '''return the relative path from one place to another.
    root should use os.sep to separate directories
    n1 should use os.sep to separate directories
    n2 should use "/" to separate directories
    returns an os.sep-separated path.

    If n1 is a relative path, it's assumed it's
    relative to root.
    n2 should always be relative to root.
    '''
    if not n1:
        return localpath(n2)
    if os.path.isabs(n1):
        if os.path.splitdrive(root)[0] != os.path.splitdrive(n1)[0]:
            # Different drives: no relative path exists, anchor at root.
            return os.path.join(root, localpath(n2))
        n2 = '/'.join((pconvert(root), n2))
    left, right = splitpath(n1), n2.split('/')
    left.reverse()
    right.reverse()
    # Strip the common leading components of both paths.
    while left and right and left[-1] == right[-1]:
        left.pop()
        right.pop()
    right.reverse()
    # Climb out of what remains of n1, then descend into n2.
    return os.sep.join((['..'] * len(left)) + right) or '.'
def mainfrozen():
    """return True if we are a frozen executable.

    The code supports py2exe (most common, Windows only) and tools/freeze
    (portable, not much used).
    """
    if safehasattr(sys, "frozen"):      # new py2exe
        return True
    if safehasattr(sys, "importers"):   # old py2exe
        return True
    return imp.is_frozen("__main__")    # tools/freeze
# the location of data files matching the source code
if mainfrozen():
    # executable version (py2exe) doesn't support __file__
    datapath = os.path.dirname(sys.executable)
else:
    datapath = os.path.dirname(__file__)

i18n.setdatapath(datapath)

# Cached path of the 'hg' executable, resolved lazily by hgexecutable().
_hgexecutable = None
def hgexecutable():
    """return location of the 'hg' executable.

    Defaults to $HG or 'hg' in the search path.
    """
    if _hgexecutable is None:
        # Resolution order: $HG, frozen executable, a __main__ script
        # named 'hg', then a search of PATH (or argv[0] as last resort).
        envhg = os.environ.get('HG')
        mainmod = sys.modules['__main__']
        if envhg:
            _sethgexecutable(envhg)
        elif mainfrozen():
            _sethgexecutable(sys.executable)
        elif os.path.basename(getattr(mainmod, '__file__', '')) == 'hg':
            _sethgexecutable(mainmod.__file__)
        else:
            exe = findexe('hg') or os.path.basename(sys.argv[0])
            _sethgexecutable(exe)
    return _hgexecutable
def _sethgexecutable(path):
    """set location of the 'hg' executable"""
    # Stored in a module global so hgexecutable() can memoize it.
    global _hgexecutable
    _hgexecutable = path
774 def _isstdout(f):
918 def _isstdout(f):
775 fileno = getattr(f, 'fileno', None)
919 fileno = getattr(f, 'fileno', None)
776 return fileno and fileno() == sys.__stdout__.fileno()
920 return fileno and fileno() == sys.__stdout__.fileno()
777
921
def system(cmd, environ=None, cwd=None, onerr=None, errprefix=None, out=None):
    '''enhanced shell command execution.
    run with environment maybe modified, maybe in different dir.

    if command fails and onerr is None, return status, else raise onerr
    object as exception.

    if out is specified, it is assumed to be a file-like object that has a
    write() method. stdout and stderr will be redirected to out.'''
    if environ is None:
        environ = {}
    try:
        sys.stdout.flush()
    except Exception:
        pass

    def py2shell(val):
        'convert python object into string that is useful to shell'
        if val is None or val is False:
            return '0'
        if val is True:
            return '1'
        return str(val)

    origcmd = cmd
    cmd = quotecommand(cmd)
    if sys.platform == 'plan9' and (sys.version_info[0] == 2
                                    and sys.version_info[1] < 7):
        # subprocess kludge to work around issues in half-baked Python
        # ports, notably bichued/python:
        if not cwd is None:
            os.chdir(cwd)
        rc = os.system(cmd)
    else:
        env = dict(os.environ)
        env.update((k, py2shell(v)) for k, v in environ.iteritems())
        env['HG'] = hgexecutable()
        if out is None or _isstdout(out):
            rc = subprocess.call(cmd, shell=True, close_fds=closefds,
                                 env=env, cwd=cwd)
        else:
            # Caller wants the output: forward the child's combined
            # stdout/stderr line by line into the supplied file object.
            proc = subprocess.Popen(cmd, shell=True, close_fds=closefds,
                                    env=env, cwd=cwd,
                                    stdout=subprocess.PIPE,
                                    stderr=subprocess.STDOUT)
            for line in iter(proc.stdout.readline, ''):
                out.write(line)
            proc.wait()
            rc = proc.returncode
    if sys.platform == 'OpenVMS' and rc & 1:
        rc = 0
    if rc and onerr:
        errmsg = '%s %s' % (os.path.basename(origcmd.split(None, 1)[0]),
                            explainexit(rc)[0])
        if errprefix:
            errmsg = '%s: %s' % (errprefix, errmsg)
        raise onerr(errmsg)
    return rc
def checksignature(func):
    '''wrap a function with code to check for calling errors'''
    def check(*args, **kwargs):
        try:
            return func(*args, **kwargs)
        except TypeError:
            # Translate the TypeError only when it came from the call
            # itself (traceback depth 1), not from inside the function.
            if len(traceback.extract_tb(sys.exc_info()[2])) == 1:
                raise error.SignatureError
            raise

    return check
def copyfile(src, dest, hardlink=False):
    "copy a file, preserving mode and atime/mtime"
    if os.path.lexists(dest):
        unlink(dest)
    # hardlinks are problematic on CIFS, quietly ignore this flag
    # until we find a way to work around it cleanly (issue4546)
    if False and hardlink:
        try:
            oslink(src, dest)
            return
        except (IOError, OSError):
            pass # fall back to normal copy
    if os.path.islink(src):
        # Recreate the symlink instead of copying its target.
        os.symlink(os.readlink(src), dest)
    else:
        try:
            shutil.copyfile(src, dest)
            shutil.copymode(src, dest)
        except shutil.Error as inst:
            raise Abort(str(inst))
def copyfiles(src, dst, hardlink=None, progress=lambda t, pos: None):
    """Copy a directory tree using hardlinks if possible."""
    num = 0

    if hardlink is None:
        # Default to hardlinking when source and destination share a device.
        hardlink = (os.stat(src).st_dev ==
                    os.stat(os.path.dirname(dst)).st_dev)
    if hardlink:
        topic = _('linking')
    else:
        topic = _('copying')

    if os.path.isdir(src):
        os.mkdir(dst)
        for name, kind in osutil.listdir(src):
            srcname = os.path.join(src, name)
            dstname = os.path.join(dst, name)
            def nprog(t, pos):
                # Offset child progress by the files already processed.
                if pos is not None:
                    return progress(t, pos + num)
            hardlink, n = copyfiles(srcname, dstname, hardlink, progress=nprog)
            num += n
    else:
        if hardlink:
            try:
                oslink(src, dst)
            except (IOError, OSError):
                # Linking failed: copy instead and stop trying to link.
                hardlink = False
                shutil.copy(src, dst)
        else:
            shutil.copy(src, dst)
        num += 1
        progress(topic, num)
    progress(topic, None)

    return hardlink, num
907 _winreservednames = '''con prn aux nul
1051 _winreservednames = '''con prn aux nul
908 com1 com2 com3 com4 com5 com6 com7 com8 com9
1052 com1 com2 com3 com4 com5 com6 com7 com8 com9
909 lpt1 lpt2 lpt3 lpt4 lpt5 lpt6 lpt7 lpt8 lpt9'''.split()
1053 lpt1 lpt2 lpt3 lpt4 lpt5 lpt6 lpt7 lpt8 lpt9'''.split()
910 _winreservedchars = ':*?"<>|'
1054 _winreservedchars = ':*?"<>|'
def checkwinfilename(path):
    r'''Check that the base-relative path is a valid filename on Windows.
    Returns None if the path is ok, or a UI string describing the problem.

    >>> checkwinfilename("just/a/normal/path")
    >>> checkwinfilename("foo/bar/con.xml")
    "filename contains 'con', which is reserved on Windows"
    >>> checkwinfilename("foo/con.xml/bar")
    "filename contains 'con', which is reserved on Windows"
    >>> checkwinfilename("foo/bar/xml.con")
    >>> checkwinfilename("foo/bar/AUX/bla.txt")
    "filename contains 'AUX', which is reserved on Windows"
    >>> checkwinfilename("foo/bar/bla:.txt")
    "filename contains ':', which is reserved on Windows"
    >>> checkwinfilename("foo/bar/b\07la.txt")
    "filename contains '\\x07', which is invalid on Windows"
    >>> checkwinfilename("foo/bar/bla ")
    "filename ends with ' ', which is not allowed on Windows"
    >>> checkwinfilename("../bar")
    >>> checkwinfilename("foo\\")
    "filename ends with '\\', which is invalid on Windows"
    >>> checkwinfilename("foo\\/bar")
    "directory name ends with '\\', which is invalid on Windows"
    '''
    if path.endswith('\\'):
        return _("filename ends with '\\', which is invalid on Windows")
    if '\\/' in path:
        return _("directory name ends with '\\', which is invalid on Windows")
    # Examine each path component under both separator conventions.
    for component in path.replace('\\', '/').split('/'):
        if not component:
            continue
        for c in component:
            if c in _winreservedchars:
                return _("filename contains '%s', which is reserved "
                         "on Windows") % c
            if ord(c) <= 31:
                return _("filename contains %r, which is invalid "
                         "on Windows") % c
        # Reserved device names apply to the part before the first dot.
        base = component.split('.')[0]
        if base and base.lower() in _winreservednames:
            return _("filename contains '%s', which is reserved "
                     "on Windows") % base
        last = component[-1]
        if last in '. ' and component not in '..':
            return _("filename ends with '%s', which is not allowed "
                     "on Windows") % last
if os.name == 'nt':
    # On Windows the generic check above is authoritative.
    checkosfilename = checkwinfilename
else:
    # Elsewhere, defer to the platform-specific implementation.
    checkosfilename = platform.checkosfilename
def makelock(info, pathname):
    # Prefer a symlink: creation is atomic, and the link target carries
    # the lock payload.
    try:
        return os.symlink(info, pathname)
    except OSError as why:
        if why.errno == errno.EEXIST:
            raise
    except AttributeError: # no symlink in os
        pass

    # Fallback: an exclusively-created regular file holding the payload.
    ld = os.open(pathname, os.O_CREAT | os.O_WRONLY | os.O_EXCL)
    os.write(ld, info)
    os.close(ld)
def readlock(pathname):
    try:
        return os.readlink(pathname)
    except OSError as why:
        # Not a symlink (or readlink unsupported on this filesystem):
        # fall through to the regular-file variant.
        if why.errno not in (errno.EINVAL, errno.ENOSYS):
            raise
    except AttributeError: # no symlink in os
        pass
    fp = posixfile(pathname)
    r = fp.read()
    fp.close()
    return r
def fstat(fp):
    '''stat file object that may not have fileno method.'''
    try:
        return os.fstat(fp.fileno())
    except AttributeError:
        # No descriptor available; fall back to the path-based stat.
        return os.stat(fp.name)
# File system features

def checkcase(path):
    """
    Return true if the given path is on a case-sensitive filesystem

    Requires a path (like /foo/.hg) ending with a foldable final
    directory component.
    """
    st1 = os.lstat(path)
    dirpath, base = os.path.split(path)
    folded = base.upper()
    if base == folded:
        folded = base.lower()
    if base == folded:
        # Nothing to fold: we cannot disprove case sensitivity.
        return True
    try:
        st2 = os.lstat(os.path.join(dirpath, folded))
        # Identical stat results mean both spellings name the same file,
        # i.e. the filesystem folds case.
        return st2 != st1
    except OSError:
        return True
1021 try:
1165 try:
1022 import re2
1166 import re2
1023 _re2 = None
1167 _re2 = None
1024 except ImportError:
1168 except ImportError:
1025 _re2 = False
1169 _re2 = False
1026
1170
class _re(object):
    """Facade that picks between the re2 and re regexp engines."""

    def _checkre2(self):
        global _re2
        try:
            # check if match works, see issue3964
            _re2 = bool(re2.match(r'\[([^\[]+)\]', '[ui]'))
        except ImportError:
            _re2 = False

    def compile(self, pat, flags=0):
        '''Compile a regular expression, using re2 if possible

        For best performance, use only re2-compatible regexp features. The
        only flags from the re module that are re2-compatible are
        IGNORECASE and MULTILINE.'''
        if _re2 is None:
            self._checkre2()
        if _re2 and (flags & ~(remod.IGNORECASE | remod.MULTILINE)) == 0:
            # re2 takes no flags argument; encode them inline instead.
            if flags & remod.IGNORECASE:
                pat = '(?i)' + pat
            if flags & remod.MULTILINE:
                pat = '(?m)' + pat
            try:
                return re2.compile(pat)
            except re2.error:
                pass
        return remod.compile(pat, flags)

    @propertycache
    def escape(self):
        '''Return the version of escape corresponding to self.compile.

        This is imperfect because whether re2 or re is used for a particular
        function depends on the flags, etc, but it's the best we can do.
        '''
        global _re2
        if _re2 is None:
            self._checkre2()
        if _re2:
            return re2.escape
        else:
            return remod.escape

re = _re()
1072 _fspathcache = {}
1216 _fspathcache = {}
def fspath(name, root):
    '''Get name in the case stored in the filesystem

    The name should be relative to root, and be normcase-ed for efficiency.

    Note that this function is unnecessary, and should not be
    called, for case-sensitive filesystems (simply because it's expensive).

    The root should be normcase-ed, too.
    '''
    def _makefspathcacheentry(dir):
        # map normcased names to their on-disk spellings for one directory
        return dict((normcase(n), n) for n in os.listdir(dir))

    seps = os.sep
    if os.altsep:
        seps = seps + os.altsep
    # Escape backslashes so they are literal inside the character classes
    # below. The previous code called seps.replace() but discarded the
    # result (str is immutable), so on Windows the class was '[\/]',
    # which matches only '/' -- backslash separators were never matched.
    seps = seps.replace('\\', '\\\\')
    pattern = remod.compile(r'([^%s]+)|([%s]+)' % (seps, seps))
    dir = os.path.normpath(root)
    result = []
    for part, sep in pattern.findall(name):
        if sep:
            result.append(sep)
            continue

        if dir not in _fspathcache:
            _fspathcache[dir] = _makefspathcacheentry(dir)
        contents = _fspathcache[dir]

        found = contents.get(part)
        if not found:
            # retry "once per directory" per "dirstate.walk" which
            # may take place for each patches of "hg qpush", for example
            _fspathcache[dir] = contents = _makefspathcacheentry(dir)
            found = contents.get(part)

        result.append(found or part)
        dir = os.path.join(dir, part)

    return ''.join(result)
1114
1258
def checknlink(testfile):
    '''check whether hardlink count reporting works properly'''

    # testfile may be open, so probe with separate scratch files; this
    # works around issue2543 (and testfile getting lost on Samba shares)
    original = testfile + ".hgtmp1"
    if os.path.lexists(original):
        return False
    try:
        posixfile(original, 'w').close()
    except IOError:
        return False

    hardlink = testfile + ".hgtmp2"
    probe = None
    try:
        oslink(original, hardlink)
        # nlinks() may behave differently for files on Windows shares if
        # the file is open.
        probe = posixfile(hardlink)
        return nlinks(hardlink) > 1
    except OSError:
        return False
    finally:
        if probe is not None:
            probe.close()
        # best-effort cleanup of both scratch files
        for scratch in (original, hardlink):
            try:
                os.unlink(scratch)
            except OSError:
                pass
1146
1290
def endswithsep(path):
    '''Check path ends with os.sep or os.altsep.'''
    if path.endswith(os.sep):
        return True
    # falls through to altsep (may be None on platforms without one)
    return os.altsep and path.endswith(os.altsep)
1150
1294
def splitpath(path):
    '''Split path on os.sep (a thin wrapper around str.split).

    os.altsep is deliberately ignored: this is meant to be an exact
    spelling of "path.split(os.sep)". Run the input through
    os.path.normpath() first if normalization is needed.'''
    return path.split(os.sep)
1158
1302
def gui():
    '''Are we running in a GUI?'''
    if sys.platform != 'darwin':
        return os.name == "nt" or os.environ.get("DISPLAY")
    if 'SSH_CONNECTION' in os.environ:
        # handle SSH access to a box where the user is logged in
        return False
    if getattr(osutil, 'isgui', None):
        # check if a CoreGraphics session is available
        return osutil.isgui()
    # pure build; use a safe default
    return True
1173
1317
def mktempcopy(name, emptyok=False, createmode=None):
    """Create a temporary file with the same contents from name

    The permission bits are copied from the original file.

    If the temporary file is going to be truncated immediately, you
    can use emptyok=True as an optimization.

    Returns the name of the temporary file.
    """
    dirpath, basename = os.path.split(name)
    fd, temp = tempfile.mkstemp(prefix='.%s-' % basename, dir=dirpath)
    os.close(fd)
    # mkstemp creates files with mode 0600, which is usually not what we
    # want. If the original file already exists, just copy its mode.
    # Otherwise, manually obey the umask.
    copymode(name, temp, createmode)
    if emptyok:
        # caller promises to truncate it anyway; skip the content copy
        return temp
    try:
        try:
            src = posixfile(name, "rb")
        except IOError as inst:
            if inst.errno == errno.ENOENT:
                # no original to copy; the empty temp file will do
                return temp
            if not getattr(inst, 'filename', None):
                inst.filename = name
            raise
        dst = posixfile(temp, "wb")
        for chunk in filechunkiter(src):
            dst.write(chunk)
        src.close()
        dst.close()
    except: # re-raises
        # don't leave a half-written temp file behind
        try:
            os.unlink(temp)
        except OSError:
            pass
        raise
    return temp
1212
1356
class atomictempfile(object):
    '''writable file object that atomically updates a file

    Writes are buffered in a temporary copy of the target file; only
    when close() is called is that copy renamed over the permanent
    name, making all of the changes visible at once. If the object is
    destroyed without being closed, every pending write is discarded.
    '''
    def __init__(self, name, mode='w+b', createmode=None):
        self.__name = name # permanent name
        self._tempname = mktempcopy(name, emptyok=('w' in mode),
                                    createmode=createmode)
        self._fp = posixfile(self._tempname, mode)

        # delegate file-like methods straight to the underlying temp file
        self.write = self._fp.write
        self.seek = self._fp.seek
        self.tell = self._fp.tell
        self.fileno = self._fp.fileno

    def close(self):
        # publish: close the temp file and rename it into place
        if not self._fp.closed:
            self._fp.close()
            rename(self._tempname, localpath(self.__name))

    def discard(self):
        # abandon: remove the temp file without touching the original
        if not self._fp.closed:
            try:
                os.unlink(self._tempname)
            except OSError:
                pass
            self._fp.close()

    def __del__(self):
        if safehasattr(self, '_fp'): # constructor actually did something
            self.discard()
1250
1394
def makedirs(name, mode=None, notindexed=False):
    """recursive directory creation with parent mode inheritance"""
    try:
        makedir(name, notindexed)
    except OSError as err:
        if err.errno == errno.EEXIST:
            # already present; nothing more to do
            return
        if err.errno != errno.ENOENT or not name:
            raise
        # a parent is missing: build the ancestry, then retry the leaf
        parent = os.path.dirname(os.path.abspath(name))
        if parent == name:
            raise
        makedirs(parent, mode, notindexed)
        makedir(name, notindexed)
    if mode is not None:
        os.chmod(name, mode)
1267
1411
def ensuredirs(name, mode=None, notindexed=False):
    """race-safe recursive directory creation

    Newly created directories are marked as "not to be indexed by
    the content indexing service", if ``notindexed`` is specified
    for "write" mode access.
    """
    if os.path.isdir(name):
        return
    parent = os.path.dirname(os.path.abspath(name))
    if parent != name:
        ensuredirs(parent, mode, notindexed)
    try:
        makedir(name, notindexed)
    except OSError as err:
        if err.errno != errno.EEXIST or not os.path.isdir(name):
            raise
        # someone else won the directory creation race; that is fine
        return
    if mode is not None:
        os.chmod(name, mode)
1289
1433
def readfile(path):
    """Return the entire contents of the file at ``path`` as bytes.

    The file is opened in binary mode and is guaranteed to be closed
    when this function returns, even if the read raises.
    """
    # 'with' replaces the original try/finally and closes fp on all paths
    with open(path, 'rb') as fp:
        return fp.read()
1296
1440
def writefile(path, text):
    """Replace the contents of the file at ``path`` with ``text``.

    ``text`` is written in binary mode; the file is created when it does
    not already exist and is always closed on return.
    """
    # 'with' replaces the original try/finally and closes fp on all paths
    with open(path, 'wb') as fp:
        fp.write(text)
1303
1447
def appendfile(path, text):
    """Append ``text`` to the file at ``path``.

    Opens in binary append mode, creating the file when missing, and
    always closes it on return.
    """
    # 'with' replaces the original try/finally and closes fp on all paths
    with open(path, 'ab') as fp:
        fp.write(text)
1310
1454
class chunkbuffer(object):
    """Allow arbitrary sized chunks of data to be efficiently read from an
    iterator over chunks of arbitrary size."""

    def __init__(self, in_iter):
        """in_iter is the iterator that's iterating over the input chunks."""
        def _splitlarge(chunks):
            # re-slice oversized (>1MB) chunks into 256k pieces so a huge
            # input chunk can't pin an equally huge string in the queue
            for chunk in chunks:
                if len(chunk) > 2**20:
                    pos = 0
                    while pos < len(chunk):
                        end = pos + 2 ** 18
                        yield chunk[pos:end]
                        pos = end
                else:
                    yield chunk
        self.iter = _splitlarge(in_iter)
        self._queue = collections.deque()
        # how many bytes of self._queue[0] have already been handed out
        self._chunkoffset = 0

    def read(self, l=None):
        """Read L bytes of data from the iterator of chunks of data.
        Returns less than L bytes if the iterator runs dry.

        If size parameter is omitted, read everything"""
        if l is None:
            return ''.join(self.iter)

        remaining = l
        pieces = []
        queue = self._queue
        while remaining > 0:
            # top up the queue with roughly 256k of fresh data
            if not queue:
                budget = 2**18
                for chunk in self.iter:
                    queue.append(chunk)
                    budget -= len(chunk)
                    if budget <= 0:
                        break
                if not queue:
                    break

            # Rather than popleft()/appendleft() round trips (which would
            # also build a new str for a partially consumed chunk), track
            # how far into queue[0] we have read via self._chunkoffset.
            chunk = queue[0]
            chunklen = len(chunk)
            offset = self._chunkoffset

            # whole, untouched chunk needed: hand it over as-is
            if offset == 0 and remaining >= chunklen:
                remaining -= chunklen
                queue.popleft()
                pieces.append(chunk)
                # self._chunkoffset stays at 0
                continue

            unread = chunklen - offset

            # the rest of a partially-read chunk is needed
            if remaining >= unread:
                remaining -= unread
                queue.popleft()
                # offset != 0 here (the block above handled offset == 0),
                # so this slice is never a full-chunk no-op copy
                pieces.append(chunk[offset:])
                self._chunkoffset = 0

            # only part of the chunk is needed: remember our position
            else:
                pieces.append(chunk[offset:offset + remaining])
                self._chunkoffset += remaining
                remaining -= unread

        return ''.join(pieces)
1391
1535
def filechunkiter(f, size=65536, limit=None):
    """Yield successive reads from file object f.

    Each read asks for at most ``size`` bytes (default 65536); at most
    ``limit`` bytes are produced in total when a limit is given. Chunks
    can come back shorter than ``size`` for the last chunk of the file,
    or for sockets and other files that sometimes return short reads."""
    assert size >= 0
    assert limit is None or limit >= 0
    while True:
        # respect the remaining byte budget, if one was given
        nbytes = size if limit is None else min(limit, size)
        chunk = nbytes and f.read(nbytes)
        if not chunk:
            break
        if limit:
            limit -= len(chunk)
        yield chunk
1412
1556
def makedate(timestamp=None):
    '''Return (unixtime, offset) for ``timestamp`` (default: now).

    The offset is the local timezone's distance from UTC in seconds at
    that instant, computed by differencing the UTC and local renderings
    of the same timestamp. Negative timestamps abort.'''
    if timestamp is None:
        timestamp = time.time()
    if timestamp < 0:
        hint = _("check your clock")
        raise Abort(_("negative timestamp: %d") % timestamp, hint=hint)
    utc = datetime.datetime.utcfromtimestamp(timestamp)
    local = datetime.datetime.fromtimestamp(timestamp)
    delta = utc - local
    tz = delta.days * 86400 + delta.seconds
    return timestamp, tz
1425
1569
def datestr(date=None, format='%a %b %d %H:%M:%S %Y %1%2'):
    """represent a (unixtime, offset) tuple as a localized time.

    unixtime is seconds since the epoch and offset is the timezone's
    distance from UTC in seconds. The %1/%2 placeholders in ``format``
    (and %z, which expands to both) render the hour and minute parts of
    the UTC offset."""
    t, tz = date or makedate()
    if t < 0:
        # time.gmtime(lt) fails on Windows for lt < -43200
        t = 0
        tz = 0
    if "%1" in format or "%2" in format or "%z" in format:
        sign = "-" if tz > 0 else "+"
        offsethours, offsetminutes = divmod(abs(tz) // 60, 60)
        format = format.replace("%z", "%1%2")
        format = format.replace("%1", "%c%02d" % (sign, offsethours))
        format = format.replace("%2", "%02d" % offsetminutes)
    try:
        t = time.gmtime(float(t) - tz)
    except ValueError:
        # time was out of range
        t = time.gmtime(sys.maxint)
    return time.strftime(format, t)
1449
1593
def shortdate(date=None):
    """Render a (timestamp, tzoff) tuple as a bare YYYY-MM-DD date."""
    return datestr(date, format='%Y-%m-%d')
1453
1597
def parsetimezone(tz):
    """Parse a timezone string into an offset in seconds, or None.

    Accepts "+HHMM"/"-HHMM" plus the literals "GMT" and "UTC". The sign
    is flipped on return to match the internal offset convention (see
    strdate: unixtime = localunixtime + offset), so "+0100" yields -3600.
    Unrecognized strings yield None."""
    if tz[0] in "+-" and len(tz) == 5 and tz[1:].isdigit():
        direction = 1 if tz[0] == "+" else -1
        hours = int(tz[1:3])
        minutes = int(tz[3:5])
        return -direction * (hours * 60 + minutes) * 60
    if tz in ("GMT", "UTC"):
        return 0
    return None
1464
1608
def strdate(string, format, defaults=[]):
    """parse a localized time string and return a (unixtime, offset) tuple.

    Raises ValueError when the string cannot be parsed against format."""
    # NOTE: unixtime = localunixtime + offset
    offset = parsetimezone(string.split()[-1])
    date = string
    if offset is not None:
        # drop the recognized timezone token from the end
        date = " ".join(string.split()[:-1])

    # fill in unspecified fields from defaults
    usenow = False # start out with the biased defaults
    for part in ("S", "M", "HI", "d", "mb", "yY"): # decreasing specificity
        if not any(("%" + p) in format for p in part):
            date += "@" + defaults[part][usenow]
            format += "@%" + part[0]
        else:
            # once a specific element appears, the less specific ones are
            # taken relative to today
            usenow = True

    timetuple = time.strptime(date, format)
    localunixtime = int(calendar.timegm(timetuple))
    if offset is None:
        # no explicit zone: derive the offset from the local timezone
        unixtime = int(time.mktime(timetuple))
        offset = unixtime - localunixtime
    else:
        unixtime = localunixtime + offset
    return unixtime, offset
1494
1638
def parsedate(date, formats=None, bias=None):
    """parse a localized date/time and return a (unixtime, offset) tuple.

    The date may be a "unixtime offset" string or in one of the specified
    formats. If the date already is a (unixtime, offset) tuple, it is returned.

    >>> parsedate(' today ') == parsedate(\
                                  datetime.date.today().strftime('%b %d'))
    True
    >>> parsedate( 'yesterday ') == parsedate((datetime.date.today() -\
                                               datetime.timedelta(days=1)\
                                              ).strftime('%b %d'))
    True
    >>> now, tz = makedate()
    >>> strnow, strtz = parsedate('now')
    >>> (strnow - now) < 1
    True
    >>> tz == strtz
    True
    """
    if bias is None:
        bias = {}
    if not date:
        return 0, 0
    if isinstance(date, tuple) and len(date) == 2:
        # already parsed
        return date
    if not formats:
        formats = defaultdateformats
    date = date.strip()

    # symbolic dates (both the literal and the localized spellings)
    if date == 'now' or date == _('now'):
        return makedate()
    if date == 'today' or date == _('today'):
        date = datetime.date.today().strftime('%b %d')
    elif date == 'yesterday' or date == _('yesterday'):
        date = (datetime.date.today() -
                datetime.timedelta(days=1)).strftime('%b %d')

    try:
        # fast path: a plain "unixtime offset" pair
        when, offset = map(int, date.split(' '))
    except ValueError:
        # build per-field defaults used to complete partial dates
        now = makedate()
        defaults = {}
        for part in ("d", "mb", "yY", "HI", "M", "S"):
            # value used when rounding the specific end of unknowns
            biased = bias.get(part)
            if biased is None:
                biased = "00" if part[0] in "HMS" else "0"

            # value used to match the generic end to today's date
            current = datestr(now, "%" + part[0])

            defaults[part] = (biased, current)

        for fmt in formats:
            try:
                when, offset = strdate(date, fmt, defaults)
            except (ValueError, OverflowError):
                pass
            else:
                break
        else:
            raise Abort(_('invalid date: %r') % date)
    # validate explicit (probably user-specified) date and
    # time zone offset. values must fit in signed 32 bits for
    # current 32-bit linux runtimes. timezones go from UTC-12
    # to UTC+14
    if abs(when) > 0x7fffffff:
        raise Abort(_('date exceeds 32 bits: %d') % when)
    if when < 0:
        raise Abort(_('negative date value: %d') % when)
    if offset < -50400 or offset > 43200:
        raise Abort(_('impossible time zone offset: %d') % offset)
    return when, offset
1573
1717
def matchdate(date):
    """Return a function that matches a given date match specifier

    Formats include:

    '{date}' match a given date to the accuracy provided

    '<{date}' on or before a given date

    '>{date}' on or after a given date

    >>> p1 = parsedate("10:29:59")
    >>> p2 = parsedate("10:30:00")
    >>> p3 = parsedate("10:30:59")
    >>> p4 = parsedate("10:31:00")
    >>> p5 = parsedate("Sep 15 10:30:00 1999")
    >>> f = matchdate("10:30")
    >>> f(p1[0])
    False
    >>> f(p2[0])
    True
    >>> f(p3[0])
    True
    >>> f(p4[0])
    False
    >>> f(p5[0])
    False
    """

    # Fill every field the spec leaves out with its minimum value, giving
    # the earliest timestamp the spec can denote.
    def lower(date):
        d = {'mb': "1", 'd': "1"}
        return parsedate(date, extendeddateformats, d)[0]

    # Fill omitted fields with maximum values, giving the latest timestamp
    # the spec can denote.  Month lengths are probed 31 -> 30 -> 29 (parsedate
    # aborts on an impossible day-of-month) before falling back to 28.
    def upper(date):
        d = {'mb': "12", 'HI': "23", 'M': "59", 'S': "59"}
        for days in ("31", "30", "29"):
            try:
                d["d"] = days
                return parsedate(date, extendeddateformats, d)[0]
            except Abort:
                pass
        d["d"] = "28"
        return parsedate(date, extendeddateformats, d)[0]

    date = date.strip()

    if not date:
        raise Abort(_("dates cannot consist entirely of whitespace"))
    elif date[0] == "<":
        # on or before: anything up to the latest moment the spec covers
        if not date[1:]:
            raise Abort(_("invalid day spec, use '<DATE'"))
        when = upper(date[1:])
        return lambda x: x <= when
    elif date[0] == ">":
        # on or after: anything from the earliest moment the spec covers
        if not date[1:]:
            raise Abort(_("invalid day spec, use '>DATE'"))
        when = lower(date[1:])
        return lambda x: x >= when
    elif date[0] == "-":
        # "-N": within the last N days, measured from now
        try:
            days = int(date[1:])
        except ValueError:
            raise Abort(_("invalid day spec: %s") % date[1:])
        if days < 0:
            raise Abort(_('%s must be nonnegative (see "hg help dates")')
                        % date[1:])
        when = makedate()[0] - days * 3600 * 24
        return lambda x: x >= when
    elif " to " in date:
        # inclusive range between two specs
        a, b = date.split(" to ")
        start, stop = lower(a), upper(b)
        return lambda x: x >= start and x <= stop
    else:
        # plain date: match anything within its span of accuracy
        # (e.g. "10:30" matches the whole minute)
        start, stop = lower(date), upper(date)
        return lambda x: x >= start and x <= stop
1649
1793
def stringmatcher(pattern):
    """
    accepts a string, possibly starting with 're:' or 'literal:' prefix.
    returns the matcher name, pattern, and matcher function.
    missing or unknown prefixes are treated as literal matches.

    helper for tests:
    >>> def test(pattern, *tests):
    ...     kind, pattern, matcher = stringmatcher(pattern)
    ...     return (kind, pattern, [bool(matcher(t)) for t in tests])

    exact matching (no prefix):
    >>> test('abcdefg', 'abc', 'def', 'abcdefg')
    ('literal', 'abcdefg', [False, False, True])

    regex matching ('re:' prefix)
    >>> test('re:a.+b', 'nomatch', 'fooadef', 'fooadefbar')
    ('re', 'a.+b', [False, False, True])

    force exact matches ('literal:' prefix)
    >>> test('literal:re:foobar', 'foobar', 're:foobar')
    ('literal', 're:foobar', [False, True])

    unknown prefixes are ignored and treated as literals
    >>> test('foo:bar', 'foo', 'bar', 'foo:bar')
    ('literal', 'foo:bar', [False, False, True])
    """
    if pattern.startswith('re:'):
        regexpattern = pattern[3:]
        try:
            compiled = remod.compile(regexpattern)
        except remod.error as inst:
            raise error.ParseError(_('invalid regular expression: %s')
                                   % inst)
        return 're', regexpattern, compiled.search
    # strip an explicit 'literal:' prefix; anything else falls through
    # unchanged and is matched literally
    if pattern.startswith('literal:'):
        pattern = pattern[8:]
    return 'literal', pattern, pattern.__eq__
1688
1832
def shortuser(user):
    """Return a short representation of a user name or email address."""
    # keep only the part before any '@'
    user = user.partition('@')[0]
    # drop a leading "Real Name <" prefix, if present
    lt = user.find('<')
    if lt >= 0:
        user = user[lt + 1:]
    # truncate at the first space, then at the first dot
    user = user.partition(' ')[0]
    user = user.partition('.')[0]
    return user
1704
1848
def emailuser(user):
    """Return the user portion of an email address."""
    # strip the domain part, if any
    user = user.partition('@')[0]
    # strip a leading "Real Name <" prefix, if any
    lt = user.find('<')
    if lt >= 0:
        user = user[lt + 1:]
    return user
1714
1858
def email(author):
    '''get email of author.'''
    # slice out whatever sits between '<' and '>'; either bracket may be
    # missing, in which case the slice extends to the string boundary
    start = author.find('<') + 1
    close = author.find('>')
    if close == -1:
        return author[start:]
    return author[start:close]
1721
1865
def ellipsis(text, maxlength=400):
    """Trim string to at most maxlength (default: 400) columns in display."""
    # encoding.trim appends '...' when it has to cut; presumably it counts
    # display columns (wide East Asian chars = 2) rather than bytes --
    # confirm against encoding.trim's docstring.
    return encoding.trim(text, maxlength, ellipsis='...')
1725
1869
def unitcountfn(*unittable):
    '''return a function that renders a readable count of some quantity'''

    def go(count):
        # scan the table top-down (largest unit first); the first entry
        # whose threshold the count reaches determines the rendering
        for threshold_mult, divisor, fmt in unittable:
            if count >= divisor * threshold_mult:
                return fmt % (count / float(divisor))
        # nothing matched: fall back to the last (smallest-unit) format
        return unittable[-1][2] % count

    return go
1736
1880
# bytecount(n) renders a byte quantity using the largest fitting unit.
# The table runs largest-to-smallest; the (multiplier, divisor, format)
# triples choose 0-2 decimal places so output keeps roughly three
# significant digits (e.g. '1.23 MB', '12.3 MB', '123 MB').
bytecount = unitcountfn(
    (100, 1 << 30, _('%.0f GB')),
    (10, 1 << 30, _('%.1f GB')),
    (1, 1 << 30, _('%.2f GB')),
    (100, 1 << 20, _('%.0f MB')),
    (10, 1 << 20, _('%.1f MB')),
    (1, 1 << 20, _('%.2f MB')),
    (100, 1 << 10, _('%.0f KB')),
    (10, 1 << 10, _('%.1f KB')),
    (1, 1 << 10, _('%.2f KB')),
    (1, 1, _('%.0f bytes')),
    )
1749
1893
def uirepr(s):
    # repr() doubles every backslash, which makes Windows paths hard to
    # read in user-facing output; collapse them back to single ones.
    return '\\'.join(repr(s).split('\\\\'))
1753
1897
1754 # delay import of textwrap
1898 # delay import of textwrap
def MBTextWrapper(**kwargs):
    # Factory that builds a column-width-aware TextWrapper subclass on
    # first use (textwrap is imported lazily elsewhere in this module),
    # then replaces itself with the class via the 'global' trick below.
    class tw(textwrap.TextWrapper):
        """
        Extend TextWrapper for width-awareness.

        Neither number of 'bytes' in any encoding nor 'characters' is
        appropriate to calculate terminal columns for specified string.

        Original TextWrapper implementation uses built-in 'len()' directly,
        so overriding is needed to use width information of each characters.

        In addition, characters classified into 'ambiguous' width are
        treated as wide in East Asian area, but as narrow in other.

        This requires use decision to determine width of such characters.
        """
        def _cutdown(self, ucstr, space_left):
            # Split ucstr into (head, tail) such that head fits within
            # space_left display columns.
            l = 0
            colwidth = encoding.ucolwidth
            for i in xrange(len(ucstr)):
                l += colwidth(ucstr[i])
                if space_left < l:
                    return (ucstr[:i], ucstr[i:])
            return ucstr, ''

        # overriding of base class
        def _handle_long_word(self, reversed_chunks, cur_line, cur_len, width):
            space_left = max(width - cur_len, 1)

            if self.break_long_words:
                cut, res = self._cutdown(reversed_chunks[-1], space_left)
                cur_line.append(cut)
                reversed_chunks[-1] = res
            elif not cur_line:
                cur_line.append(reversed_chunks.pop())

        # this overriding code is imported from TextWrapper of Python 2.6
        # to calculate columns of string by 'encoding.ucolwidth()'
        def _wrap_chunks(self, chunks):
            colwidth = encoding.ucolwidth

            lines = []
            if self.width <= 0:
                raise ValueError("invalid width %r (must be > 0)" % self.width)

            # Arrange in reverse order so items can be efficiently popped
            # from a stack of chucks.
            chunks.reverse()

            while chunks:

                # Start the list of chunks that will make up the current line.
                # cur_len is just the length of all the chunks in cur_line.
                cur_line = []
                cur_len = 0

                # Figure out which static string will prefix this line.
                if lines:
                    indent = self.subsequent_indent
                else:
                    indent = self.initial_indent

                # Maximum width for this line.
                width = self.width - len(indent)

                # First chunk on line is whitespace -- drop it, unless this
                # is the very beginning of the text (i.e. no lines started yet).
                if self.drop_whitespace and chunks[-1].strip() == '' and lines:
                    del chunks[-1]

                while chunks:
                    l = colwidth(chunks[-1])

                    # Can at least squeeze this chunk onto the current line.
                    if cur_len + l <= width:
                        cur_line.append(chunks.pop())
                        cur_len += l

                    # Nope, this line is full.
                    else:
                        break

                # The current line is full, and the next chunk is too big to
                # fit on *any* line (not just this one).
                if chunks and colwidth(chunks[-1]) > width:
                    self._handle_long_word(chunks, cur_line, cur_len, width)

                # If the last chunk on this line is all whitespace, drop it.
                if (self.drop_whitespace and
                    cur_line and cur_line[-1].strip() == ''):
                    del cur_line[-1]

                # Convert current line back to a string and store it in list
                # of all lines (return value).
                if cur_line:
                    lines.append(indent + ''.join(cur_line))

            return lines

    # Memoize: rebind the module-level name to the class itself so the
    # class body is only built once; later calls go straight to tw(**kwargs).
    global MBTextWrapper
    MBTextWrapper = tw
    return tw(**kwargs)
1857
2001
def wrap(line, width, initindent='', hangindent=''):
    """Wrap byte string *line* to at most *width* display columns.

    initindent prefixes the first output line, hangindent the rest.
    Input and output are byte strings in the local encoding; the wrapping
    itself happens on the decoded (unicode) text so that per-character
    display widths can be used.
    """
    maxindent = max(len(hangindent), len(initindent))
    if width <= maxindent:
        # adjust for weird terminal size
        width = max(78, maxindent + 1)
    line = line.decode(encoding.encoding, encoding.encodingmode)
    initindent = initindent.decode(encoding.encoding, encoding.encodingmode)
    hangindent = hangindent.decode(encoding.encoding, encoding.encodingmode)
    wrapper = MBTextWrapper(width=width,
                            initial_indent=initindent,
                            subsequent_indent=hangindent)
    return wrapper.fill(line).encode(encoding.encoding)
1870
2014
def iterlines(iterator):
    """Yield every line contained in an iterable of text chunks.

    Chunk boundaries need not align with line boundaries; splitting is
    purely per-chunk via str.splitlines().
    """
    for piece in iterator:
        for ln in piece.splitlines():
            yield ln
1875
2019
def expandpath(path):
    """Expand environment variables, then ~user constructs, in path."""
    expanded = os.path.expandvars(path)
    return os.path.expanduser(expanded)
1878
2022
def hgcmd():
    """Return the command used to execute current hg

    This is different from hgexecutable() because on Windows we want
    to avoid things opening new shell windows like batch files, so we
    get either the python call or current executable.
    """
    if mainfrozen():
        # frozen (bundled) builds: the interpreter binary *is* hg
        return [sys.executable]
    # otherwise delegate to the platform-specific lookup
    return gethgcmd()
1889
2033
def rundetached(args, condfn):
    """Execute the argument list in a detached process.

    condfn is a callable which is called repeatedly and should return
    True once the child process is known to have started successfully.
    At this point, the child process PID is returned. If the child
    process fails to start or finishes before condfn() evaluates to
    True, return -1.
    """
    # Windows case is easier because the child process is either
    # successfully starting and validating the condition or exiting
    # on failure. We just poll on its PID. On Unix, if the child
    # process fails to start, it will be left in a zombie state until
    # the parent wait on it, which we cannot do since we expect a long
    # running process on success. Instead we listen for SIGCHLD telling
    # us our child process terminated.
    terminated = set()
    def handler(signum, frame):
        # os.wait() returns a (pid, status) tuple
        terminated.add(os.wait())
    prevhandler = None
    # SIGCHLD does not exist on Windows; skip the handler there
    SIGCHLD = getattr(signal, 'SIGCHLD', None)
    if SIGCHLD is not None:
        prevhandler = signal.signal(SIGCHLD, handler)
    try:
        pid = spawndetached(args)
        while not condfn():
            # NOTE(review): 'terminated' holds (pid, status) tuples, so
            # 'pid in terminated' can never match a bare pid; child death
            # is effectively detected by testpid() alone -- confirm.
            # The second condfn() closes the race where the child did its
            # work and exited just before we checked.
            if ((pid in terminated or not testpid(pid))
                and not condfn()):
                return -1
            time.sleep(0.1)
        return pid
    finally:
        # restore the previous SIGCHLD disposition
        if prevhandler is not None:
            signal.signal(signal.SIGCHLD, prevhandler)
1924
2068
1925 def interpolate(prefix, mapping, s, fn=None, escape_prefix=False):
2069 def interpolate(prefix, mapping, s, fn=None, escape_prefix=False):
1926 """Return the result of interpolating items in the mapping into string s.
2070 """Return the result of interpolating items in the mapping into string s.
1927
2071
1928 prefix is a single character string, or a two character string with
2072 prefix is a single character string, or a two character string with
1929 a backslash as the first character if the prefix needs to be escaped in
2073 a backslash as the first character if the prefix needs to be escaped in
1930 a regular expression.
2074 a regular expression.
1931
2075
1932 fn is an optional function that will be applied to the replacement text
2076 fn is an optional function that will be applied to the replacement text
1933 just before replacement.
2077 just before replacement.
1934
2078
1935 escape_prefix is an optional flag that allows using doubled prefix for
2079 escape_prefix is an optional flag that allows using doubled prefix for
1936 its escaping.
2080 its escaping.
1937 """
2081 """
1938 fn = fn or (lambda s: s)
2082 fn = fn or (lambda s: s)
1939 patterns = '|'.join(mapping.keys())
2083 patterns = '|'.join(mapping.keys())
1940 if escape_prefix:
2084 if escape_prefix:
1941 patterns += '|' + prefix
2085 patterns += '|' + prefix
1942 if len(prefix) > 1:
2086 if len(prefix) > 1:
1943 prefix_char = prefix[1:]
2087 prefix_char = prefix[1:]
1944 else:
2088 else:
1945 prefix_char = prefix
2089 prefix_char = prefix
1946 mapping[prefix_char] = prefix_char
2090 mapping[prefix_char] = prefix_char
1947 r = remod.compile(r'%s(%s)' % (prefix, patterns))
2091 r = remod.compile(r'%s(%s)' % (prefix, patterns))
1948 return r.sub(lambda x: fn(mapping[x.group()[1:]]), s)
2092 return r.sub(lambda x: fn(mapping[x.group()[1:]]), s)
1949
2093
def getport(port):
    """Return the port for a given network service.

    If port is an integer, it's returned as is. If it's a string, it's
    looked up using socket.getservbyname(). If there's no matching
    service, error.Abort is raised.
    """
    # numeric values (ints or digit strings) pass straight through
    try:
        return int(port)
    except ValueError:
        # not numeric: treat it as a service name
        try:
            return socket.getservbyname(port)
        except socket.error:
            raise Abort(_("no port number associated with service '%s'")
                        % port)
1966
2110
# recognized spellings of boolean config values (lowercased)
_booleans = dict(
    [(w, True) for w in ('1', 'yes', 'true', 'on', 'always')] +
    [(w, False) for w in ('0', 'no', 'false', 'off', 'never')])

def parsebool(s):
    """Parse s into a boolean.

    If s is not a valid boolean, returns None.
    """
    # lookup is case-insensitive; unknown words map to None
    return _booleans.get(s.lower())
1977
2121
1978 _hexdig = '0123456789ABCDEFabcdef'
2122 _hexdig = '0123456789ABCDEFabcdef'
1979 _hextochr = dict((a + b, chr(int(a + b, 16)))
2123 _hextochr = dict((a + b, chr(int(a + b, 16)))
1980 for a in _hexdig for b in _hexdig)
2124 for a in _hexdig for b in _hexdig)
1981
2125
1982 def _urlunquote(s):
2126 def _urlunquote(s):
1983 """Decode HTTP/HTML % encoding.
2127 """Decode HTTP/HTML % encoding.
1984
2128
1985 >>> _urlunquote('abc%20def')
2129 >>> _urlunquote('abc%20def')
1986 'abc def'
2130 'abc def'
1987 """
2131 """
1988 res = s.split('%')
2132 res = s.split('%')
1989 # fastpath
2133 # fastpath
1990 if len(res) == 1:
2134 if len(res) == 1:
1991 return s
2135 return s
1992 s = res[0]
2136 s = res[0]
1993 for item in res[1:]:
2137 for item in res[1:]:
1994 try:
2138 try:
1995 s += _hextochr[item[:2]] + item[2:]
2139 s += _hextochr[item[:2]] + item[2:]
1996 except KeyError:
2140 except KeyError:
1997 s += '%' + item
2141 s += '%' + item
1998 except UnicodeDecodeError:
2142 except UnicodeDecodeError:
1999 s += unichr(int(item[:2], 16)) + item[2:]
2143 s += unichr(int(item[:2], 16)) + item[2:]
2000 return s
2144 return s
2001
2145
2002 class url(object):
2146 class url(object):
2003 r"""Reliable URL parser.
2147 r"""Reliable URL parser.
2004
2148
2005 This parses URLs and provides attributes for the following
2149 This parses URLs and provides attributes for the following
2006 components:
2150 components:
2007
2151
2008 <scheme>://<user>:<passwd>@<host>:<port>/<path>?<query>#<fragment>
2152 <scheme>://<user>:<passwd>@<host>:<port>/<path>?<query>#<fragment>
2009
2153
2010 Missing components are set to None. The only exception is
2154 Missing components are set to None. The only exception is
2011 fragment, which is set to '' if present but empty.
2155 fragment, which is set to '' if present but empty.
2012
2156
2013 If parsefragment is False, fragment is included in query. If
2157 If parsefragment is False, fragment is included in query. If
2014 parsequery is False, query is included in path. If both are
2158 parsequery is False, query is included in path. If both are
2015 False, both fragment and query are included in path.
2159 False, both fragment and query are included in path.
2016
2160
2017 See http://www.ietf.org/rfc/rfc2396.txt for more information.
2161 See http://www.ietf.org/rfc/rfc2396.txt for more information.
2018
2162
2019 Note that for backward compatibility reasons, bundle URLs do not
2163 Note that for backward compatibility reasons, bundle URLs do not
2020 take host names. That means 'bundle://../' has a path of '../'.
2164 take host names. That means 'bundle://../' has a path of '../'.
2021
2165
2022 Examples:
2166 Examples:
2023
2167
2024 >>> url('http://www.ietf.org/rfc/rfc2396.txt')
2168 >>> url('http://www.ietf.org/rfc/rfc2396.txt')
2025 <url scheme: 'http', host: 'www.ietf.org', path: 'rfc/rfc2396.txt'>
2169 <url scheme: 'http', host: 'www.ietf.org', path: 'rfc/rfc2396.txt'>
2026 >>> url('ssh://[::1]:2200//home/joe/repo')
2170 >>> url('ssh://[::1]:2200//home/joe/repo')
2027 <url scheme: 'ssh', host: '[::1]', port: '2200', path: '/home/joe/repo'>
2171 <url scheme: 'ssh', host: '[::1]', port: '2200', path: '/home/joe/repo'>
2028 >>> url('file:///home/joe/repo')
2172 >>> url('file:///home/joe/repo')
2029 <url scheme: 'file', path: '/home/joe/repo'>
2173 <url scheme: 'file', path: '/home/joe/repo'>
2030 >>> url('file:///c:/temp/foo/')
2174 >>> url('file:///c:/temp/foo/')
2031 <url scheme: 'file', path: 'c:/temp/foo/'>
2175 <url scheme: 'file', path: 'c:/temp/foo/'>
2032 >>> url('bundle:foo')
2176 >>> url('bundle:foo')
2033 <url scheme: 'bundle', path: 'foo'>
2177 <url scheme: 'bundle', path: 'foo'>
2034 >>> url('bundle://../foo')
2178 >>> url('bundle://../foo')
2035 <url scheme: 'bundle', path: '../foo'>
2179 <url scheme: 'bundle', path: '../foo'>
2036 >>> url(r'c:\foo\bar')
2180 >>> url(r'c:\foo\bar')
2037 <url path: 'c:\\foo\\bar'>
2181 <url path: 'c:\\foo\\bar'>
2038 >>> url(r'\\blah\blah\blah')
2182 >>> url(r'\\blah\blah\blah')
2039 <url path: '\\\\blah\\blah\\blah'>
2183 <url path: '\\\\blah\\blah\\blah'>
2040 >>> url(r'\\blah\blah\blah#baz')
2184 >>> url(r'\\blah\blah\blah#baz')
2041 <url path: '\\\\blah\\blah\\blah', fragment: 'baz'>
2185 <url path: '\\\\blah\\blah\\blah', fragment: 'baz'>
2042 >>> url(r'file:///C:\users\me')
2186 >>> url(r'file:///C:\users\me')
2043 <url scheme: 'file', path: 'C:\\users\\me'>
2187 <url scheme: 'file', path: 'C:\\users\\me'>
2044
2188
2045 Authentication credentials:
2189 Authentication credentials:
2046
2190
2047 >>> url('ssh://joe:xyz@x/repo')
2191 >>> url('ssh://joe:xyz@x/repo')
2048 <url scheme: 'ssh', user: 'joe', passwd: 'xyz', host: 'x', path: 'repo'>
2192 <url scheme: 'ssh', user: 'joe', passwd: 'xyz', host: 'x', path: 'repo'>
2049 >>> url('ssh://joe@x/repo')
2193 >>> url('ssh://joe@x/repo')
2050 <url scheme: 'ssh', user: 'joe', host: 'x', path: 'repo'>
2194 <url scheme: 'ssh', user: 'joe', host: 'x', path: 'repo'>
2051
2195
2052 Query strings and fragments:
2196 Query strings and fragments:
2053
2197
2054 >>> url('http://host/a?b#c')
2198 >>> url('http://host/a?b#c')
2055 <url scheme: 'http', host: 'host', path: 'a', query: 'b', fragment: 'c'>
2199 <url scheme: 'http', host: 'host', path: 'a', query: 'b', fragment: 'c'>
2056 >>> url('http://host/a?b#c', parsequery=False, parsefragment=False)
2200 >>> url('http://host/a?b#c', parsequery=False, parsefragment=False)
2057 <url scheme: 'http', host: 'host', path: 'a?b#c'>
2201 <url scheme: 'http', host: 'host', path: 'a?b#c'>
2058 """
2202 """
2059
2203
2060 _safechars = "!~*'()+"
2204 _safechars = "!~*'()+"
2061 _safepchars = "/!~*'()+:\\"
2205 _safepchars = "/!~*'()+:\\"
2062 _matchscheme = remod.compile(r'^[a-zA-Z0-9+.\-]+:').match
2206 _matchscheme = remod.compile(r'^[a-zA-Z0-9+.\-]+:').match
2063
2207
2064 def __init__(self, path, parsequery=True, parsefragment=True):
2208 def __init__(self, path, parsequery=True, parsefragment=True):
2065 # We slowly chomp away at path until we have only the path left
2209 # We slowly chomp away at path until we have only the path left
2066 self.scheme = self.user = self.passwd = self.host = None
2210 self.scheme = self.user = self.passwd = self.host = None
2067 self.port = self.path = self.query = self.fragment = None
2211 self.port = self.path = self.query = self.fragment = None
2068 self._localpath = True
2212 self._localpath = True
2069 self._hostport = ''
2213 self._hostport = ''
2070 self._origpath = path
2214 self._origpath = path
2071
2215
2072 if parsefragment and '#' in path:
2216 if parsefragment and '#' in path:
2073 path, self.fragment = path.split('#', 1)
2217 path, self.fragment = path.split('#', 1)
2074 if not path:
2218 if not path:
2075 path = None
2219 path = None
2076
2220
2077 # special case for Windows drive letters and UNC paths
2221 # special case for Windows drive letters and UNC paths
2078 if hasdriveletter(path) or path.startswith(r'\\'):
2222 if hasdriveletter(path) or path.startswith(r'\\'):
2079 self.path = path
2223 self.path = path
2080 return
2224 return
2081
2225
2082 # For compatibility reasons, we can't handle bundle paths as
2226 # For compatibility reasons, we can't handle bundle paths as
2083 # normal URLS
2227 # normal URLS
2084 if path.startswith('bundle:'):
2228 if path.startswith('bundle:'):
2085 self.scheme = 'bundle'
2229 self.scheme = 'bundle'
2086 path = path[7:]
2230 path = path[7:]
2087 if path.startswith('//'):
2231 if path.startswith('//'):
2088 path = path[2:]
2232 path = path[2:]
2089 self.path = path
2233 self.path = path
2090 return
2234 return
2091
2235
2092 if self._matchscheme(path):
2236 if self._matchscheme(path):
2093 parts = path.split(':', 1)
2237 parts = path.split(':', 1)
2094 if parts[0]:
2238 if parts[0]:
2095 self.scheme, path = parts
2239 self.scheme, path = parts
2096 self._localpath = False
2240 self._localpath = False
2097
2241
2098 if not path:
2242 if not path:
2099 path = None
2243 path = None
2100 if self._localpath:
2244 if self._localpath:
2101 self.path = ''
2245 self.path = ''
2102 return
2246 return
2103 else:
2247 else:
2104 if self._localpath:
2248 if self._localpath:
2105 self.path = path
2249 self.path = path
2106 return
2250 return
2107
2251
2108 if parsequery and '?' in path:
2252 if parsequery and '?' in path:
2109 path, self.query = path.split('?', 1)
2253 path, self.query = path.split('?', 1)
2110 if not path:
2254 if not path:
2111 path = None
2255 path = None
2112 if not self.query:
2256 if not self.query:
2113 self.query = None
2257 self.query = None
2114
2258
2115 # // is required to specify a host/authority
2259 # // is required to specify a host/authority
2116 if path and path.startswith('//'):
2260 if path and path.startswith('//'):
2117 parts = path[2:].split('/', 1)
2261 parts = path[2:].split('/', 1)
2118 if len(parts) > 1:
2262 if len(parts) > 1:
2119 self.host, path = parts
2263 self.host, path = parts
2120 else:
2264 else:
2121 self.host = parts[0]
2265 self.host = parts[0]
2122 path = None
2266 path = None
2123 if not self.host:
2267 if not self.host:
2124 self.host = None
2268 self.host = None
2125 # path of file:///d is /d
2269 # path of file:///d is /d
2126 # path of file:///d:/ is d:/, not /d:/
2270 # path of file:///d:/ is d:/, not /d:/
2127 if path and not hasdriveletter(path):
2271 if path and not hasdriveletter(path):
2128 path = '/' + path
2272 path = '/' + path
2129
2273
2130 if self.host and '@' in self.host:
2274 if self.host and '@' in self.host:
2131 self.user, self.host = self.host.rsplit('@', 1)
2275 self.user, self.host = self.host.rsplit('@', 1)
2132 if ':' in self.user:
2276 if ':' in self.user:
2133 self.user, self.passwd = self.user.split(':', 1)
2277 self.user, self.passwd = self.user.split(':', 1)
2134 if not self.host:
2278 if not self.host:
2135 self.host = None
2279 self.host = None
2136
2280
2137 # Don't split on colons in IPv6 addresses without ports
2281 # Don't split on colons in IPv6 addresses without ports
2138 if (self.host and ':' in self.host and
2282 if (self.host and ':' in self.host and
2139 not (self.host.startswith('[') and self.host.endswith(']'))):
2283 not (self.host.startswith('[') and self.host.endswith(']'))):
2140 self._hostport = self.host
2284 self._hostport = self.host
2141 self.host, self.port = self.host.rsplit(':', 1)
2285 self.host, self.port = self.host.rsplit(':', 1)
2142 if not self.host:
2286 if not self.host:
2143 self.host = None
2287 self.host = None
2144
2288
2145 if (self.host and self.scheme == 'file' and
2289 if (self.host and self.scheme == 'file' and
2146 self.host not in ('localhost', '127.0.0.1', '[::1]')):
2290 self.host not in ('localhost', '127.0.0.1', '[::1]')):
2147 raise Abort(_('file:// URLs can only refer to localhost'))
2291 raise Abort(_('file:// URLs can only refer to localhost'))
2148
2292
2149 self.path = path
2293 self.path = path
2150
2294
2151 # leave the query string escaped
2295 # leave the query string escaped
2152 for a in ('user', 'passwd', 'host', 'port',
2296 for a in ('user', 'passwd', 'host', 'port',
2153 'path', 'fragment'):
2297 'path', 'fragment'):
2154 v = getattr(self, a)
2298 v = getattr(self, a)
2155 if v is not None:
2299 if v is not None:
2156 setattr(self, a, _urlunquote(v))
2300 setattr(self, a, _urlunquote(v))
2157
2301
2158 def __repr__(self):
2302 def __repr__(self):
2159 attrs = []
2303 attrs = []
2160 for a in ('scheme', 'user', 'passwd', 'host', 'port', 'path',
2304 for a in ('scheme', 'user', 'passwd', 'host', 'port', 'path',
2161 'query', 'fragment'):
2305 'query', 'fragment'):
2162 v = getattr(self, a)
2306 v = getattr(self, a)
2163 if v is not None:
2307 if v is not None:
2164 attrs.append('%s: %r' % (a, v))
2308 attrs.append('%s: %r' % (a, v))
2165 return '<url %s>' % ', '.join(attrs)
2309 return '<url %s>' % ', '.join(attrs)
2166
2310
2167 def __str__(self):
2311 def __str__(self):
2168 r"""Join the URL's components back into a URL string.
2312 r"""Join the URL's components back into a URL string.
2169
2313
2170 Examples:
2314 Examples:
2171
2315
2172 >>> str(url('http://user:pw@host:80/c:/bob?fo:oo#ba:ar'))
2316 >>> str(url('http://user:pw@host:80/c:/bob?fo:oo#ba:ar'))
2173 'http://user:pw@host:80/c:/bob?fo:oo#ba:ar'
2317 'http://user:pw@host:80/c:/bob?fo:oo#ba:ar'
2174 >>> str(url('http://user:pw@host:80/?foo=bar&baz=42'))
2318 >>> str(url('http://user:pw@host:80/?foo=bar&baz=42'))
2175 'http://user:pw@host:80/?foo=bar&baz=42'
2319 'http://user:pw@host:80/?foo=bar&baz=42'
2176 >>> str(url('http://user:pw@host:80/?foo=bar%3dbaz'))
2320 >>> str(url('http://user:pw@host:80/?foo=bar%3dbaz'))
2177 'http://user:pw@host:80/?foo=bar%3dbaz'
2321 'http://user:pw@host:80/?foo=bar%3dbaz'
2178 >>> str(url('ssh://user:pw@[::1]:2200//home/joe#'))
2322 >>> str(url('ssh://user:pw@[::1]:2200//home/joe#'))
2179 'ssh://user:pw@[::1]:2200//home/joe#'
2323 'ssh://user:pw@[::1]:2200//home/joe#'
2180 >>> str(url('http://localhost:80//'))
2324 >>> str(url('http://localhost:80//'))
2181 'http://localhost:80//'
2325 'http://localhost:80//'
2182 >>> str(url('http://localhost:80/'))
2326 >>> str(url('http://localhost:80/'))
2183 'http://localhost:80/'
2327 'http://localhost:80/'
2184 >>> str(url('http://localhost:80'))
2328 >>> str(url('http://localhost:80'))
2185 'http://localhost:80/'
2329 'http://localhost:80/'
2186 >>> str(url('bundle:foo'))
2330 >>> str(url('bundle:foo'))
2187 'bundle:foo'
2331 'bundle:foo'
2188 >>> str(url('bundle://../foo'))
2332 >>> str(url('bundle://../foo'))
2189 'bundle:../foo'
2333 'bundle:../foo'
2190 >>> str(url('path'))
2334 >>> str(url('path'))
2191 'path'
2335 'path'
2192 >>> str(url('file:///tmp/foo/bar'))
2336 >>> str(url('file:///tmp/foo/bar'))
2193 'file:///tmp/foo/bar'
2337 'file:///tmp/foo/bar'
2194 >>> str(url('file:///c:/tmp/foo/bar'))
2338 >>> str(url('file:///c:/tmp/foo/bar'))
2195 'file:///c:/tmp/foo/bar'
2339 'file:///c:/tmp/foo/bar'
2196 >>> print url(r'bundle:foo\bar')
2340 >>> print url(r'bundle:foo\bar')
2197 bundle:foo\bar
2341 bundle:foo\bar
2198 >>> print url(r'file:///D:\data\hg')
2342 >>> print url(r'file:///D:\data\hg')
2199 file:///D:\data\hg
2343 file:///D:\data\hg
2200 """
2344 """
2201 if self._localpath:
2345 if self._localpath:
2202 s = self.path
2346 s = self.path
2203 if self.scheme == 'bundle':
2347 if self.scheme == 'bundle':
2204 s = 'bundle:' + s
2348 s = 'bundle:' + s
2205 if self.fragment:
2349 if self.fragment:
2206 s += '#' + self.fragment
2350 s += '#' + self.fragment
2207 return s
2351 return s
2208
2352
2209 s = self.scheme + ':'
2353 s = self.scheme + ':'
2210 if self.user or self.passwd or self.host:
2354 if self.user or self.passwd or self.host:
2211 s += '//'
2355 s += '//'
2212 elif self.scheme and (not self.path or self.path.startswith('/')
2356 elif self.scheme and (not self.path or self.path.startswith('/')
2213 or hasdriveletter(self.path)):
2357 or hasdriveletter(self.path)):
2214 s += '//'
2358 s += '//'
2215 if hasdriveletter(self.path):
2359 if hasdriveletter(self.path):
2216 s += '/'
2360 s += '/'
2217 if self.user:
2361 if self.user:
2218 s += urllib.quote(self.user, safe=self._safechars)
2362 s += urllib.quote(self.user, safe=self._safechars)
2219 if self.passwd:
2363 if self.passwd:
2220 s += ':' + urllib.quote(self.passwd, safe=self._safechars)
2364 s += ':' + urllib.quote(self.passwd, safe=self._safechars)
2221 if self.user or self.passwd:
2365 if self.user or self.passwd:
2222 s += '@'
2366 s += '@'
2223 if self.host:
2367 if self.host:
2224 if not (self.host.startswith('[') and self.host.endswith(']')):
2368 if not (self.host.startswith('[') and self.host.endswith(']')):
2225 s += urllib.quote(self.host)
2369 s += urllib.quote(self.host)
2226 else:
2370 else:
2227 s += self.host
2371 s += self.host
2228 if self.port:
2372 if self.port:
2229 s += ':' + urllib.quote(self.port)
2373 s += ':' + urllib.quote(self.port)
2230 if self.host:
2374 if self.host:
2231 s += '/'
2375 s += '/'
2232 if self.path:
2376 if self.path:
2233 # TODO: similar to the query string, we should not unescape the
2377 # TODO: similar to the query string, we should not unescape the
2234 # path when we store it, the path might contain '%2f' = '/',
2378 # path when we store it, the path might contain '%2f' = '/',
2235 # which we should *not* escape.
2379 # which we should *not* escape.
2236 s += urllib.quote(self.path, safe=self._safepchars)
2380 s += urllib.quote(self.path, safe=self._safepchars)
2237 if self.query:
2381 if self.query:
2238 # we store the query in escaped form.
2382 # we store the query in escaped form.
2239 s += '?' + self.query
2383 s += '?' + self.query
2240 if self.fragment is not None:
2384 if self.fragment is not None:
2241 s += '#' + urllib.quote(self.fragment, safe=self._safepchars)
2385 s += '#' + urllib.quote(self.fragment, safe=self._safepchars)
2242 return s
2386 return s
2243
2387
2244 def authinfo(self):
2388 def authinfo(self):
2245 user, passwd = self.user, self.passwd
2389 user, passwd = self.user, self.passwd
2246 try:
2390 try:
2247 self.user, self.passwd = None, None
2391 self.user, self.passwd = None, None
2248 s = str(self)
2392 s = str(self)
2249 finally:
2393 finally:
2250 self.user, self.passwd = user, passwd
2394 self.user, self.passwd = user, passwd
2251 if not self.user:
2395 if not self.user:
2252 return (s, None)
2396 return (s, None)
2253 # authinfo[1] is passed to urllib2 password manager, and its
2397 # authinfo[1] is passed to urllib2 password manager, and its
2254 # URIs must not contain credentials. The host is passed in the
2398 # URIs must not contain credentials. The host is passed in the
2255 # URIs list because Python < 2.4.3 uses only that to search for
2399 # URIs list because Python < 2.4.3 uses only that to search for
2256 # a password.
2400 # a password.
2257 return (s, (None, (s, self.host),
2401 return (s, (None, (s, self.host),
2258 self.user, self.passwd or ''))
2402 self.user, self.passwd or ''))
2259
2403
2260 def isabs(self):
2404 def isabs(self):
2261 if self.scheme and self.scheme != 'file':
2405 if self.scheme and self.scheme != 'file':
2262 return True # remote URL
2406 return True # remote URL
2263 if hasdriveletter(self.path):
2407 if hasdriveletter(self.path):
2264 return True # absolute for our purposes - can't be joined()
2408 return True # absolute for our purposes - can't be joined()
2265 if self.path.startswith(r'\\'):
2409 if self.path.startswith(r'\\'):
2266 return True # Windows UNC path
2410 return True # Windows UNC path
2267 if self.path.startswith('/'):
2411 if self.path.startswith('/'):
2268 return True # POSIX-style
2412 return True # POSIX-style
2269 return False
2413 return False
2270
2414
2271 def localpath(self):
2415 def localpath(self):
2272 if self.scheme == 'file' or self.scheme == 'bundle':
2416 if self.scheme == 'file' or self.scheme == 'bundle':
2273 path = self.path or '/'
2417 path = self.path or '/'
2274 # For Windows, we need to promote hosts containing drive
2418 # For Windows, we need to promote hosts containing drive
2275 # letters to paths with drive letters.
2419 # letters to paths with drive letters.
2276 if hasdriveletter(self._hostport):
2420 if hasdriveletter(self._hostport):
2277 path = self._hostport + '/' + self.path
2421 path = self._hostport + '/' + self.path
2278 elif (self.host is not None and self.path
2422 elif (self.host is not None and self.path
2279 and not hasdriveletter(path)):
2423 and not hasdriveletter(path)):
2280 path = '/' + path
2424 path = '/' + path
2281 return path
2425 return path
2282 return self._origpath
2426 return self._origpath
2283
2427
2284 def islocal(self):
2428 def islocal(self):
2285 '''whether localpath will return something that posixfile can open'''
2429 '''whether localpath will return something that posixfile can open'''
2286 return (not self.scheme or self.scheme == 'file'
2430 return (not self.scheme or self.scheme == 'file'
2287 or self.scheme == 'bundle')
2431 or self.scheme == 'bundle')
2288
2432
2289 def hasscheme(path):
2433 def hasscheme(path):
2290 return bool(url(path).scheme)
2434 return bool(url(path).scheme)
2291
2435
2292 def hasdriveletter(path):
2436 def hasdriveletter(path):
2293 return path and path[1:2] == ':' and path[0:1].isalpha()
2437 return path and path[1:2] == ':' and path[0:1].isalpha()
2294
2438
2295 def urllocalpath(path):
2439 def urllocalpath(path):
2296 return url(path, parsequery=False, parsefragment=False).localpath()
2440 return url(path, parsequery=False, parsefragment=False).localpath()
2297
2441
2298 def hidepassword(u):
2442 def hidepassword(u):
2299 '''hide user credential in a url string'''
2443 '''hide user credential in a url string'''
2300 u = url(u)
2444 u = url(u)
2301 if u.passwd:
2445 if u.passwd:
2302 u.passwd = '***'
2446 u.passwd = '***'
2303 return str(u)
2447 return str(u)
2304
2448
2305 def removeauth(u):
2449 def removeauth(u):
2306 '''remove all authentication information from a url string'''
2450 '''remove all authentication information from a url string'''
2307 u = url(u)
2451 u = url(u)
2308 u.user = u.passwd = None
2452 u.user = u.passwd = None
2309 return str(u)
2453 return str(u)
2310
2454
2311 def isatty(fp):
2455 def isatty(fp):
2312 try:
2456 try:
2313 return fp.isatty()
2457 return fp.isatty()
2314 except AttributeError:
2458 except AttributeError:
2315 return False
2459 return False
2316
2460
2317 timecount = unitcountfn(
2461 timecount = unitcountfn(
2318 (1, 1e3, _('%.0f s')),
2462 (1, 1e3, _('%.0f s')),
2319 (100, 1, _('%.1f s')),
2463 (100, 1, _('%.1f s')),
2320 (10, 1, _('%.2f s')),
2464 (10, 1, _('%.2f s')),
2321 (1, 1, _('%.3f s')),
2465 (1, 1, _('%.3f s')),
2322 (100, 0.001, _('%.1f ms')),
2466 (100, 0.001, _('%.1f ms')),
2323 (10, 0.001, _('%.2f ms')),
2467 (10, 0.001, _('%.2f ms')),
2324 (1, 0.001, _('%.3f ms')),
2468 (1, 0.001, _('%.3f ms')),
2325 (100, 0.000001, _('%.1f us')),
2469 (100, 0.000001, _('%.1f us')),
2326 (10, 0.000001, _('%.2f us')),
2470 (10, 0.000001, _('%.2f us')),
2327 (1, 0.000001, _('%.3f us')),
2471 (1, 0.000001, _('%.3f us')),
2328 (100, 0.000000001, _('%.1f ns')),
2472 (100, 0.000000001, _('%.1f ns')),
2329 (10, 0.000000001, _('%.2f ns')),
2473 (10, 0.000000001, _('%.2f ns')),
2330 (1, 0.000000001, _('%.3f ns')),
2474 (1, 0.000000001, _('%.3f ns')),
2331 )
2475 )
2332
2476
2333 _timenesting = [0]
2477 _timenesting = [0]
2334
2478
2335 def timed(func):
2479 def timed(func):
2336 '''Report the execution time of a function call to stderr.
2480 '''Report the execution time of a function call to stderr.
2337
2481
2338 During development, use as a decorator when you need to measure
2482 During development, use as a decorator when you need to measure
2339 the cost of a function, e.g. as follows:
2483 the cost of a function, e.g. as follows:
2340
2484
2341 @util.timed
2485 @util.timed
2342 def foo(a, b, c):
2486 def foo(a, b, c):
2343 pass
2487 pass
2344 '''
2488 '''
2345
2489
2346 def wrapper(*args, **kwargs):
2490 def wrapper(*args, **kwargs):
2347 start = time.time()
2491 start = time.time()
2348 indent = 2
2492 indent = 2
2349 _timenesting[0] += indent
2493 _timenesting[0] += indent
2350 try:
2494 try:
2351 return func(*args, **kwargs)
2495 return func(*args, **kwargs)
2352 finally:
2496 finally:
2353 elapsed = time.time() - start
2497 elapsed = time.time() - start
2354 _timenesting[0] -= indent
2498 _timenesting[0] -= indent
2355 sys.stderr.write('%s%s: %s\n' %
2499 sys.stderr.write('%s%s: %s\n' %
2356 (' ' * _timenesting[0], func.__name__,
2500 (' ' * _timenesting[0], func.__name__,
2357 timecount(elapsed)))
2501 timecount(elapsed)))
2358 return wrapper
2502 return wrapper
2359
2503
2360 _sizeunits = (('m', 2**20), ('k', 2**10), ('g', 2**30),
2504 _sizeunits = (('m', 2**20), ('k', 2**10), ('g', 2**30),
2361 ('kb', 2**10), ('mb', 2**20), ('gb', 2**30), ('b', 1))
2505 ('kb', 2**10), ('mb', 2**20), ('gb', 2**30), ('b', 1))
2362
2506
2363 def sizetoint(s):
2507 def sizetoint(s):
2364 '''Convert a space specifier to a byte count.
2508 '''Convert a space specifier to a byte count.
2365
2509
2366 >>> sizetoint('30')
2510 >>> sizetoint('30')
2367 30
2511 30
2368 >>> sizetoint('2.2kb')
2512 >>> sizetoint('2.2kb')
2369 2252
2513 2252
2370 >>> sizetoint('6M')
2514 >>> sizetoint('6M')
2371 6291456
2515 6291456
2372 '''
2516 '''
2373 t = s.strip().lower()
2517 t = s.strip().lower()
2374 try:
2518 try:
2375 for k, u in _sizeunits:
2519 for k, u in _sizeunits:
2376 if t.endswith(k):
2520 if t.endswith(k):
2377 return int(float(t[:-len(k)]) * u)
2521 return int(float(t[:-len(k)]) * u)
2378 return int(t)
2522 return int(t)
2379 except ValueError:
2523 except ValueError:
2380 raise error.ParseError(_("couldn't parse size: %s") % s)
2524 raise error.ParseError(_("couldn't parse size: %s") % s)
2381
2525
2382 class hooks(object):
2526 class hooks(object):
2383 '''A collection of hook functions that can be used to extend a
2527 '''A collection of hook functions that can be used to extend a
2384 function's behavior. Hooks are called in lexicographic order,
2528 function's behavior. Hooks are called in lexicographic order,
2385 based on the names of their sources.'''
2529 based on the names of their sources.'''
2386
2530
2387 def __init__(self):
2531 def __init__(self):
2388 self._hooks = []
2532 self._hooks = []
2389
2533
2390 def add(self, source, hook):
2534 def add(self, source, hook):
2391 self._hooks.append((source, hook))
2535 self._hooks.append((source, hook))
2392
2536
2393 def __call__(self, *args):
2537 def __call__(self, *args):
2394 self._hooks.sort(key=lambda x: x[0])
2538 self._hooks.sort(key=lambda x: x[0])
2395 results = []
2539 results = []
2396 for source, hook in self._hooks:
2540 for source, hook in self._hooks:
2397 results.append(hook(*args))
2541 results.append(hook(*args))
2398 return results
2542 return results
2399
2543
2400 def debugstacktrace(msg='stacktrace', skip=0, f=sys.stderr, otherf=sys.stdout):
2544 def debugstacktrace(msg='stacktrace', skip=0, f=sys.stderr, otherf=sys.stdout):
2401 '''Writes a message to f (stderr) with a nicely formatted stacktrace.
2545 '''Writes a message to f (stderr) with a nicely formatted stacktrace.
2402 Skips the 'skip' last entries. By default it will flush stdout first.
2546 Skips the 'skip' last entries. By default it will flush stdout first.
2403 It can be used everywhere and do intentionally not require an ui object.
2547 It can be used everywhere and do intentionally not require an ui object.
2404 Not be used in production code but very convenient while developing.
2548 Not be used in production code but very convenient while developing.
2405 '''
2549 '''
2406 if otherf:
2550 if otherf:
2407 otherf.flush()
2551 otherf.flush()
2408 f.write('%s at:\n' % msg)
2552 f.write('%s at:\n' % msg)
2409 entries = [('%s:%s' % (fn, ln), func)
2553 entries = [('%s:%s' % (fn, ln), func)
2410 for fn, ln, func, _text in traceback.extract_stack()[:-skip - 1]]
2554 for fn, ln, func, _text in traceback.extract_stack()[:-skip - 1]]
2411 if entries:
2555 if entries:
2412 fnmax = max(len(entry[0]) for entry in entries)
2556 fnmax = max(len(entry[0]) for entry in entries)
2413 for fnln, func in entries:
2557 for fnln, func in entries:
2414 f.write(' %-*s in %s\n' % (fnmax, fnln, func))
2558 f.write(' %-*s in %s\n' % (fnmax, fnln, func))
2415 f.flush()
2559 f.flush()
2416
2560
2417 class dirs(object):
2561 class dirs(object):
2418 '''a multiset of directory names from a dirstate or manifest'''
2562 '''a multiset of directory names from a dirstate or manifest'''
2419
2563
2420 def __init__(self, map, skip=None):
2564 def __init__(self, map, skip=None):
2421 self._dirs = {}
2565 self._dirs = {}
2422 addpath = self.addpath
2566 addpath = self.addpath
2423 if safehasattr(map, 'iteritems') and skip is not None:
2567 if safehasattr(map, 'iteritems') and skip is not None:
2424 for f, s in map.iteritems():
2568 for f, s in map.iteritems():
2425 if s[0] != skip:
2569 if s[0] != skip:
2426 addpath(f)
2570 addpath(f)
2427 else:
2571 else:
2428 for f in map:
2572 for f in map:
2429 addpath(f)
2573 addpath(f)
2430
2574
2431 def addpath(self, path):
2575 def addpath(self, path):
2432 dirs = self._dirs
2576 dirs = self._dirs
2433 for base in finddirs(path):
2577 for base in finddirs(path):
2434 if base in dirs:
2578 if base in dirs:
2435 dirs[base] += 1
2579 dirs[base] += 1
2436 return
2580 return
2437 dirs[base] = 1
2581 dirs[base] = 1
2438
2582
2439 def delpath(self, path):
2583 def delpath(self, path):
2440 dirs = self._dirs
2584 dirs = self._dirs
2441 for base in finddirs(path):
2585 for base in finddirs(path):
2442 if dirs[base] > 1:
2586 if dirs[base] > 1:
2443 dirs[base] -= 1
2587 dirs[base] -= 1
2444 return
2588 return
2445 del dirs[base]
2589 del dirs[base]
2446
2590
2447 def __iter__(self):
2591 def __iter__(self):
2448 return self._dirs.iterkeys()
2592 return self._dirs.iterkeys()
2449
2593
2450 def __contains__(self, d):
2594 def __contains__(self, d):
2451 return d in self._dirs
2595 return d in self._dirs
2452
2596
2453 if safehasattr(parsers, 'dirs'):
2597 if safehasattr(parsers, 'dirs'):
2454 dirs = parsers.dirs
2598 dirs = parsers.dirs
2455
2599
2456 def finddirs(path):
2600 def finddirs(path):
2457 pos = path.rfind('/')
2601 pos = path.rfind('/')
2458 while pos != -1:
2602 while pos != -1:
2459 yield path[:pos]
2603 yield path[:pos]
2460 pos = path.rfind('/', 0, pos)
2604 pos = path.rfind('/', 0, pos)
2461
2605
2462 # compression utility
2606 # compression utility
2463
2607
2464 class nocompress(object):
2608 class nocompress(object):
2465 def compress(self, x):
2609 def compress(self, x):
2466 return x
2610 return x
2467 def flush(self):
2611 def flush(self):
2468 return ""
2612 return ""
2469
2613
2470 compressors = {
2614 compressors = {
2471 None: nocompress,
2615 None: nocompress,
2472 # lambda to prevent early import
2616 # lambda to prevent early import
2473 'BZ': lambda: bz2.BZ2Compressor(),
2617 'BZ': lambda: bz2.BZ2Compressor(),
2474 'GZ': lambda: zlib.compressobj(),
2618 'GZ': lambda: zlib.compressobj(),
2475 }
2619 }
2476 # also support the old form by courtesies
2620 # also support the old form by courtesies
2477 compressors['UN'] = compressors[None]
2621 compressors['UN'] = compressors[None]
2478
2622
2479 def _makedecompressor(decompcls):
2623 def _makedecompressor(decompcls):
2480 def generator(f):
2624 def generator(f):
2481 d = decompcls()
2625 d = decompcls()
2482 for chunk in filechunkiter(f):
2626 for chunk in filechunkiter(f):
2483 yield d.decompress(chunk)
2627 yield d.decompress(chunk)
2484 def func(fh):
2628 def func(fh):
2485 return chunkbuffer(generator(fh))
2629 return chunkbuffer(generator(fh))
2486 return func
2630 return func
2487
2631
2488 def _bz2():
2632 def _bz2():
2489 d = bz2.BZ2Decompressor()
2633 d = bz2.BZ2Decompressor()
2490 # Bzip2 stream start with BZ, but we stripped it.
2634 # Bzip2 stream start with BZ, but we stripped it.
2491 # we put it back for good measure.
2635 # we put it back for good measure.
2492 d.decompress('BZ')
2636 d.decompress('BZ')
2493 return d
2637 return d
2494
2638
2495 decompressors = {None: lambda fh: fh,
2639 decompressors = {None: lambda fh: fh,
2496 '_truncatedBZ': _makedecompressor(_bz2),
2640 '_truncatedBZ': _makedecompressor(_bz2),
2497 'BZ': _makedecompressor(lambda: bz2.BZ2Decompressor()),
2641 'BZ': _makedecompressor(lambda: bz2.BZ2Decompressor()),
2498 'GZ': _makedecompressor(lambda: zlib.decompressobj()),
2642 'GZ': _makedecompressor(lambda: zlib.decompressobj()),
2499 }
2643 }
2500 # also support the old form by courtesies
2644 # also support the old form by courtesies
2501 decompressors['UN'] = decompressors[None]
2645 decompressors['UN'] = decompressors[None]
2502
2646
2503 # convenient shortcut
2647 # convenient shortcut
2504 dst = debugstacktrace
2648 dst = debugstacktrace
@@ -1,38 +1,46 b''
1 from mercurial import util
1 from mercurial import util
2
2
3 def printifpresent(d, xs):
3 def printifpresent(d, xs):
4 for x in xs:
4 for x in xs:
5 present = x in d
5 present = x in d
6 print "'%s' in d: %s" % (x, present)
6 print "'%s' in d: %s" % (x, present)
7 if present:
7 if present:
8 print "d['%s']: %s" % (x, d[x])
8 print "d['%s']: %s" % (x, d[x])
9
9
10 def test_lrucachedict():
10 def test_lrucachedict():
11 d = util.lrucachedict(4)
11 d = util.lrucachedict(4)
12 d['a'] = 'va'
12 d['a'] = 'va'
13 d['b'] = 'vb'
13 d['b'] = 'vb'
14 d['c'] = 'vc'
14 d['c'] = 'vc'
15 d['d'] = 'vd'
15 d['d'] = 'vd'
16
16
17 # all of these should be present
17 # all of these should be present
18 printifpresent(d, ['a', 'b', 'c', 'd'])
18 printifpresent(d, ['a', 'b', 'c', 'd'])
19
19
20 # 'a' should be dropped because it was least recently used
20 # 'a' should be dropped because it was least recently used
21 d['e'] = 've'
21 d['e'] = 've'
22 printifpresent(d, ['a', 'b', 'c', 'd', 'e'])
22 printifpresent(d, ['a', 'b', 'c', 'd', 'e'])
23
23
24 # touch entries in some order (get or set).
24 # touch entries in some order (get or set).
25 d['e']
25 d['e']
26 d['c'] = 'vc2'
26 d['c'] = 'vc2'
27 d['d']
27 d['d']
28 d['b'] = 'vb2'
28 d['b'] = 'vb2'
29
29
30 # 'e' should be dropped now
30 # 'e' should be dropped now
31 d['f'] = 'vf'
31 d['f'] = 'vf'
32 printifpresent(d, ['b', 'c', 'd', 'e', 'f'])
32 printifpresent(d, ['b', 'c', 'd', 'e', 'f'])
33
33
34 d.clear()
34 d.clear()
35 printifpresent(d, ['b', 'c', 'd', 'e', 'f'])
35 printifpresent(d, ['b', 'c', 'd', 'e', 'f'])
36
36
37 # Now test dicts that aren't full.
38 d = util.lrucachedict(4)
39 d['a'] = 1
40 d['b'] = 2
41 d['a']
42 d['b']
43 printifpresent(d, ['a', 'b'])
44
37 if __name__ == '__main__':
45 if __name__ == '__main__':
38 test_lrucachedict()
46 test_lrucachedict()
@@ -1,31 +1,35 b''
1 'a' in d: True
1 'a' in d: True
2 d['a']: va
2 d['a']: va
3 'b' in d: True
3 'b' in d: True
4 d['b']: vb
4 d['b']: vb
5 'c' in d: True
5 'c' in d: True
6 d['c']: vc
6 d['c']: vc
7 'd' in d: True
7 'd' in d: True
8 d['d']: vd
8 d['d']: vd
9 'a' in d: False
9 'a' in d: False
10 'b' in d: True
10 'b' in d: True
11 d['b']: vb
11 d['b']: vb
12 'c' in d: True
12 'c' in d: True
13 d['c']: vc
13 d['c']: vc
14 'd' in d: True
14 'd' in d: True
15 d['d']: vd
15 d['d']: vd
16 'e' in d: True
16 'e' in d: True
17 d['e']: ve
17 d['e']: ve
18 'b' in d: True
18 'b' in d: True
19 d['b']: vb2
19 d['b']: vb2
20 'c' in d: True
20 'c' in d: True
21 d['c']: vc2
21 d['c']: vc2
22 'd' in d: True
22 'd' in d: True
23 d['d']: vd
23 d['d']: vd
24 'e' in d: False
24 'e' in d: False
25 'f' in d: True
25 'f' in d: True
26 d['f']: vf
26 d['f']: vf
27 'b' in d: False
27 'b' in d: False
28 'c' in d: False
28 'c' in d: False
29 'd' in d: False
29 'd' in d: False
30 'e' in d: False
30 'e' in d: False
31 'f' in d: False
31 'f' in d: False
32 'a' in d: True
33 d['a']: 1
34 'b' in d: True
35 d['b']: 2
General Comments 0
You need to be logged in to leave comments. Login now