##// END OF EJS Templates
util: add versiontuple() for returning parsed version information...
Gregory Szorc -
r27112:39c14e89 default
parent child Browse files
Show More
@@ -1,2436 +1,2489
1 # util.py - Mercurial utility functions and platform specific implementations
1 # util.py - Mercurial utility functions and platform specific implementations
2 #
2 #
3 # Copyright 2005 K. Thananchayan <thananck@yahoo.com>
3 # Copyright 2005 K. Thananchayan <thananck@yahoo.com>
4 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
5 # Copyright 2006 Vadim Gelfer <vadim.gelfer@gmail.com>
5 # Copyright 2006 Vadim Gelfer <vadim.gelfer@gmail.com>
6 #
6 #
7 # This software may be used and distributed according to the terms of the
7 # This software may be used and distributed according to the terms of the
8 # GNU General Public License version 2 or any later version.
8 # GNU General Public License version 2 or any later version.
9
9
10 """Mercurial utility functions and platform specific implementations.
10 """Mercurial utility functions and platform specific implementations.
11
11
12 This contains helper routines that are independent of the SCM core and
12 This contains helper routines that are independent of the SCM core and
13 hide platform-specific details from the core.
13 hide platform-specific details from the core.
14 """
14 """
15
15
16 import i18n
16 import i18n
17 _ = i18n._
17 _ = i18n._
18 import error, osutil, encoding, parsers
18 import error, osutil, encoding, parsers
19 import errno, shutil, sys, tempfile, traceback
19 import errno, shutil, sys, tempfile, traceback
20 import re as remod
20 import re as remod
21 import os, time, datetime, calendar, textwrap, signal, collections
21 import os, time, datetime, calendar, textwrap, signal, collections
22 import imp, socket, urllib
22 import imp, socket, urllib
23 import gc
23 import gc
24 import bz2
24 import bz2
25 import zlib
25 import zlib
26
26
# Select the platform-specific implementation module: Windows vs POSIX.
if os.name == 'nt':
    import windows as platform
else:
    import posix as platform

# Re-export the platform module's entry points at util module level so
# callers can use util.<name> regardless of the underlying platform.
cachestat = platform.cachestat
checkexec = platform.checkexec
checklink = platform.checklink
copymode = platform.copymode
executablepath = platform.executablepath
expandglobs = platform.expandglobs
explainexit = platform.explainexit
findexe = platform.findexe
gethgcmd = platform.gethgcmd
getuser = platform.getuser
groupmembers = platform.groupmembers
groupname = platform.groupname
hidewindow = platform.hidewindow
isexec = platform.isexec
isowner = platform.isowner
localpath = platform.localpath
lookupreg = platform.lookupreg
makedir = platform.makedir
nlinks = platform.nlinks
normpath = platform.normpath
normcase = platform.normcase
normcasespec = platform.normcasespec
normcasefallback = platform.normcasefallback
openhardlinks = platform.openhardlinks
oslink = platform.oslink
parsepatchoutput = platform.parsepatchoutput
pconvert = platform.pconvert
poll = platform.poll
popen = platform.popen
posixfile = platform.posixfile
quotecommand = platform.quotecommand
readpipe = platform.readpipe
rename = platform.rename
removedirs = platform.removedirs
samedevice = platform.samedevice
samefile = platform.samefile
samestat = platform.samestat
setbinary = platform.setbinary
setflags = platform.setflags
setsignalhandler = platform.setsignalhandler
shellquote = platform.shellquote
spawndetached = platform.spawndetached
split = platform.split
sshargs = platform.sshargs
# prefer the C implementation from osutil when it provides statfiles
statfiles = getattr(osutil, 'statfiles', platform.statfiles)
statisexec = platform.statisexec
statislink = platform.statislink
termwidth = platform.termwidth
testpid = platform.testpid
umask = platform.umask
unlink = platform.unlink
unlinkpath = platform.unlinkpath
username = platform.username
85
85
# Python compatibility

# Unique sentinel object: distinguishes "attribute missing" from any real
# attribute value (including None or other falsy values).
_notset = object()

# disable Python's problematic floating point timestamps (issue4836)
# (Python hypocritically says you shouldn't change this behavior in
# libraries, and sure enough Mercurial is not a library.)
os.stat_float_times(False)

def safehasattr(thing, attr):
    """Return True if `thing` has an attribute named `attr`.

    Implemented via getattr() with the `_notset` sentinel as default, so
    attributes whose value is falsy are still reported as present.
    """
    return getattr(thing, attr, _notset) is not _notset
97
97
from hashlib import md5, sha1

# map of digest name -> hash constructor
DIGESTS = {
    'md5': md5,
    'sha1': sha1,
}
# List of digest types from strongest to weakest
DIGESTS_BY_STRENGTH = ['sha1', 'md5']

# sha512 may be unavailable on some exotic builds; register it only when
# hashlib provides it
try:
    import hashlib
    DIGESTS['sha512'] = hashlib.sha512
    DIGESTS_BY_STRENGTH.insert(0, 'sha512')
except ImportError:
    pass

# sanity check: every strength-ordered name must have a constructor
for k in DIGESTS_BY_STRENGTH:
    assert k in DIGESTS
118
118
class digester(object):
    """helper to compute digests.

    This helper can be used to compute one or more digests given their name.

    >>> d = digester(['md5', 'sha1'])
    >>> d.update('foo')
    >>> [k for k in sorted(d)]
    ['md5', 'sha1']
    >>> d['md5']
    'acbd18db4cc2f85cedef654fccc4a4d8'
    >>> d['sha1']
    '0beec7b5ea3f0fdbc95d0dd47f3c5bc275da8a33'
    >>> digester.preferred(['md5', 'sha1'])
    'sha1'
    """

    def __init__(self, digests, s=''):
        # one live hash object per requested digest name
        self._hashes = {}
        for k in digests:
            if k not in DIGESTS:
                raise Abort(_('unknown digest type: %s') % k)
            self._hashes[k] = DIGESTS[k]()
        if s:
            self.update(s)

    def update(self, data):
        # feed the same data into every tracked hash object
        for h in self._hashes.values():
            h.update(data)

    def __getitem__(self, key):
        if key not in DIGESTS:
            # fix: previously raised with the undefined name 'k', which
            # turned the intended Abort into a NameError
            raise Abort(_('unknown digest type: %s') % key)
        return self._hashes[key].hexdigest()

    def __iter__(self):
        return iter(self._hashes)

    @staticmethod
    def preferred(supported):
        """returns the strongest digest type in both supported and DIGESTS."""

        for k in DIGESTS_BY_STRENGTH:
            if k in supported:
                return k
        return None
165
165
class digestchecker(object):
    """file handle wrapper that additionally checks content against a given
    size and digests.

    d = digestchecker(fh, size, {'md5': '...'})

    When multiple digests are given, all of them are validated.
    """

    def __init__(self, fh, size, digests):
        self._fh = fh
        self._size = size
        self._got = 0  # bytes read so far
        self._digests = dict(digests)
        self._digester = digester(self._digests.keys())

    def read(self, length=-1):
        # read through to the wrapped handle, updating digests and byte count
        data = self._fh.read(length)
        self._digester.update(data)
        self._got += len(data)
        return data

    def validate(self):
        # size must match exactly before digests are even considered
        if self._size != self._got:
            raise Abort(_('size mismatch: expected %d, got %d') %
                        (self._size, self._got))
        for name, expected in self._digests.items():
            actual = self._digester[name]
            if expected != actual:
                # i18n: first parameter is a digest name
                raise Abort(_('%s mismatch: expected %s, got %s') %
                            (name, expected, actual))
197
197
# Compatibility shim: keep a usable `buffer` callable on interpreters that
# lack the builtin.
try:
    buffer = buffer
except NameError:
    if sys.version_info[0] < 3:
        def buffer(sliceable, offset=0):
            # Python 2 without the builtin: plain slicing (copies the tail)
            return sliceable[offset:]
    else:
        def buffer(sliceable, offset=0):
            # Python 3: a memoryview slice gives a zero-copy view
            return memoryview(sliceable)[offset:]
207
207
import subprocess
# On POSIX, close inherited file descriptors in spawned children.
closefds = os.name == 'posix'

# read size used by bufferedinputpipe._fillbuffer
_chunksize = 4096
212
212
class bufferedinputpipe(object):
    """a manually buffered input pipe

    Python will not let us use buffered IO and lazy reading with 'polling' at
    the same time. We cannot probe the buffer state and select will not detect
    that data are ready to read if they are already buffered.

    This class let us work around that by implementing its own buffering
    (allowing efficient readline) while offering a way to know if the buffer is
    empty from the output (allowing collaboration of the buffer with polling).

    This class lives in the 'util' module because it makes use of the 'os'
    module from the python stdlib.
    """

    def __init__(self, input):
        # the underlying file-like object we read from
        self._input = input
        # raw chunks read from the fd but not yet handed to the caller
        self._buffer = []
        # set once os.read() returns an empty string
        self._eof = False
        # total number of buffered bytes across all chunks
        self._lenbuf = 0

    @property
    def hasbuffer(self):
        """True is any data is currently buffered

        This will be used externally a pre-step for polling IO. If there is
        already data then no polling should be set in place."""
        return bool(self._buffer)

    @property
    def closed(self):
        return self._input.closed

    def fileno(self):
        return self._input.fileno()

    def close(self):
        return self._input.close()

    def read(self, size):
        # keep filling until we have `size` bytes or hit EOF
        while (not self._eof) and (self._lenbuf < size):
            self._fillbuffer()
        return self._frombuffer(size)

    def readline(self, *args, **kwargs):
        if 1 < len(self._buffer):
            # this should not happen because both read and readline end with a
            # _frombuffer call that collapse it.
            self._buffer = [''.join(self._buffer)]
            self._lenbuf = len(self._buffer[0])
        # lfi: index of the first '\n' in the most recent chunk, -1 if none
        lfi = -1
        if self._buffer:
            lfi = self._buffer[-1].find('\n')
        while (not self._eof) and lfi < 0:
            self._fillbuffer()
            if self._buffer:
                lfi = self._buffer[-1].find('\n')
        size = lfi + 1
        if lfi < 0: # end of file
            size = self._lenbuf
        elif 1 < len(self._buffer):
            # we need to take previous chunks into account
            size += self._lenbuf - len(self._buffer[-1])
        return self._frombuffer(size)

    def _frombuffer(self, size):
        """return at most 'size' data from the buffer

        The data are removed from the buffer."""
        if size == 0 or not self._buffer:
            return ''
        buf = self._buffer[0]
        if 1 < len(self._buffer):
            # collapse all pending chunks into a single string first
            buf = ''.join(self._buffer)

        data = buf[:size]
        buf = buf[len(data):]
        if buf:
            # keep the unconsumed remainder as the sole buffered chunk
            self._buffer = [buf]
            self._lenbuf = len(buf)
        else:
            self._buffer = []
            self._lenbuf = 0
        return data

    def _fillbuffer(self):
        """read data to the buffer"""
        data = os.read(self._input.fileno(), _chunksize)
        if not data:
            self._eof = True
        else:
            self._lenbuf += len(data)
            self._buffer.append(data)
306
306
def popen2(cmd, env=None, newlines=False):
    """Run cmd in a shell and return its (stdin, stdout) pipe pair."""
    # Setting bufsize to -1 lets the system decide the buffer size.
    # The default for bufsize is 0, meaning unbuffered. This leads to
    # poor performance on Mac OS X: http://bugs.python.org/issue4194
    proc = subprocess.Popen(cmd,
                            shell=True,
                            bufsize=-1,
                            close_fds=closefds,
                            stdin=subprocess.PIPE,
                            stdout=subprocess.PIPE,
                            universal_newlines=newlines,
                            env=env)
    return proc.stdin, proc.stdout
317
317
def popen3(cmd, env=None, newlines=False):
    """Run cmd in a shell and return its (stdin, stdout, stderr) pipes."""
    # delegate to popen4 and drop the Popen object from the result
    return popen4(cmd, env, newlines)[:3]
321
321
def popen4(cmd, env=None, newlines=False, bufsize=-1):
    """Run cmd in a shell; return (stdin, stdout, stderr, proc)."""
    proc = subprocess.Popen(cmd,
                            shell=True,
                            bufsize=bufsize,
                            close_fds=closefds,
                            stdin=subprocess.PIPE,
                            stdout=subprocess.PIPE,
                            stderr=subprocess.PIPE,
                            universal_newlines=newlines,
                            env=env)
    return proc.stdin, proc.stdout, proc.stderr, proc
330
330
def version():
    """Return version information if available."""
    try:
        # __version__ is generated at build time; missing in a source tree
        import __version__
    except ImportError:
        return 'unknown'
    return __version__.version
338
338
def versiontuple(v=None, n=4):
    """Parses a Mercurial version string into an N-tuple.

    The version string to be parsed is specified with the ``v`` argument.
    If it isn't defined, the current Mercurial version string will be parsed.

    ``n`` can be 2, 3, or 4. Here is how some version strings map to
    returned values:

    >>> v = '3.6.1+190-df9b73d2d444'
    >>> versiontuple(v, 2)
    (3, 6)
    >>> versiontuple(v, 3)
    (3, 6, 1)
    >>> versiontuple(v, 4)
    (3, 6, 1, '190-df9b73d2d444')

    >>> versiontuple('3.6.1+190-df9b73d2d444+20151118')
    (3, 6, 1, '190-df9b73d2d444+20151118')

    >>> v = '3.6'
    >>> versiontuple(v, 2)
    (3, 6)
    >>> versiontuple(v, 3)
    (3, 6, None)
    >>> versiontuple(v, 4)
    (3, 6, None, None)
    """
    if not v:
        v = version()
    # split off the "+extra" suffix, if any; only the first '+' delimits it
    base, sep, extra = v.partition('+')
    if not sep:
        extra = None

    vints = []
    for piece in base.split('.'):
        try:
            vints.append(int(piece))
        except ValueError:
            # stop at the first non-numeric component
            break
    # pad to at least three components, e.g. (3, 6) -> (3, 6, None)
    vints.extend([None] * (3 - len(vints)))

    if n == 2:
        return (vints[0], vints[1])
    if n == 3:
        return (vints[0], vints[1], vints[2])
    if n == 4:
        return (vints[0], vints[1], vints[2], extra)
391
# used by parsedate
# Ordered list of strptime() patterns tried when parsing a user-supplied
# date; both 24-hour and am/pm variants are accepted.
defaultdateformats = (
    '%Y-%m-%d %H:%M:%S',
    '%Y-%m-%d %I:%M:%S%p',
    '%Y-%m-%d %H:%M',
    '%Y-%m-%d %I:%M%p',
    '%Y-%m-%d',
    '%m-%d',
    '%m/%d',
    '%m/%d/%y',
    '%m/%d/%Y',
    '%a %b %d %H:%M:%S %Y',
    '%a %b %d %I:%M:%S%p %Y',
    '%a, %d %b %Y %H:%M:%S', #  GNU coreutils "/bin/date --rfc-2822"
    '%b %d %H:%M:%S %Y',
    '%b %d %I:%M:%S%p %Y',
    '%b %d %H:%M:%S',
    '%b %d %I:%M:%S%p',
    '%b %d %H:%M',
    '%b %d %I:%M%p',
    '%b %d %Y',
    '%b %d',
    '%H:%M:%S',
    '%I:%M:%S%p',
    '%H:%M',
    '%I:%M%p',
    )

# coarser, date-only formats additionally accepted in some contexts
extendeddateformats = defaultdateformats + (
    "%Y",
    "%Y-%m",
    "%b",
    "%b %Y",
    )
373
426
def cachefunc(func):
    '''cache the result of function calls'''
    # XXX doesn't handle keywords args
    argcount = func.func_code.co_argcount
    if argcount == 0:
        # zero-arg function: memoize the single result in a one-slot list
        cache = []
        def f():
            if not cache:
                cache.append(func())
            return cache[0]
        return f
    cache = {}
    if argcount == 1:
        # we gain a small amount of time because
        # we don't need to pack/unpack the list
        def f(arg):
            if arg not in cache:
                cache[arg] = func(arg)
            return cache[arg]
    else:
        def f(*args):
            if args not in cache:
                cache[args] = func(*args)
            return cache[args]

    return f
399
452
class sortdict(dict):
    '''a simple sorted dictionary

    Keys iterate in insertion order; assigning to an existing key moves it
    to the end of that order.
    '''
    def __init__(self, data=None):
        # parallel list holding the keys in insertion order
        self._list = []
        if data:
            self.update(data)
    def copy(self):
        return sortdict(self)
    def __setitem__(self, key, val):
        if key in self:
            # re-assignment moves the key to the end of the order
            self._list.remove(key)
        self._list.append(key)
        dict.__setitem__(self, key, val)
    def __iter__(self):
        return self._list.__iter__()
    def update(self, src):
        if isinstance(src, dict):
            src = src.iteritems()
        for k, v in src:
            self[k] = v
    def clear(self):
        dict.clear(self)
        self._list = []
    def items(self):
        return [(k, self[k]) for k in self._list]
    def __delitem__(self, key):
        dict.__delitem__(self, key)
        self._list.remove(key)
    def pop(self, key, *args, **kwargs):
        # fix: the popped value was previously discarded instead of being
        # returned, breaking the dict.pop() contract
        value = dict.pop(self, key, *args, **kwargs)
        try:
            self._list.remove(key)
        except ValueError:
            # key was absent and a default was returned; order is untouched
            pass
        return value
    def keys(self):
        return self._list
    def iterkeys(self):
        return self._list.__iter__()
    def iteritems(self):
        for k in self._list:
            yield k, self[k]
    def insert(self, index, key, val):
        '''place key at the given position in the order'''
        self._list.insert(index, key)
        dict.__setitem__(self, key, val)
444
497
class lrucachedict(object):
    '''cache most recent gets from or sets to this dictionary'''
    def __init__(self, maxsize):
        self._cache = {}
        self._maxsize = maxsize
        # keys ordered from least- to most-recently used
        self._order = collections.deque()

    def __getitem__(self, key):
        # a successful lookup refreshes the key's recency
        value = self._cache[key]
        self._order.remove(key)
        self._order.append(key)
        return value

    def __setitem__(self, key, value):
        if key in self._cache:
            self._order.remove(key)
        elif len(self._cache) >= self._maxsize:
            # at capacity: evict the least recently used entry
            del self._cache[self._order.popleft()]
        self._cache[key] = value
        self._order.append(key)

    def __contains__(self, key):
        return key in self._cache

    def clear(self):
        self._cache.clear()
        self._order = collections.deque()
473
526
def lrucachefunc(func):
    '''cache most recent results of function calls'''
    cache = {}
    # keys ordered from least- to most-recently used; capacity is 20
    order = collections.deque()
    if func.func_code.co_argcount == 1:
        def f(arg):
            if arg in cache:
                order.remove(arg)
            else:
                if len(cache) > 20:
                    # drop the least recently used entry
                    del cache[order.popleft()]
                cache[arg] = func(arg)
            order.append(arg)
            return cache[arg]
    else:
        def f(*args):
            if args in cache:
                order.remove(args)
            else:
                if len(cache) > 20:
                    del cache[order.popleft()]
                cache[args] = func(*args)
            order.append(args)
            return cache[args]

    return f
500
553
class propertycache(object):
    """Descriptor that computes a value once and caches it on the instance.

    The computed result is stored under the wrapped function's name in the
    instance __dict__, so subsequent reads of that name bypass __get__.
    """
    def __init__(self, func):
        self.func = func
        self.name = func.__name__

    def __get__(self, obj, type=None):
        value = self.func(obj)
        self.cachevalue(obj, value)
        return value

    def cachevalue(self, obj, value):
        # __dict__ assignment required to bypass __setattr__ (eg: repoview)
        obj.__dict__[self.name] = value
513
566
def pipefilter(s, cmd):
    '''filter string S through command CMD, returning its output'''
    proc = subprocess.Popen(cmd, shell=True, close_fds=closefds,
                            stdin=subprocess.PIPE,
                            stdout=subprocess.PIPE)
    out, _err = proc.communicate(s)
    return out
520
573
def tempfilter(s, cmd):
    '''filter string S through a pair of temporary files with CMD.
    CMD is used as a template to create the real command to be run,
    with the strings INFILE and OUTFILE replaced by the real names of
    the temporary files generated.'''
    inname = outname = None
    try:
        # write the input into a temp file for the command to read
        infd, inname = tempfile.mkstemp(prefix='hg-filter-in-')
        fp = os.fdopen(infd, 'wb')
        fp.write(s)
        fp.close()
        # reserve an output temp file for the command to fill
        outfd, outname = tempfile.mkstemp(prefix='hg-filter-out-')
        os.close(outfd)
        cmd = cmd.replace('INFILE', inname).replace('OUTFILE', outname)
        code = os.system(cmd)
        if sys.platform == 'OpenVMS' and code & 1:
            # OpenVMS uses a different exit status convention; an odd code
            # is treated as success here
            code = 0
        if code:
            raise Abort(_("command '%s' failed: %s") %
                        (cmd, explainexit(code)))
        fp = open(outname, 'rb')
        r = fp.read()
        fp.close()
        return r
    finally:
        # best-effort cleanup of both temp files
        try:
            if inname:
                os.unlink(inname)
        except OSError:
            pass
        try:
            if outname:
                os.unlink(outname)
        except OSError:
            pass
557
610
# maps a command prefix to the function implementing that filtering
# strategy; filter() below dispatches on these prefixes and falls back
# to pipefilter for plain commands
filtertable = {
    'tempfile:': tempfilter,
    'pipe:': pipefilter,
    }
562
615
def filter(s, cmd):
    "filter a string through a command that transforms its input to its output"
    for prefix, handler in filtertable.iteritems():
        if cmd.startswith(prefix):
            # strip the dispatch prefix and any leading whitespace
            return handler(s, cmd[len(prefix):].lstrip())
    # no recognized prefix: run the whole command as a shell pipe
    return pipefilter(s, cmd)
569
622
def binary(s):
    """return true if a string is binary data"""
    # heuristic: a NUL byte marks the data as binary
    if not s:
        return False
    return '\0' in s
573
626
def increasingchunks(source, min=1024, max=65536):
    '''return no less than min bytes per chunk while data remains,
    doubling min after each chunk until it reaches max'''
    def _floorlog2(n):
        # index of the highest set bit (0 for n == 0)
        if not n:
            return 0
        bits = -1
        while n:
            n >>= 1
            bits += 1
        return bits

    pending = []
    pendingsize = 0
    for piece in source:
        pending.append(piece)
        pendingsize += len(piece)
        if pendingsize < min:
            continue
        if min < max:
            # grow the threshold: at least double, or jump to the
            # largest power of two not exceeding what we just emitted
            min = min << 1
            candidate = 1 << _floorlog2(pendingsize)
            if candidate > min:
                min = candidate
            if min > max:
                min = max
        yield ''.join(pending)
        pending = []
        pendingsize = 0
    if pending:
        # emit whatever is left, even if below the threshold
        yield ''.join(pending)
604
657
# re-export the Abort exception so util users need not import error directly
Abort = error.Abort
606
659
def always(fn):
    """matcher helper: accept any input (the argument is ignored)"""
    del fn  # explicitly unused
    return True
609
662
def never(fn):
    """matcher helper: reject any input (the argument is ignored)"""
    del fn  # explicitly unused
    return False
612
665
def nogc(func):
    """disable garbage collector

    Python's garbage collector triggers a GC each time a certain number of
    container objects (the number being defined by gc.get_threshold()) are
    allocated even when marked not to be tracked by the collector. Tracking has
    no effect on when GCs are triggered, only on what objects the GC looks
    into. As a workaround, disable GC while building complex (huge)
    containers.

    This garbage collector issue have been fixed in 2.7.
    """
    def inner(*args, **kwargs):
        wasenabled = gc.isenabled()
        gc.disable()
        try:
            return func(*args, **kwargs)
        finally:
            # re-enable only if the caller had GC on; never force it on
            if wasenabled:
                gc.enable()
    return inner
634
687
def pathto(root, n1, n2):
    '''return the relative path from one place to another.
    root should use os.sep to separate directories
    n1 should use os.sep to separate directories
    n2 should use "/" to separate directories
    returns an os.sep-separated path.

    If n1 is a relative path, it's assumed it's
    relative to root.
    n2 should always be relative to root.
    '''
    if not n1:
        return localpath(n2)
    if os.path.isabs(n1):
        if os.path.splitdrive(root)[0] != os.path.splitdrive(n1)[0]:
            # different drives: a relative path is impossible
            return os.path.join(root, localpath(n2))
        # rebase n2 onto root so both sides are absolute
        n2 = '/'.join((pconvert(root), n2))
    comps1 = splitpath(n1)
    comps2 = n2.split('/')
    # count the shared leading components
    common = 0
    while (common < len(comps1) and common < len(comps2)
           and comps1[common] == comps2[common]):
        common += 1
    # climb out of what remains of n1, then descend into n2
    climb = ['..'] * (len(comps1) - common)
    return os.sep.join(climb + comps2[common:]) or '.'
660
713
def mainfrozen():
    """return True if we are a frozen executable.

    The code supports py2exe (most common, Windows only) and tools/freeze
    (portable, not much used).
    """
    if safehasattr(sys, "frozen"):
        return True  # new py2exe
    if safehasattr(sys, "importers"):
        return True  # old py2exe
    return imp.is_frozen("__main__")  # tools/freeze
670
723
# the location of data files matching the source code
if mainfrozen():
    # executable version (py2exe) doesn't support __file__
    datapath = os.path.dirname(sys.executable)
else:
    datapath = os.path.dirname(__file__)

# tell the i18n machinery where to find translation catalogs
i18n.setdatapath(datapath)
679
732
# cached path of the 'hg' executable; filled in lazily by hgexecutable()
_hgexecutable = None
681
734
def hgexecutable():
    """return location of the 'hg' executable.

    Defaults to $HG or 'hg' in the search path.
    """
    if _hgexecutable is not None:
        return _hgexecutable
    # first call: discover the executable and cache it
    hg = os.environ.get('HG')
    mainmod = sys.modules['__main__']
    if hg:
        _sethgexecutable(hg)
    elif mainfrozen():
        _sethgexecutable(sys.executable)
    elif os.path.basename(getattr(mainmod, '__file__', '')) == 'hg':
        _sethgexecutable(mainmod.__file__)
    else:
        _sethgexecutable(findexe('hg') or os.path.basename(sys.argv[0]))
    return _hgexecutable
700
753
def _sethgexecutable(path):
    """set location of the 'hg' executable"""
    # module-level cache consulted by hgexecutable()
    global _hgexecutable
    _hgexecutable = path
705
758
706 def _isstdout(f):
759 def _isstdout(f):
707 fileno = getattr(f, 'fileno', None)
760 fileno = getattr(f, 'fileno', None)
708 return fileno and fileno() == sys.__stdout__.fileno()
761 return fileno and fileno() == sys.__stdout__.fileno()
709
762
def system(cmd, environ=None, cwd=None, onerr=None, errprefix=None, out=None):
    '''enhanced shell command execution.
    run with environment maybe modified, maybe in different dir.

    if command fails and onerr is None, return status, else raise onerr
    object as exception.

    if out is specified, it is assumed to be a file-like object that has a
    write() method. stdout and stderr will be redirected to out.'''
    if environ is None:
        environ = {}
    try:
        # flush our own buffered output before the child writes to the tty
        sys.stdout.flush()
    except Exception:
        pass
    def py2shell(val):
        'convert python object into string that is useful to shell'
        if val is None or val is False:
            return '0'
        if val is True:
            return '1'
        return str(val)
    # keep the unquoted command for error reporting below
    origcmd = cmd
    cmd = quotecommand(cmd)
    if sys.platform == 'plan9' and (sys.version_info[0] == 2
                                    and sys.version_info[1] < 7):
        # subprocess kludge to work around issues in half-baked Python
        # ports, notably bichued/python:
        if not cwd is None:
            os.chdir(cwd)
        rc = os.system(cmd)
    else:
        env = dict(os.environ)
        env.update((k, py2shell(v)) for k, v in environ.iteritems())
        # let hooks and child hg processes know which binary invoked them
        env['HG'] = hgexecutable()
        if out is None or _isstdout(out):
            # child inherits our stdout/stderr directly
            rc = subprocess.call(cmd, shell=True, close_fds=closefds,
                                 env=env, cwd=cwd)
        else:
            # capture combined stdout+stderr and forward it line by line
            # to the caller-supplied file-like object
            proc = subprocess.Popen(cmd, shell=True, close_fds=closefds,
                                    env=env, cwd=cwd, stdout=subprocess.PIPE,
                                    stderr=subprocess.STDOUT)
            while True:
                line = proc.stdout.readline()
                if not line:
                    break
                out.write(line)
            proc.wait()
            rc = proc.returncode
    if sys.platform == 'OpenVMS' and rc & 1:
        # OpenVMS reports success with the low bit set
        rc = 0
    if rc and onerr:
        errmsg = '%s %s' % (os.path.basename(origcmd.split(None, 1)[0]),
                            explainexit(rc)[0])
        if errprefix:
            errmsg = '%s: %s' % (errprefix, errmsg)
        raise onerr(errmsg)
    return rc
768
821
def checksignature(func):
    '''wrap a function with code to check for calling errors'''
    def checked(*args, **kwargs):
        try:
            return func(*args, **kwargs)
        except TypeError:
            # a traceback of depth one means the TypeError came from the
            # call itself (bad arguments), not from inside func
            if len(traceback.extract_tb(sys.exc_info()[2])) == 1:
                raise error.SignatureError
            raise

    return checked
780
833
def copyfile(src, dest, hardlink=False):
    "copy a file, preserving mode and atime/mtime"
    if os.path.lexists(dest):
        unlink(dest)
    # hardlinks are problematic on CIFS, quietly ignore this flag
    # until we find a way to work around it cleanly (issue4546)
    if False and hardlink:
        try:
            oslink(src, dest)
            return
        except (IOError, OSError):
            pass # fall back to normal copy
    if os.path.islink(src):
        # recreate the symlink instead of copying its target
        os.symlink(os.readlink(src), dest)
        return
    try:
        # NOTE(review): copymode preserves permissions only; timestamps
        # are not copied despite the docstring — confirm intent
        shutil.copyfile(src, dest)
        shutil.copymode(src, dest)
    except shutil.Error as inst:
        raise Abort(str(inst))
801
854
def copyfiles(src, dst, hardlink=None, progress=lambda t, pos: None):
    """Copy a directory tree using hardlinks if possible."""
    # number of files handled so far, reported through progress()
    num = 0

    if hardlink is None:
        # only attempt hardlinks when src and dst live on the same device
        hardlink = (os.stat(src).st_dev ==
                    os.stat(os.path.dirname(dst)).st_dev)
    if hardlink:
        topic = _('linking')
    else:
        topic = _('copying')

    if os.path.isdir(src):
        os.mkdir(dst)
        for name, kind in osutil.listdir(src):
            srcname = os.path.join(src, name)
            dstname = os.path.join(dst, name)
            def nprog(t, pos):
                # offset the child's progress by files already handled here
                if pos is not None:
                    return progress(t, pos + num)
            # recurse; the child may flip hardlink to False if linking fails
            hardlink, n = copyfiles(srcname, dstname, hardlink, progress=nprog)
            num += n
    else:
        if hardlink:
            try:
                oslink(src, dst)
            except (IOError, OSError):
                # linking failed once: fall back to copying from here on
                hardlink = False
                shutil.copy(src, dst)
        else:
            shutil.copy(src, dst)
        num += 1
        progress(topic, num)
    # a final None position tells the UI the operation is complete
    progress(topic, None)

    # return the (possibly downgraded) hardlink flag and the file count
    return hardlink, num
838
891
# Windows-reserved device names and characters that may never appear in
# a path component (see MSDN "Naming Files, Paths, and Namespaces")
_winreservednames = '''con prn aux nul
    com1 com2 com3 com4 com5 com6 com7 com8 com9
    lpt1 lpt2 lpt3 lpt4 lpt5 lpt6 lpt7 lpt8 lpt9'''.split()
_winreservedchars = ':*?"<>|'
def checkwinfilename(path):
    r'''Check that the base-relative path is a valid filename on Windows.
    Returns None if the path is ok, or a UI string describing the problem.

    >>> checkwinfilename("just/a/normal/path")
    >>> checkwinfilename("foo/bar/con.xml")
    "filename contains 'con', which is reserved on Windows"
    >>> checkwinfilename("foo/con.xml/bar")
    "filename contains 'con', which is reserved on Windows"
    >>> checkwinfilename("foo/bar/xml.con")
    >>> checkwinfilename("foo/bar/AUX/bla.txt")
    "filename contains 'AUX', which is reserved on Windows"
    >>> checkwinfilename("foo/bar/bla:.txt")
    "filename contains ':', which is reserved on Windows"
    >>> checkwinfilename("foo/bar/b\07la.txt")
    "filename contains '\\x07', which is invalid on Windows"
    >>> checkwinfilename("foo/bar/bla ")
    "filename ends with ' ', which is not allowed on Windows"
    >>> checkwinfilename("../bar")
    >>> checkwinfilename("foo\\")
    "filename ends with '\\', which is invalid on Windows"
    >>> checkwinfilename("foo\\/bar")
    "directory name ends with '\\', which is invalid on Windows"
    '''
    if path.endswith('\\'):
        return _("filename ends with '\\', which is invalid on Windows")
    if '\\/' in path:
        return _("directory name ends with '\\', which is invalid on Windows")
    for component in path.replace('\\', '/').split('/'):
        if not component:
            continue
        for ch in component:
            if ch in _winreservedchars:
                return _("filename contains '%s', which is reserved "
                         "on Windows") % ch
            if ord(ch) <= 31:
                return _("filename contains %r, which is invalid "
                         "on Windows") % ch
        # device names are reserved regardless of extension (con.xml)
        stem = component.split('.')[0]
        if stem and stem.lower() in _winreservednames:
            return _("filename contains '%s', which is reserved "
                     "on Windows") % stem
        last = component[-1]
        # substring test deliberately lets '.' and '..' through
        if last in '. ' and component not in '..':
            return _("filename ends with '%s', which is not allowed "
                     "on Windows") % last
889
942
# on Windows validate against the Windows rules directly; elsewhere defer
# to the platform module's implementation
if os.name == 'nt':
    checkosfilename = checkwinfilename
else:
    checkosfilename = platform.checkosfilename
894
947
def makelock(info, pathname):
    # Prefer a symlink whose target encodes the lock holder; fall back to
    # an exclusively-created regular file on platforms without symlinks.
    try:
        return os.symlink(info, pathname)
    except OSError as why:
        if why.errno == errno.EEXIST:
            # lock already held: propagate so callers can handle contention
            raise
        # other failures (e.g. filesystem without symlinks): use a file
    except AttributeError: # no symlink in os
        pass

    fd = os.open(pathname, os.O_CREAT | os.O_WRONLY | os.O_EXCL)
    os.write(fd, info)
    os.close(fd)
907
960
def readlock(pathname):
    # A lock is normally a symlink whose target is the payload. When
    # readlink is unsupported (EINVAL/ENOSYS) or absent entirely, fall
    # back to reading a regular lock file.
    try:
        return os.readlink(pathname)
    except OSError as why:
        if why.errno not in (errno.EINVAL, errno.ENOSYS):
            raise
    except AttributeError: # no symlink in os
        pass
    fp = posixfile(pathname)
    contents = fp.read()
    fp.close()
    return contents
920
973
def fstat(fp):
    '''stat file object that may not have fileno method.'''
    try:
        getfd = fp.fileno
    except AttributeError:
        # no file descriptor available: stat by name instead
        return os.stat(fp.name)
    return os.fstat(getfd())
927
980
928 # File system features
981 # File system features
929
982
def checkcase(path):
    """
    Return true if the given path is on a case-sensitive filesystem

    Requires a path (like /foo/.hg) ending with a foldable final
    directory component.
    """
    st1 = os.lstat(path)
    dirname, base = os.path.split(path)
    folded = base.upper()
    if folded == base:
        folded = base.lower()
    if folded == base:
        # name has no case to fold: no evidence against sensitivity
        return True
    try:
        st2 = os.lstat(os.path.join(dirname, folded))
    except OSError:
        # folded variant doesn't exist: filesystem distinguishes case
        return True
    # identical stat means both spellings name the same file
    return st2 != st1
952
1005
try:
    import re2
    # tri-state sentinel: None = re2 present but not yet verified to work
    # (checked lazily in _re._checkre2), False = unavailable, True = usable
    _re2 = None
except ImportError:
    _re2 = False
958
1011
class _re(object):
    # Facade over the 're' module that transparently uses Google's re2
    # when it is installed and the pattern/flags are compatible.
    def _checkre2(self):
        # resolve the tri-state _re2 sentinel to True/False
        global _re2
        try:
            # check if match works, see issue3964
            _re2 = bool(re2.match(r'\[([^\[]+)\]', '[ui]'))
        except ImportError:
            _re2 = False

    def compile(self, pat, flags=0):
        '''Compile a regular expression, using re2 if possible

        For best performance, use only re2-compatible regexp features. The
        only flags from the re module that are re2-compatible are
        IGNORECASE and MULTILINE.'''
        if _re2 is None:
            self._checkre2()
        if _re2 and (flags & ~(remod.IGNORECASE | remod.MULTILINE)) == 0:
            # re2 takes inline flags rather than a flags argument
            if flags & remod.IGNORECASE:
                pat = '(?i)' + pat
            if flags & remod.MULTILINE:
                pat = '(?m)' + pat
            try:
                return re2.compile(pat)
            except re2.error:
                # pattern uses features re2 doesn't support; fall through
                pass
        return remod.compile(pat, flags)

    @propertycache
    def escape(self):
        '''Return the version of escape corresponding to self.compile.

        This is imperfect because whether re2 or re is used for a particular
        function depends on the flags, etc, but it's the best we can do.
        '''
        global _re2
        if _re2 is None:
            self._checkre2()
        if _re2:
            return re2.escape
        else:
            return remod.escape
1001
1054
# singleton used throughout the code base as util.re.compile / util.re.escape
re = _re()
1003
1056
# per-directory cache of {normcased name: on-disk name}, keyed by dir path
_fspathcache = {}
def fspath(name, root):
    '''Get name in the case stored in the filesystem

    The name should be relative to root, and be normcase-ed for efficiency.

    Note that this function is unnecessary, and should not be
    called, for case-sensitive filesystems (simply because it's expensive).

    The root should be normcase-ed, too.
    '''
    def _makefspathcacheentry(dir):
        # map each entry's folded form to its actual on-disk spelling
        return dict((normcase(n), n) for n in os.listdir(dir))

    seps = os.sep
    if os.altsep:
        seps = seps + os.altsep
    # Protect backslashes. This gets silly very quickly.
    # str.replace returns a new string: the result must be assigned, or on
    # Windows the raw '\' is swallowed as an escape inside the character
    # class below and backslash separators are silently not matched.
    seps = seps.replace('\\', '\\\\')
    # alternate runs of non-separator and separator characters
    pattern = remod.compile(r'([^%s]+)|([%s]+)' % (seps, seps))
    dir = os.path.normpath(root)
    result = []
    for part, sep in pattern.findall(name):
        if sep:
            # separators are passed through unchanged
            result.append(sep)
            continue

        if dir not in _fspathcache:
            _fspathcache[dir] = _makefspathcacheentry(dir)
        contents = _fspathcache[dir]

        found = contents.get(part)
        if not found:
            # retry "once per directory" per "dirstate.walk" which
            # may take place for each patches of "hg qpush", for example
            _fspathcache[dir] = contents = _makefspathcacheentry(dir)
            found = contents.get(part)

        result.append(found or part)
        dir = os.path.join(dir, part)

    return ''.join(result)
1046
1099
def checknlink(testfile):
    '''check whether hardlink count reporting works properly'''

    # testfile may be open, so we need a separate file for checking to
    # work around issue2543 (or testfile may get lost on Samba shares)
    f1 = testfile + ".hgtmp1"
    if os.path.lexists(f1):
        return False
    try:
        posixfile(f1, 'w').close()
    except IOError:
        return False

    f2 = testfile + ".hgtmp2"
    fd = None
    try:
        oslink(f1, f2)
        # nlinks() may behave differently for files on Windows shares if
        # the file is open.
        fd = posixfile(f2)
        return nlinks(f2) > 1
    except OSError:
        # link creation or stat failed: assume counts are unreliable
        return False
    finally:
        if fd is not None:
            fd.close()
        # best-effort cleanup of both temporary files
        for f in (f1, f2):
            try:
                os.unlink(f)
            except OSError:
                pass
1078
1131
def endswithsep(path):
    '''Check path ends with os.sep or os.altsep.'''
    if path.endswith(os.sep):
        return True
    # note: evaluates to os.altsep itself (falsy None) when there is no
    # alternate separator, matching the historical truthiness contract
    return os.altsep and path.endswith(os.altsep)
1082
1135
def splitpath(path):
    '''Split path by os.sep.
    Note that this function does not use os.altsep because this is
    an alternative of simple "xxx.split(os.sep)".
    It is recommended to use os.path.normpath() before using this
    function if need.'''
    sep = os.sep
    return path.split(sep)
1090
1143
def gui():
    '''Are we running in a GUI?'''
    if sys.platform != 'darwin':
        return os.name == "nt" or os.environ.get("DISPLAY")
    if 'SSH_CONNECTION' in os.environ:
        # handle SSH access to a box where the user is logged in
        return False
    if getattr(osutil, 'isgui', None):
        # check if a CoreGraphics session is available
        return osutil.isgui()
    # pure build; use a safe default
    return True
1105
1158
def mktempcopy(name, emptyok=False, createmode=None):
    """Create a temporary file with the same contents from name

    The permission bits are copied from the original file.

    If the temporary file is going to be truncated immediately, you
    can use emptyok=True as an optimization.

    Returns the name of the temporary file.
    """
    d, fn = os.path.split(name)
    # create the temp file in the same directory as the target, so that a
    # later rename over the target stays on one filesystem
    fd, temp = tempfile.mkstemp(prefix='.%s-' % fn, dir=d)
    os.close(fd)
    # Temporary files are created with mode 0600, which is usually not
    # what we want. If the original file already exists, just copy
    # its mode. Otherwise, manually obey umask.
    copymode(name, temp, createmode)
    if emptyok:
        # caller will truncate/rewrite; skip copying the contents
        return temp
    try:
        try:
            ifp = posixfile(name, "rb")
        except IOError as inst:
            if inst.errno == errno.ENOENT:
                # no original to copy from; the empty temp file is the copy
                return temp
            # annotate the error with the offending path for better messages
            if not getattr(inst, 'filename', None):
                inst.filename = name
            raise
        ofp = posixfile(temp, "wb")
        for chunk in filechunkiter(ifp):
            ofp.write(chunk)
        ifp.close()
        ofp.close()
    except: # re-raises
        # best-effort cleanup: don't leave a stale temp file behind
        try: os.unlink(temp)
        except OSError: pass
        raise
    return temp
1144
1197
class atomictempfile(object):
    '''writable file object that atomically updates a file

    All writes will go to a temporary copy of the original file. Call
    close() when you are done writing, and atomictempfile will rename
    the temporary copy to the original name, making the changes
    visible. If the object is destroyed without being closed, all your
    writes are discarded.
    '''
    def __init__(self, name, mode='w+b', createmode=None):
        # name mangling (double underscore) keeps __name private
        self.__name = name # permanent name
        # 'w' in mode means the caller will rewrite the file, so the temp
        # copy can start out empty
        self._tempname = mktempcopy(name, emptyok=('w' in mode),
                                    createmode=createmode)
        self._fp = posixfile(self._tempname, mode)

        # delegated methods
        self.write = self._fp.write
        self.seek = self._fp.seek
        self.tell = self._fp.tell
        self.fileno = self._fp.fileno

    def close(self):
        # commit: flush the temp file and rename it over the target;
        # a second close() is a no-op
        if not self._fp.closed:
            self._fp.close()
            rename(self._tempname, localpath(self.__name))

    def discard(self):
        # abort: delete the temp file without touching the target
        if not self._fp.closed:
            try:
                os.unlink(self._tempname)
            except OSError:
                pass
            self._fp.close()

    def __del__(self):
        if safehasattr(self, '_fp'): # constructor actually did something
            self.discard()
1182
1235
def makedirs(name, mode=None, notindexed=False):
    """recursive directory creation with parent mode inheritance"""
    try:
        makedir(name, notindexed)
    except OSError as err:
        if err.errno == errno.EEXIST:
            # the directory already exists: nothing to do
            return
        if err.errno != errno.ENOENT or not name:
            raise
        # a parent is missing: create ancestors first, then retry
        parent = os.path.dirname(os.path.abspath(name))
        if parent == name:
            # reached the filesystem root without succeeding; give up
            raise
        makedirs(parent, mode, notindexed)
        makedir(name, notindexed)
    if mode is not None:
        # apply the requested mode to the directory we just created
        # (the recursive call handles the parents)
        os.chmod(name, mode)
1199
1252
def ensuredirs(name, mode=None, notindexed=False):
    """race-safe recursive directory creation

    Newly created directories are marked as "not to be indexed by
    the content indexing service", if ``notindexed`` is specified
    for "write" mode access.
    """
    if os.path.isdir(name):
        return
    # create ancestors bottom-up; recursion terminates at the root,
    # where dirname(name) == name
    parent = os.path.dirname(os.path.abspath(name))
    if parent != name:
        ensuredirs(parent, mode, notindexed)
    try:
        makedir(name, notindexed)
    except OSError as err:
        if err.errno == errno.EEXIST and os.path.isdir(name):
            # someone else seems to have won a directory creation race
            return
        raise
    if mode is not None:
        os.chmod(name, mode)
1221
1274
def readfile(path):
    """Return the entire contents of the file at path as a byte string.

    The file is opened in binary mode and is guaranteed to be closed,
    even if the read raises.
    """
    # 'with' replaces the manual try/finally close() of the original
    with open(path, 'rb') as fp:
        return fp.read()
1228
1281
def writefile(path, text):
    """Write text (bytes) to the file at path, replacing any contents.

    The file is guaranteed to be closed, even if the write raises.
    """
    # 'with' replaces the manual try/finally close() of the original
    with open(path, 'wb') as fp:
        fp.write(text)
1235
1288
def appendfile(path, text):
    """Append text (bytes) to the file at path, creating it if missing.

    The file is guaranteed to be closed, even if the write raises.
    """
    # 'with' replaces the manual try/finally close() of the original
    with open(path, 'ab') as fp:
        fp.write(text)
1242
1295
class chunkbuffer(object):
    """Allow arbitrary sized chunks of data to be efficiently read from an
    iterator over chunks of arbitrary size."""

    def __init__(self, in_iter):
        """in_iter is the iterator that's iterating over the input chunks.
        targetsize is how big a buffer to try to maintain."""
        def splitbig(chunks):
            # re-yield any chunk over 1MB as a series of 256KB pieces so a
            # single huge chunk cannot dominate memory
            for chunk in chunks:
                if len(chunk) > 2**20:
                    pos = 0
                    while pos < len(chunk):
                        end = pos + 2 ** 18
                        yield chunk[pos:end]
                        pos = end
                else:
                    yield chunk
        self.iter = splitbig(in_iter)
        self._queue = collections.deque()
        # offset of the first unconsumed byte within self._queue[0]
        self._chunkoffset = 0

    def read(self, l=None):
        """Read L bytes of data from the iterator of chunks of data.
        Returns less than L bytes if the iterator runs dry.

        If size parameter is omitted, read everything"""
        if l is None:
            # drain whatever remains in the underlying iterator
            return ''.join(self.iter)

        left = l
        buf = []
        queue = self._queue
        while left > 0:
            # refill the queue
            if not queue:
                # buffer roughly 256KB ahead of the current read position
                target = 2**18
                for chunk in self.iter:
                    queue.append(chunk)
                    target -= len(chunk)
                    if target <= 0:
                        break
                if not queue:
                    # iterator exhausted: return a short read
                    break

            # The easy way to do this would be to queue.popleft(), modify the
            # chunk (if necessary), then queue.appendleft(). However, for cases
            # where we read partial chunk content, this incurs 2 dequeue
            # mutations and creates a new str for the remaining chunk in the
            # queue. Our code below avoids this overhead.

            chunk = queue[0]
            chunkl = len(chunk)
            offset = self._chunkoffset

            # Use full chunk.
            if offset == 0 and left >= chunkl:
                left -= chunkl
                queue.popleft()
                buf.append(chunk)
                # self._chunkoffset remains at 0.
                continue

            chunkremaining = chunkl - offset

            # Use all of unconsumed part of chunk.
            if left >= chunkremaining:
                left -= chunkremaining
                queue.popleft()
                # offset == 0 is enabled by block above, so this won't merely
                # copy via ``chunk[0:]``.
                buf.append(chunk[offset:])
                self._chunkoffset = 0

            # Partial chunk needed.
            else:
                buf.append(chunk[offset:offset + left])
                self._chunkoffset += left
                # left goes negative here, which terminates the loop
                left -= chunkremaining

        return ''.join(buf)
1323
1376
def filechunkiter(f, size=65536, limit=None):
    """Create a generator that produces the data in the file size
    (default 65536) bytes at a time, up to optional limit (default is
    to read all data). Chunks may be less than size bytes if the
    chunk is the last chunk in the file, or the file is a socket or
    some other type of file that sometimes reads less data than is
    requested."""
    assert size >= 0
    assert limit is None or limit >= 0
    while True:
        # never request more than the remaining limit
        nbytes = size if limit is None else min(limit, size)
        # nbytes == 0 short-circuits without touching the file
        data = nbytes and f.read(nbytes)
        if not data:
            return
        if limit:
            limit -= len(data)
        yield data
1344
1397
def makedate(timestamp=None):
    '''Return a unix timestamp (or the current time) as a (unixtime,
    offset) tuple based off the local timezone.'''
    if timestamp is None:
        timestamp = time.time()
    if timestamp < 0:
        raise Abort(_("negative timestamp: %d") % timestamp,
                    hint=_("check your clock"))
    # local-zone offset in seconds, derived from the difference between
    # the UTC and local interpretations of the same instant
    utc = datetime.datetime.utcfromtimestamp(timestamp)
    local = datetime.datetime.fromtimestamp(timestamp)
    diff = utc - local
    return timestamp, diff.days * 86400 + diff.seconds
1357
1410
def datestr(date=None, format='%a %b %d %H:%M:%S %Y %1%2'):
    """represent a (unixtime, offset) tuple as a localized time.
    unixtime is seconds since the epoch, and offset is the time zone's
    number of seconds away from UTC. if timezone is false, do not
    append time zone to string."""
    when, tz = date or makedate()
    if when < 0:
        # time.gmtime(lt) fails on Windows for lt < -43200
        when, tz = 0, 0
    if "%1" in format or "%2" in format or "%z" in format:
        # positive offsets are west of UTC, printed with a '-' sign
        sign = "-" if tz > 0 else "+"
        hours, mins = divmod(abs(tz) // 60, 60)
        format = format.replace("%z", "%1%2")
        format = format.replace("%1", "%c%02d" % (sign, hours))
        format = format.replace("%2", "%02d" % mins)
    try:
        parts = time.gmtime(float(when) - tz)
    except ValueError:
        # time was out of range
        parts = time.gmtime(sys.maxint)
    return time.strftime(format, parts)
1381
1434
def shortdate(date=None):
    """turn (timestamp, tzoff) tuple into iso 8631 date."""
    iso_format = '%Y-%m-%d'
    return datestr(date, format=iso_format)
1385
1438
def parsetimezone(tz):
    """parse a timezone string and return an offset integer"""
    if tz[0] in "+-" and len(tz) == 5 and tz[1:].isdigit():
        # numeric form like '+0500' / '-0130'; east of UTC gives a
        # negative offset (seconds west of UTC)
        direction = 1 if tz[0] == "+" else -1
        hours = int(tz[1:3])
        minutes = int(tz[3:5])
        return -direction * (hours * 60 + minutes) * 60
    if tz in ("GMT", "UTC"):
        return 0
    # unrecognized timezone string
    return None
1396
1449
def strdate(string, format, defaults=[]):
    """parse a localized time string and return a (unixtime, offset) tuple.
    if the string cannot be parsed, ValueError is raised."""
    # NOTE(review): despite the [] default, ``defaults`` is indexed by
    # string keys below, so callers are expected to pass a dict of
    # part -> (biased, now) value pairs; confirm against parsedate().
    # NOTE: unixtime = localunixtime + offset
    offset, date = parsetimezone(string.split()[-1]), string
    if offset is not None:
        # last token was a recognized timezone: strip it from the date
        date = " ".join(string.split()[:-1])

    # add missing elements from defaults
    usenow = False # default to using biased defaults
    for part in ("S", "M", "HI", "d", "mb", "yY"): # decreasing specificity
        # each entry lists alias specifiers (e.g. 'H' or 'I' for hours)
        found = [True for p in part if ("%"+p) in format]
        if not found:
            # append the default value with a '@' separator so strptime
            # can still parse the combined string
            date += "@" + defaults[part][usenow]
            format += "@%" + part[0]
        else:
            # We've found a specific time element, less specific time
            # elements are relative to today
            usenow = True

    timetuple = time.strptime(date, format)
    localunixtime = int(calendar.timegm(timetuple))
    if offset is None:
        # local timezone
        unixtime = int(time.mktime(timetuple))
        offset = unixtime - localunixtime
    else:
        unixtime = localunixtime + offset
    return unixtime, offset
1426
1479
def parsedate(date, formats=None, bias=None):
    """parse a localized date/time and return a (unixtime, offset) tuple.

    The date may be a "unixtime offset" string or in one of the specified
    formats. If the date already is a (unixtime, offset) tuple, it is returned.

    >>> parsedate(' today ') == parsedate(\
                                  datetime.date.today().strftime('%b %d'))
    True
    >>> parsedate( 'yesterday ') == parsedate((datetime.date.today() -\
                                               datetime.timedelta(days=1)\
                                              ).strftime('%b %d'))
    True
    >>> now, tz = makedate()
    >>> strnow, strtz = parsedate('now')
    >>> (strnow - now) < 1
    True
    >>> tz == strtz
    True
    """
    if bias is None:
        bias = {}
    if not date:
        # empty input maps to the epoch, UTC
        return 0, 0
    if isinstance(date, tuple) and len(date) == 2:
        # already parsed; pass through unchanged
        return date
    if not formats:
        formats = defaultdateformats
    date = date.strip()

    # accept both the English keywords and their translations
    if date == 'now' or date == _('now'):
        return makedate()
    if date == 'today' or date == _('today'):
        date = datetime.date.today().strftime('%b %d')
    elif date == 'yesterday' or date == _('yesterday'):
        date = (datetime.date.today() -
                datetime.timedelta(days=1)).strftime('%b %d')

    try:
        # fast path: "unixtime offset" pair of integers
        when, offset = map(int, date.split(' '))
    except ValueError:
        # fill out defaults
        now = makedate()
        defaults = {}
        for part in ("d", "mb", "yY", "HI", "M", "S"):
            # this piece is for rounding the specific end of unknowns
            b = bias.get(part)
            if b is None:
                if part[0] in "HMS":
                    b = "00"
                else:
                    b = "0"

            # this piece is for matching the generic end to today's date
            n = datestr(now, "%" + part[0])

            # (biased value, current value); strdate() picks one by bool
            defaults[part] = (b, n)

        # try each configured format until one parses
        for format in formats:
            try:
                when, offset = strdate(date, format, defaults)
            except (ValueError, OverflowError):
                pass
            else:
                break
        else:
            raise Abort(_('invalid date: %r') % date)
    # validate explicit (probably user-specified) date and
    # time zone offset. values must fit in signed 32 bits for
    # current 32-bit linux runtimes. timezones go from UTC-12
    # to UTC+14
    if abs(when) > 0x7fffffff:
        raise Abort(_('date exceeds 32 bits: %d') % when)
    if when < 0:
        raise Abort(_('negative date value: %d') % when)
    if offset < -50400 or offset > 43200:
        raise Abort(_('impossible time zone offset: %d') % offset)
    return when, offset
1505
1558
def matchdate(date):
    """Return a function that matches a given date match specifier

    Formats include:

    '{date}' match a given date to the accuracy provided

    '<{date}' on or before a given date

    '>{date}' on or after a given date

    >>> p1 = parsedate("10:29:59")
    >>> p2 = parsedate("10:30:00")
    >>> p3 = parsedate("10:30:59")
    >>> p4 = parsedate("10:31:00")
    >>> p5 = parsedate("Sep 15 10:30:00 1999")
    >>> f = matchdate("10:30")
    >>> f(p1[0])
    False
    >>> f(p2[0])
    True
    >>> f(p3[0])
    True
    >>> f(p4[0])
    False
    >>> f(p5[0])
    False
    """

    def lower(date):
        # earliest timestamp the (possibly partial) date could mean:
        # unspecified month/day default to 1
        d = {'mb': "1", 'd': "1"}
        return parsedate(date, extendeddateformats, d)[0]

    def upper(date):
        # latest timestamp the (possibly partial) date could mean:
        # unspecified fields default to their maximum values
        d = {'mb': "12", 'HI': "23", 'M': "59", 'S': "59"}
        # try the longest month lengths first; parsedate aborts on an
        # impossible day-of-month (e.g. Feb 31), so fall back
        for days in ("31", "30", "29"):
            try:
                d["d"] = days
                return parsedate(date, extendeddateformats, d)[0]
            except Abort:
                pass
        d["d"] = "28"
        return parsedate(date, extendeddateformats, d)[0]

    date = date.strip()

    if not date:
        raise Abort(_("dates cannot consist entirely of whitespace"))
    elif date[0] == "<":
        if not date[1:]:
            raise Abort(_("invalid day spec, use '<DATE'"))
        when = upper(date[1:])
        return lambda x: x <= when
    elif date[0] == ">":
        if not date[1:]:
            raise Abort(_("invalid day spec, use '>DATE'"))
        when = lower(date[1:])
        return lambda x: x >= when
    elif date[0] == "-":
        # '-N': within the last N days
        try:
            days = int(date[1:])
        except ValueError:
            raise Abort(_("invalid day spec: %s") % date[1:])
        if days < 0:
            raise Abort(_('%s must be nonnegative (see "hg help dates")')
                % date[1:])
        when = makedate()[0] - days * 3600 * 24
        return lambda x: x >= when
    elif " to " in date:
        # explicit inclusive range
        a, b = date.split(" to ")
        start, stop = lower(a), upper(b)
        return lambda x: x >= start and x <= stop
    else:
        # bare date: match anywhere within its span of uncertainty
        start, stop = lower(date), upper(date)
        return lambda x: x >= start and x <= stop
1581
1634
def stringmatcher(pattern):
    """
    accepts a string, possibly starting with 're:' or 'literal:' prefix.
    returns the matcher name, pattern, and matcher function.
    missing or unknown prefixes are treated as literal matches.

    helper for tests:
    >>> def test(pattern, *tests):
    ...     kind, pattern, matcher = stringmatcher(pattern)
    ...     return (kind, pattern, [bool(matcher(t)) for t in tests])

    exact matching (no prefix):
    >>> test('abcdefg', 'abc', 'def', 'abcdefg')
    ('literal', 'abcdefg', [False, False, True])

    regex matching ('re:' prefix)
    >>> test('re:a.+b', 'nomatch', 'fooadef', 'fooadefbar')
    ('re', 'a.+b', [False, False, True])

    force exact matches ('literal:' prefix)
    >>> test('literal:re:foobar', 'foobar', 're:foobar')
    ('literal', 're:foobar', [False, True])

    unknown prefixes are ignored and treated as literals
    >>> test('foo:bar', 'foo', 'bar', 'foo:bar')
    ('literal', 'foo:bar', [False, False, True])
    """
    if pattern.startswith('re:'):
        regexp = pattern[3:]
        try:
            compiled = remod.compile(regexp)
        except remod.error as e:
            raise error.ParseError(_('invalid regular expression: %s')
                                   % e)
        return 're', regexp, compiled.search
    if pattern.startswith('literal:'):
        pattern = pattern[8:]
    # unknown (or no) prefix: compare for string equality
    return 'literal', pattern, pattern.__eq__
def shortuser(user):
    """Return a short representation of a user name or email address."""
    # strip a mail domain, if any
    at = user.find('@')
    if at >= 0:
        user = user[:at]
    # keep only what follows a '<' (drops a leading real name)
    lt = user.find('<')
    if lt >= 0:
        user = user[lt + 1:]
    # cut at the first space or dot, in that order
    for sep in (' ', '.'):
        idx = user.find(sep)
        if idx >= 0:
            user = user[:idx]
    return user
1636
1689
def emailuser(user):
    """Return the user portion of an email address."""
    addr = user
    # drop the domain part
    at = addr.find('@')
    if at >= 0:
        addr = addr[:at]
    # drop anything before an angle bracket (e.g. a real name)
    lt = addr.find('<')
    if lt >= 0:
        addr = addr[lt + 1:]
    return addr
1646
1699
def email(author):
    '''get email of author.'''
    # slice between '<' and '>'; with no brackets the whole string is
    # treated as the address (find('<') == -1, so start index becomes 0)
    end = author.find('>')
    if end == -1:
        end = None
    start = author.find('<') + 1
    return author[start:end]
1653
1706
def ellipsis(text, maxlength=400):
    """Trim string to at most maxlength (default: 400) columns in display."""
    # encoding.trim understands per-character display widths
    trimmed = encoding.trim(text, maxlength, ellipsis='...')
    return trimmed
1657
1710
def unitcountfn(*unittable):
    '''return a function that renders a readable count of some quantity'''

    def go(count):
        # entries are ordered largest threshold first; use the first
        # (multiplier, divisor, format) whose threshold fits
        for multiplier, divisor, fmt in unittable:
            if count >= divisor * multiplier:
                return fmt % (count / float(divisor))
        # nothing fit: fall back to the smallest unit's format
        return unittable[-1][2] % count

    return go
1668
1721
# Render a byte count with a human-friendly unit.  Entries are tried in
# order; the first (multiplier, divisor, format) whose threshold
# (multiplier * divisor) fits the value wins, so precision drops as
# magnitude grows (e.g. '1.23 KB', '12.3 KB', '123 KB').
bytecount = unitcountfn(
    (100, 1 << 30, _('%.0f GB')),
    (10, 1 << 30, _('%.1f GB')),
    (1, 1 << 30, _('%.2f GB')),
    (100, 1 << 20, _('%.0f MB')),
    (10, 1 << 20, _('%.1f MB')),
    (1, 1 << 20, _('%.2f MB')),
    (100, 1 << 10, _('%.0f KB')),
    (10, 1 << 10, _('%.1f KB')),
    (1, 1 << 10, _('%.2f KB')),
    (1, 1, _('%.0f bytes')),
    )
1681
1734
def uirepr(s):
    """Return repr(s) with doubled backslashes collapsed.

    repr() doubles every backslash, which makes Windows paths hard to
    read in user-facing output; collapse them back to single ones.
    """
    r = repr(s)
    return r.replace('\\\\', '\\')
1685
1738
1686 # delay import of textwrap
1739 # delay import of textwrap
def MBTextWrapper(**kwargs):
    """Create a textwrap.TextWrapper subclass aware of display columns.

    On the first call this defines the wrapper class and rebinds the
    module-level name ``MBTextWrapper`` to it (see the ``global``
    statement at the bottom), so subsequent uses instantiate the class
    directly instead of re-running this factory.
    """
    class tw(textwrap.TextWrapper):
        """
        Extend TextWrapper for width-awareness.

        Neither number of 'bytes' in any encoding nor 'characters' is
        appropriate to calculate terminal columns for specified string.

        Original TextWrapper implementation uses built-in 'len()' directly,
        so overriding is needed to use width information of each characters.

        In addition, characters classified into 'ambiguous' width are
        treated as wide in East Asian area, but as narrow in other.

        This requires use decision to determine width of such characters.
        """
        def _cutdown(self, ucstr, space_left):
            # split ucstr so that the first part occupies at most
            # space_left display columns (per encoding.ucolwidth)
            l = 0
            colwidth = encoding.ucolwidth
            for i in xrange(len(ucstr)):
                l += colwidth(ucstr[i])
                if space_left < l:
                    return (ucstr[:i], ucstr[i:])
            return ucstr, ''

        # overriding of base class
        def _handle_long_word(self, reversed_chunks, cur_line, cur_len, width):
            # a chunk too wide for any line: cut it at the column
            # boundary, or (if breaking is disabled) emit it whole when
            # the current line is still empty
            space_left = max(width - cur_len, 1)

            if self.break_long_words:
                cut, res = self._cutdown(reversed_chunks[-1], space_left)
                cur_line.append(cut)
                reversed_chunks[-1] = res
            elif not cur_line:
                cur_line.append(reversed_chunks.pop())

        # this overriding code is imported from TextWrapper of Python 2.6
        # to calculate columns of string by 'encoding.ucolwidth()'
        def _wrap_chunks(self, chunks):
            colwidth = encoding.ucolwidth

            lines = []
            if self.width <= 0:
                raise ValueError("invalid width %r (must be > 0)" % self.width)

            # Arrange in reverse order so items can be efficiently popped
            # from a stack of chunks.
            chunks.reverse()

            while chunks:

                # Start the list of chunks that will make up the current line.
                # cur_len is just the length of all the chunks in cur_line.
                cur_line = []
                cur_len = 0

                # Figure out which static string will prefix this line.
                if lines:
                    indent = self.subsequent_indent
                else:
                    indent = self.initial_indent

                # Maximum width for this line.
                width = self.width - len(indent)

                # First chunk on line is whitespace -- drop it, unless this
                # is the very beginning of the text (i.e. no lines started yet).
                if self.drop_whitespace and chunks[-1].strip() == '' and lines:
                    del chunks[-1]

                while chunks:
                    l = colwidth(chunks[-1])

                    # Can at least squeeze this chunk onto the current line.
                    if cur_len + l <= width:
                        cur_line.append(chunks.pop())
                        cur_len += l

                    # Nope, this line is full.
                    else:
                        break

                # The current line is full, and the next chunk is too big to
                # fit on *any* line (not just this one).
                if chunks and colwidth(chunks[-1]) > width:
                    self._handle_long_word(chunks, cur_line, cur_len, width)

                # If the last chunk on this line is all whitespace, drop it.
                if (self.drop_whitespace and
                    cur_line and cur_line[-1].strip() == ''):
                    del cur_line[-1]

                # Convert current line back to a string and store it in list
                # of all lines (return value).
                if cur_line:
                    lines.append(indent + ''.join(cur_line))

            return lines

    global MBTextWrapper
    # cache the class so future calls skip the definitions above
    MBTextWrapper = tw
    return tw(**kwargs)
1789
1842
def wrap(line, width, initindent='', hangindent=''):
    """Word-wrap line to width display columns, honoring both indents."""
    maxindent = max(len(hangindent), len(initindent))
    if width <= maxindent:
        # adjust for weird terminal size
        width = max(78, maxindent + 1)
    # wrapping operates on unicode so column widths can be measured
    decode = lambda s: s.decode(encoding.encoding, encoding.encodingmode)
    line = decode(line)
    initindent = decode(initindent)
    hangindent = decode(hangindent)
    wrapper = MBTextWrapper(width=width,
                            initial_indent=initindent,
                            subsequent_indent=hangindent)
    return wrapper.fill(line).encode(encoding.encoding)
1802
1855
def iterlines(iterator):
    """Yield individual lines from an iterator producing text chunks."""
    for block in iterator:
        for line in block.splitlines():
            yield line
1807
1860
def expandpath(path):
    """Expand environment variables, then ~user constructs, in path."""
    expanded = os.path.expandvars(path)
    return os.path.expanduser(expanded)
1810
1863
def hgcmd():
    """Return the command used to execute current hg

    This is different from hgexecutable() because on Windows we want
    to avoid things opening new shell windows like batch files, so we
    get either the python call or current executable.
    """
    # frozen builds are launched through the bundled interpreter binary
    return [sys.executable] if mainfrozen() else gethgcmd()
1821
1874
def rundetached(args, condfn):
    """Execute the argument list in a detached process.

    condfn is a callable which is called repeatedly and should return
    True once the child process is known to have started successfully.
    At this point, the child process PID is returned. If the child
    process fails to start or finishes before condfn() evaluates to
    True, return -1.
    """
    # Windows case is easier because the child process is either
    # successfully starting and validating the condition or exiting
    # on failure. We just poll on its PID. On Unix, if the child
    # process fails to start, it will be left in a zombie state until
    # the parent wait on it, which we cannot do since we expect a long
    # running process on success. Instead we listen for SIGCHLD telling
    # us our child process terminated.
    terminated = set()
    def handler(signum, frame):
        # reap the dead child; os.wait() returns a (pid, status) tuple
        terminated.add(os.wait())
    prevhandler = None
    SIGCHLD = getattr(signal, 'SIGCHLD', None)
    if SIGCHLD is not None:
        # only install the handler where the platform has SIGCHLD
        prevhandler = signal.signal(SIGCHLD, handler)
    try:
        pid = spawndetached(args)
        while not condfn():
            # NOTE(review): 'pid in terminated' compares an int against
            # (pid, status) tuples, so it can never match; child death is
            # effectively detected via testpid() -- confirm before relying
            # on the 'terminated' set.  condfn() is re-checked after a
            # detected death in case the condition was satisfied just
            # before the child exited.
            if ((pid in terminated or not testpid(pid))
                and not condfn()):
                return -1
            time.sleep(0.1)
        return pid
    finally:
        if prevhandler is not None:
            # prevhandler is only set when SIGCHLD exists, so this
            # attribute access is safe here
            signal.signal(signal.SIGCHLD, prevhandler)
1856
1909
def interpolate(prefix, mapping, s, fn=None, escape_prefix=False):
    """Return the result of interpolating items in the mapping into string s.

    prefix is a single character string, or a two character string with
    a backslash as the first character if the prefix needs to be escaped in
    a regular expression.

    fn is an optional function that will be applied to the replacement text
    just before replacement.

    escape_prefix is an optional flag that allows using doubled prefix for
    its escaping.

    The caller's mapping is never modified.
    """
    fn = fn or (lambda text: text)
    patterns = '|'.join(mapping.keys())
    if escape_prefix:
        patterns += '|' + prefix
        if len(prefix) > 1:
            prefix_char = prefix[1:]
        else:
            prefix_char = prefix
        # work on a copy so the doubled-prefix escape entry does not
        # leak into (or clobber an entry of) the caller's mapping
        mapping = dict(mapping)
        mapping[prefix_char] = prefix_char
    r = remod.compile(r'%s(%s)' % (prefix, patterns))
    # x.group() includes the prefix character; strip it for the lookup
    return r.sub(lambda x: fn(mapping[x.group()[1:]]), s)
1881
1934
def getport(port):
    """Return the port for a given network service.

    If port is an integer, it's returned as is. If it's a string, it's
    looked up using socket.getservbyname(). If there's no matching
    service, error.Abort is raised.
    """
    try:
        return int(port)
    except ValueError:
        # not numeric: treat it as a service name
        try:
            return socket.getservbyname(port)
        except socket.error:
            raise Abort(_("no port number associated with service '%s'")
                        % port)
1898
1951
# accepted spellings for boolean config values, already lowercased
_booleans = {
    '1': True, 'yes': True, 'true': True, 'on': True, 'always': True,
    '0': False, 'no': False, 'false': False, 'off': False, 'never': False,
}

def parsebool(s):
    """Parse s into a boolean.

    If s is not a valid boolean, returns None.
    """
    return _booleans.get(s.lower())
1909
1962
1910 _hexdig = '0123456789ABCDEFabcdef'
1963 _hexdig = '0123456789ABCDEFabcdef'
1911 _hextochr = dict((a + b, chr(int(a + b, 16)))
1964 _hextochr = dict((a + b, chr(int(a + b, 16)))
1912 for a in _hexdig for b in _hexdig)
1965 for a in _hexdig for b in _hexdig)
1913
1966
1914 def _urlunquote(s):
1967 def _urlunquote(s):
1915 """Decode HTTP/HTML % encoding.
1968 """Decode HTTP/HTML % encoding.
1916
1969
1917 >>> _urlunquote('abc%20def')
1970 >>> _urlunquote('abc%20def')
1918 'abc def'
1971 'abc def'
1919 """
1972 """
1920 res = s.split('%')
1973 res = s.split('%')
1921 # fastpath
1974 # fastpath
1922 if len(res) == 1:
1975 if len(res) == 1:
1923 return s
1976 return s
1924 s = res[0]
1977 s = res[0]
1925 for item in res[1:]:
1978 for item in res[1:]:
1926 try:
1979 try:
1927 s += _hextochr[item[:2]] + item[2:]
1980 s += _hextochr[item[:2]] + item[2:]
1928 except KeyError:
1981 except KeyError:
1929 s += '%' + item
1982 s += '%' + item
1930 except UnicodeDecodeError:
1983 except UnicodeDecodeError:
1931 s += unichr(int(item[:2], 16)) + item[2:]
1984 s += unichr(int(item[:2], 16)) + item[2:]
1932 return s
1985 return s
1933
1986
1934 class url(object):
1987 class url(object):
1935 r"""Reliable URL parser.
1988 r"""Reliable URL parser.
1936
1989
1937 This parses URLs and provides attributes for the following
1990 This parses URLs and provides attributes for the following
1938 components:
1991 components:
1939
1992
1940 <scheme>://<user>:<passwd>@<host>:<port>/<path>?<query>#<fragment>
1993 <scheme>://<user>:<passwd>@<host>:<port>/<path>?<query>#<fragment>
1941
1994
1942 Missing components are set to None. The only exception is
1995 Missing components are set to None. The only exception is
1943 fragment, which is set to '' if present but empty.
1996 fragment, which is set to '' if present but empty.
1944
1997
1945 If parsefragment is False, fragment is included in query. If
1998 If parsefragment is False, fragment is included in query. If
1946 parsequery is False, query is included in path. If both are
1999 parsequery is False, query is included in path. If both are
1947 False, both fragment and query are included in path.
2000 False, both fragment and query are included in path.
1948
2001
1949 See http://www.ietf.org/rfc/rfc2396.txt for more information.
2002 See http://www.ietf.org/rfc/rfc2396.txt for more information.
1950
2003
1951 Note that for backward compatibility reasons, bundle URLs do not
2004 Note that for backward compatibility reasons, bundle URLs do not
1952 take host names. That means 'bundle://../' has a path of '../'.
2005 take host names. That means 'bundle://../' has a path of '../'.
1953
2006
1954 Examples:
2007 Examples:
1955
2008
1956 >>> url('http://www.ietf.org/rfc/rfc2396.txt')
2009 >>> url('http://www.ietf.org/rfc/rfc2396.txt')
1957 <url scheme: 'http', host: 'www.ietf.org', path: 'rfc/rfc2396.txt'>
2010 <url scheme: 'http', host: 'www.ietf.org', path: 'rfc/rfc2396.txt'>
1958 >>> url('ssh://[::1]:2200//home/joe/repo')
2011 >>> url('ssh://[::1]:2200//home/joe/repo')
1959 <url scheme: 'ssh', host: '[::1]', port: '2200', path: '/home/joe/repo'>
2012 <url scheme: 'ssh', host: '[::1]', port: '2200', path: '/home/joe/repo'>
1960 >>> url('file:///home/joe/repo')
2013 >>> url('file:///home/joe/repo')
1961 <url scheme: 'file', path: '/home/joe/repo'>
2014 <url scheme: 'file', path: '/home/joe/repo'>
1962 >>> url('file:///c:/temp/foo/')
2015 >>> url('file:///c:/temp/foo/')
1963 <url scheme: 'file', path: 'c:/temp/foo/'>
2016 <url scheme: 'file', path: 'c:/temp/foo/'>
1964 >>> url('bundle:foo')
2017 >>> url('bundle:foo')
1965 <url scheme: 'bundle', path: 'foo'>
2018 <url scheme: 'bundle', path: 'foo'>
1966 >>> url('bundle://../foo')
2019 >>> url('bundle://../foo')
1967 <url scheme: 'bundle', path: '../foo'>
2020 <url scheme: 'bundle', path: '../foo'>
1968 >>> url(r'c:\foo\bar')
2021 >>> url(r'c:\foo\bar')
1969 <url path: 'c:\\foo\\bar'>
2022 <url path: 'c:\\foo\\bar'>
1970 >>> url(r'\\blah\blah\blah')
2023 >>> url(r'\\blah\blah\blah')
1971 <url path: '\\\\blah\\blah\\blah'>
2024 <url path: '\\\\blah\\blah\\blah'>
1972 >>> url(r'\\blah\blah\blah#baz')
2025 >>> url(r'\\blah\blah\blah#baz')
1973 <url path: '\\\\blah\\blah\\blah', fragment: 'baz'>
2026 <url path: '\\\\blah\\blah\\blah', fragment: 'baz'>
1974 >>> url(r'file:///C:\users\me')
2027 >>> url(r'file:///C:\users\me')
1975 <url scheme: 'file', path: 'C:\\users\\me'>
2028 <url scheme: 'file', path: 'C:\\users\\me'>
1976
2029
1977 Authentication credentials:
2030 Authentication credentials:
1978
2031
1979 >>> url('ssh://joe:xyz@x/repo')
2032 >>> url('ssh://joe:xyz@x/repo')
1980 <url scheme: 'ssh', user: 'joe', passwd: 'xyz', host: 'x', path: 'repo'>
2033 <url scheme: 'ssh', user: 'joe', passwd: 'xyz', host: 'x', path: 'repo'>
1981 >>> url('ssh://joe@x/repo')
2034 >>> url('ssh://joe@x/repo')
1982 <url scheme: 'ssh', user: 'joe', host: 'x', path: 'repo'>
2035 <url scheme: 'ssh', user: 'joe', host: 'x', path: 'repo'>
1983
2036
1984 Query strings and fragments:
2037 Query strings and fragments:
1985
2038
1986 >>> url('http://host/a?b#c')
2039 >>> url('http://host/a?b#c')
1987 <url scheme: 'http', host: 'host', path: 'a', query: 'b', fragment: 'c'>
2040 <url scheme: 'http', host: 'host', path: 'a', query: 'b', fragment: 'c'>
1988 >>> url('http://host/a?b#c', parsequery=False, parsefragment=False)
2041 >>> url('http://host/a?b#c', parsequery=False, parsefragment=False)
1989 <url scheme: 'http', host: 'host', path: 'a?b#c'>
2042 <url scheme: 'http', host: 'host', path: 'a?b#c'>
1990 """
2043 """
1991
2044
1992 _safechars = "!~*'()+"
2045 _safechars = "!~*'()+"
1993 _safepchars = "/!~*'()+:\\"
2046 _safepchars = "/!~*'()+:\\"
1994 _matchscheme = remod.compile(r'^[a-zA-Z0-9+.\-]+:').match
2047 _matchscheme = remod.compile(r'^[a-zA-Z0-9+.\-]+:').match
1995
2048
    def __init__(self, path, parsequery=True, parsefragment=True):
        """Parse path into URL components (see the class docstring).

        parsequery/parsefragment control whether '?'/'#' split off the
        query and fragment or remain part of the path.
        """
        # We slowly chomp away at path until we have only the path left
        self.scheme = self.user = self.passwd = self.host = None
        self.port = self.path = self.query = self.fragment = None
        self._localpath = True
        self._hostport = ''
        self._origpath = path

        if parsefragment and '#' in path:
            path, self.fragment = path.split('#', 1)
            if not path:
                path = None

        # special case for Windows drive letters and UNC paths
        if hasdriveletter(path) or path.startswith(r'\\'):
            self.path = path
            return

        # For compatibility reasons, we can't handle bundle paths as
        # normal URLS
        if path.startswith('bundle:'):
            self.scheme = 'bundle'
            path = path[7:]
            if path.startswith('//'):
                path = path[2:]
            self.path = path
            return

        if self._matchscheme(path):
            parts = path.split(':', 1)
            # an empty scheme part ('":..."') keeps local-path semantics
            if parts[0]:
                self.scheme, path = parts
                self._localpath = False

        if not path:
            path = None
            if self._localpath:
                self.path = ''
                return
        else:
            if self._localpath:
                # no scheme was recognized: everything is the local path
                self.path = path
                return

            if parsequery and '?' in path:
                path, self.query = path.split('?', 1)
                if not path:
                    path = None
                if not self.query:
                    self.query = None

            # // is required to specify a host/authority
            if path and path.startswith('//'):
                parts = path[2:].split('/', 1)
                if len(parts) > 1:
                    self.host, path = parts
                else:
                    self.host = parts[0]
                    path = None
                if not self.host:
                    self.host = None
                    # path of file:///d is /d
                    # path of file:///d:/ is d:/, not /d:/
                    if path and not hasdriveletter(path):
                        path = '/' + path

            if self.host and '@' in self.host:
                # rsplit so an '@' in the password is handled correctly
                self.user, self.host = self.host.rsplit('@', 1)
                if ':' in self.user:
                    self.user, self.passwd = self.user.split(':', 1)
                if not self.host:
                    self.host = None

            # Don't split on colons in IPv6 addresses without ports
            if (self.host and ':' in self.host and
                not (self.host.startswith('[') and self.host.endswith(']'))):
                self._hostport = self.host
                self.host, self.port = self.host.rsplit(':', 1)
                if not self.host:
                    self.host = None

            if (self.host and self.scheme == 'file' and
                self.host not in ('localhost', '127.0.0.1', '[::1]')):
                raise Abort(_('file:// URLs can only refer to localhost'))

        self.path = path

        # leave the query string escaped
        for a in ('user', 'passwd', 'host', 'port',
                  'path', 'fragment'):
            v = getattr(self, a)
            if v is not None:
                setattr(self, a, _urlunquote(v))
2089
2142
2090 def __repr__(self):
2143 def __repr__(self):
2091 attrs = []
2144 attrs = []
2092 for a in ('scheme', 'user', 'passwd', 'host', 'port', 'path',
2145 for a in ('scheme', 'user', 'passwd', 'host', 'port', 'path',
2093 'query', 'fragment'):
2146 'query', 'fragment'):
2094 v = getattr(self, a)
2147 v = getattr(self, a)
2095 if v is not None:
2148 if v is not None:
2096 attrs.append('%s: %r' % (a, v))
2149 attrs.append('%s: %r' % (a, v))
2097 return '<url %s>' % ', '.join(attrs)
2150 return '<url %s>' % ', '.join(attrs)
2098
2151
    def __str__(self):
        r"""Join the URL's components back into a URL string.

        Examples:

        >>> str(url('http://user:pw@host:80/c:/bob?fo:oo#ba:ar'))
        'http://user:pw@host:80/c:/bob?fo:oo#ba:ar'
        >>> str(url('http://user:pw@host:80/?foo=bar&baz=42'))
        'http://user:pw@host:80/?foo=bar&baz=42'
        >>> str(url('http://user:pw@host:80/?foo=bar%3dbaz'))
        'http://user:pw@host:80/?foo=bar%3dbaz'
        >>> str(url('ssh://user:pw@[::1]:2200//home/joe#'))
        'ssh://user:pw@[::1]:2200//home/joe#'
        >>> str(url('http://localhost:80//'))
        'http://localhost:80//'
        >>> str(url('http://localhost:80/'))
        'http://localhost:80/'
        >>> str(url('http://localhost:80'))
        'http://localhost:80/'
        >>> str(url('bundle:foo'))
        'bundle:foo'
        >>> str(url('bundle://../foo'))
        'bundle:../foo'
        >>> str(url('path'))
        'path'
        >>> str(url('file:///tmp/foo/bar'))
        'file:///tmp/foo/bar'
        >>> str(url('file:///c:/tmp/foo/bar'))
        'file:///c:/tmp/foo/bar'
        >>> print url(r'bundle:foo\bar')
        bundle:foo\bar
        >>> print url(r'file:///D:\data\hg')
        file:///D:\data\hg
        """
        # URLs parsed as plain local paths (no generic authority section)
        # are reassembled verbatim, only restoring scheme and fragment.
        if self._localpath:
            s = self.path
            if self.scheme == 'bundle':
                s = 'bundle:' + s
            if self.fragment:
                s += '#' + self.fragment
            return s

        s = self.scheme + ':'
        if self.user or self.passwd or self.host:
            s += '//'
        elif self.scheme and (not self.path or self.path.startswith('/')
                              or hasdriveletter(self.path)):
            # scheme with an absolute path but no authority still gets '//'
            # (e.g. 'file:///tmp'); drive letters need an extra '/' so that
            # 'file:///c:/...' round-trips.
            s += '//'
            if hasdriveletter(self.path):
                s += '/'
        # userinfo section: user[:password]@
        if self.user:
            s += urllib.quote(self.user, safe=self._safechars)
        if self.passwd:
            s += ':' + urllib.quote(self.passwd, safe=self._safechars)
        if self.user or self.passwd:
            s += '@'
        if self.host:
            # bracketed hosts are IPv6 literals and must not be quoted
            if not (self.host.startswith('[') and self.host.endswith(']')):
                s += urllib.quote(self.host)
            else:
                s += self.host
        if self.port:
            s += ':' + urllib.quote(self.port)
        if self.host:
            s += '/'
        if self.path:
            # TODO: similar to the query string, we should not unescape the
            # path when we store it, the path might contain '%2f' = '/',
            # which we should *not* escape.
            s += urllib.quote(self.path, safe=self._safepchars)
        if self.query:
            # we store the query in escaped form.
            s += '?' + self.query
        # note: 'is not None' keeps a trailing bare '#' (empty fragment)
        if self.fragment is not None:
            s += '#' + urllib.quote(self.fragment, safe=self._safepchars)
        return s
2175
2228
2176 def authinfo(self):
2229 def authinfo(self):
2177 user, passwd = self.user, self.passwd
2230 user, passwd = self.user, self.passwd
2178 try:
2231 try:
2179 self.user, self.passwd = None, None
2232 self.user, self.passwd = None, None
2180 s = str(self)
2233 s = str(self)
2181 finally:
2234 finally:
2182 self.user, self.passwd = user, passwd
2235 self.user, self.passwd = user, passwd
2183 if not self.user:
2236 if not self.user:
2184 return (s, None)
2237 return (s, None)
2185 # authinfo[1] is passed to urllib2 password manager, and its
2238 # authinfo[1] is passed to urllib2 password manager, and its
2186 # URIs must not contain credentials. The host is passed in the
2239 # URIs must not contain credentials. The host is passed in the
2187 # URIs list because Python < 2.4.3 uses only that to search for
2240 # URIs list because Python < 2.4.3 uses only that to search for
2188 # a password.
2241 # a password.
2189 return (s, (None, (s, self.host),
2242 return (s, (None, (s, self.host),
2190 self.user, self.passwd or ''))
2243 self.user, self.passwd or ''))
2191
2244
2192 def isabs(self):
2245 def isabs(self):
2193 if self.scheme and self.scheme != 'file':
2246 if self.scheme and self.scheme != 'file':
2194 return True # remote URL
2247 return True # remote URL
2195 if hasdriveletter(self.path):
2248 if hasdriveletter(self.path):
2196 return True # absolute for our purposes - can't be joined()
2249 return True # absolute for our purposes - can't be joined()
2197 if self.path.startswith(r'\\'):
2250 if self.path.startswith(r'\\'):
2198 return True # Windows UNC path
2251 return True # Windows UNC path
2199 if self.path.startswith('/'):
2252 if self.path.startswith('/'):
2200 return True # POSIX-style
2253 return True # POSIX-style
2201 return False
2254 return False
2202
2255
2203 def localpath(self):
2256 def localpath(self):
2204 if self.scheme == 'file' or self.scheme == 'bundle':
2257 if self.scheme == 'file' or self.scheme == 'bundle':
2205 path = self.path or '/'
2258 path = self.path or '/'
2206 # For Windows, we need to promote hosts containing drive
2259 # For Windows, we need to promote hosts containing drive
2207 # letters to paths with drive letters.
2260 # letters to paths with drive letters.
2208 if hasdriveletter(self._hostport):
2261 if hasdriveletter(self._hostport):
2209 path = self._hostport + '/' + self.path
2262 path = self._hostport + '/' + self.path
2210 elif (self.host is not None and self.path
2263 elif (self.host is not None and self.path
2211 and not hasdriveletter(path)):
2264 and not hasdriveletter(path)):
2212 path = '/' + path
2265 path = '/' + path
2213 return path
2266 return path
2214 return self._origpath
2267 return self._origpath
2215
2268
2216 def islocal(self):
2269 def islocal(self):
2217 '''whether localpath will return something that posixfile can open'''
2270 '''whether localpath will return something that posixfile can open'''
2218 return (not self.scheme or self.scheme == 'file'
2271 return (not self.scheme or self.scheme == 'file'
2219 or self.scheme == 'bundle')
2272 or self.scheme == 'bundle')
2220
2273
def hasscheme(path):
    """Return True when `path` parses as a URL carrying an explicit scheme."""
    parsed = url(path)
    return bool(parsed.scheme)
2223
2276
def hasdriveletter(path):
    """Return a truthy value when `path` starts with a Windows drive letter
    ('X:...').

    Note the short-circuit: a falsy `path` ('' or None) is returned as-is,
    matching the original `path and ...` expression.
    """
    if not path:
        return path
    return path[1:2] == ':' and path[:1].isalpha()
2226
2279
def urllocalpath(path):
    # Return the local filesystem form of `path`; query and fragment
    # parsing is disabled because '?' and '#' are legal in local paths.
    return url(path, parsequery=False, parsefragment=False).localpath()
2229
2282
def hidepassword(u):
    '''hide user credential in a url string'''
    parsed = url(u)
    if parsed.passwd:
        # mask rather than drop, so the URL still shows a password was set
        parsed.passwd = '***'
    return str(parsed)
2236
2289
def removeauth(u):
    '''remove all authentication information from a url string'''
    parsed = url(u)
    parsed.user = None
    parsed.passwd = None
    return str(parsed)
2242
2295
def isatty(fd):
    """Return fd.isatty(); objects lacking an isatty method count as
    non-ttys instead of raising."""
    try:
        result = fd.isatty()
    except AttributeError:
        return False
    return result
2248
2301
# Pretty-print a duration given in seconds: pick the coarsest unit among
# s/ms/us/ns and a precision that yields roughly three significant digits.
# Each entry is (threshold-multiple, unit-divisor, format) consumed by
# unitcountfn (defined earlier in this file).
timecount = unitcountfn(
    (1, 1e3, _('%.0f s')),
    (100, 1, _('%.1f s')),
    (10, 1, _('%.2f s')),
    (1, 1, _('%.3f s')),
    (100, 0.001, _('%.1f ms')),
    (10, 0.001, _('%.2f ms')),
    (1, 0.001, _('%.3f ms')),
    (100, 0.000001, _('%.1f us')),
    (10, 0.000001, _('%.2f us')),
    (1, 0.000001, _('%.3f us')),
    (100, 0.000000001, _('%.1f ns')),
    (10, 0.000000001, _('%.2f ns')),
    (1, 0.000000001, _('%.3f ns')),
    )
2264
2317
# current indentation (in spaces) for nested @timed reports; stored in a
# one-element list so the closure below can mutate it
_timenesting = [0]

def timed(func):
    '''Report the execution time of a function call to stderr.

    During development, use as a decorator when you need to measure
    the cost of a function, e.g. as follows:

    @util.timed
    def foo(a, b, c):
        pass
    '''

    def wrapper(*args, **kwargs):
        begin = time.time()
        step = 2
        _timenesting[0] += step
        try:
            return func(*args, **kwargs)
        finally:
            # report even when func raises, and restore nesting depth first
            # so the message is indented at the caller's level
            duration = time.time() - begin
            _timenesting[0] -= step
            sys.stderr.write('%s%s: %s\n'
                             % (' ' * _timenesting[0], func.__name__,
                                timecount(duration)))
    return wrapper
2291
2344
# Suffix table for sizetoint: (suffix, byte multiplier). Lookup stops at the
# first suffix that matches the end of the (lowercased) input.
_sizeunits = (('m', 2**20), ('k', 2**10), ('g', 2**30),
              ('kb', 2**10), ('mb', 2**20), ('gb', 2**30), ('b', 1))

def sizetoint(s):
    '''Convert a space specifier to a byte count.

    >>> sizetoint('30')
    30
    >>> sizetoint('2.2kb')
    2252
    >>> sizetoint('6M')
    6291456
    '''
    spec = s.strip().lower()
    try:
        for suffix, multiplier in _sizeunits:
            if spec.endswith(suffix):
                magnitude = float(spec[:-len(suffix)])
                return int(magnitude * multiplier)
        # no unit suffix: plain byte count
        return int(spec)
    except ValueError:
        raise error.ParseError(_("couldn't parse size: %s") % s)
2313
2366
class hooks(object):
    '''A collection of hook functions that can be used to extend a
    function's behavior. Hooks are called in lexicographic order,
    based on the names of their sources.'''

    def __init__(self):
        # list of (source, hook) pairs; sorted lazily at call time
        self._hooks = []

    def add(self, source, hook):
        self._hooks.append((source, hook))

    def __call__(self, *args):
        # sort by source name so invocation order is deterministic
        self._hooks.sort(key=lambda pair: pair[0])
        return [fn(*args) for _source, fn in self._hooks]
2331
2384
def debugstacktrace(msg='stacktrace', skip=0, f=sys.stderr, otherf=sys.stdout):
    '''Writes a message to f (stderr) with a nicely formatted stacktrace.
    Skips the 'skip' last entries. By default it will flush stdout first.
    It can be used everywhere and do intentionally not require an ui object.
    Not be used in production code but very convenient while developing.
    '''
    if otherf:
        # flush the other stream first so interleaved output stays ordered
        otherf.flush()
    f.write('%s at:\n' % msg)
    frames = traceback.extract_stack()[:-skip - 1]
    entries = [('%s:%s' % (fname, lineno), funcname)
               for fname, lineno, funcname, _line in frames]
    if entries:
        # align the "in <func>" column on the widest location string
        width = max(len(location) for location, _fn in entries)
        for location, funcname in entries:
            f.write(' %-*s in %s\n' % (width, location, funcname))
    f.flush()
2348
2401
class dirs(object):
    '''a multiset of directory names from a dirstate or manifest'''

    def __init__(self, map, skip=None):
        # directory name -> reference count
        self._dirs = {}
        addpath = self.addpath
        if safehasattr(map, 'iteritems') and skip is not None:
            # dirstate-style mapping: skip entries whose state equals `skip`
            for fname, entry in map.iteritems():
                if entry[0] != skip:
                    addpath(fname)
        else:
            for fname in map:
                addpath(fname)

    def addpath(self, path):
        counts = self._dirs
        for base in finddirs(path):
            if base in counts:
                # once an ancestor is present, all of its ancestors are too
                counts[base] += 1
                return
            counts[base] = 1

    def delpath(self, path):
        counts = self._dirs
        for base in finddirs(path):
            if counts[base] > 1:
                counts[base] -= 1
                return
            del counts[base]

    def __iter__(self):
        return self._dirs.iterkeys()

    def __contains__(self, d):
        return d in self._dirs
2384
2437
# Prefer the C implementation of dirs from the parsers extension module
# when it is available; it shadows the pure-Python class above.
if safehasattr(parsers, 'dirs'):
    dirs = parsers.dirs
2387
2440
def finddirs(path):
    """Yield each ancestor directory of `path`, deepest first.

    'a/b/c' yields 'a/b' then 'a'; a path without '/' yields nothing.
    """
    remainder = path
    slash = remainder.rfind('/')
    while slash != -1:
        remainder = remainder[:slash]
        yield remainder
        slash = remainder.rfind('/')
2393
2446
2394 # compression utility
2447 # compression utility
2395
2448
class nocompress(object):
    """Pass-through compressor used for uncompressed ('UN') streams."""

    def compress(self, x):
        # identity: data goes out exactly as it came in
        return x

    def flush(self):
        # nothing is ever buffered, so there is nothing left to emit
        return ""
2401
2454
# Map from compression-type header to a zero-argument factory returning an
# object with compress()/flush() methods.
compressors = {
    None: nocompress,
    # lambda to prevent early import
    'BZ': lambda: bz2.BZ2Compressor(),
    'GZ': lambda: zlib.compressobj(),
    }
# also support the old form by courtesies
compressors['UN'] = compressors[None]
2410
2463
def _makedecompressor(decompcls):
    """Return a function that wraps a file-like object in a chunkbuffer
    yielding data decompressed by a fresh instance of `decompcls`."""
    def gen(f):
        decompressor = decompcls()
        for chunk in filechunkiter(f):
            yield decompressor.decompress(chunk)

    def func(fh):
        return chunkbuffer(gen(fh))
    return func
2419
2472
def _bz2():
    # Build a BZ2 decompressor for streams whose leading 'BZ' magic was
    # stripped by the caller (the '_truncatedBZ' scheme below).
    d = bz2.BZ2Decompressor()
    # Bzip2 stream start with BZ, but we stripped it.
    # we put it back for good measure.
    d.decompress('BZ')
    return d
2426
2479
# Map from compression-type header to a function turning a file-like object
# into a chunkbuffer of decompressed data. '_truncatedBZ' handles bzip2
# streams whose 'BZ' magic header was already consumed.
decompressors = {None: lambda fh: fh,
                 '_truncatedBZ': _makedecompressor(_bz2),
                 'BZ': _makedecompressor(lambda: bz2.BZ2Decompressor()),
                 'GZ': _makedecompressor(lambda: zlib.decompressobj()),
                 }
# also support the old form by courtesies
decompressors['UN'] = decompressors[None]

# convenient shortcut
dst = debugstacktrace
# (end of util.py excerpt — diff-viewer page footer removed)