##// END OF EJS Templates
copyfile: add an optional parameter to copy other stat data...
Siddharth Agarwal -
r27369:c48ecc0b stable
parent child Browse files
Show More
@@ -1,2477 +1,2484 b''
1 # util.py - Mercurial utility functions and platform specific implementations
1 # util.py - Mercurial utility functions and platform specific implementations
2 #
2 #
3 # Copyright 2005 K. Thananchayan <thananck@yahoo.com>
3 # Copyright 2005 K. Thananchayan <thananck@yahoo.com>
4 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
5 # Copyright 2006 Vadim Gelfer <vadim.gelfer@gmail.com>
5 # Copyright 2006 Vadim Gelfer <vadim.gelfer@gmail.com>
6 #
6 #
7 # This software may be used and distributed according to the terms of the
7 # This software may be used and distributed according to the terms of the
8 # GNU General Public License version 2 or any later version.
8 # GNU General Public License version 2 or any later version.
9
9
10 """Mercurial utility functions and platform specific implementations.
10 """Mercurial utility functions and platform specific implementations.
11
11
12 This contains helper routines that are independent of the SCM core and
12 This contains helper routines that are independent of the SCM core and
13 hide platform-specific details from the core.
13 hide platform-specific details from the core.
14 """
14 """
15
15
16 import i18n
16 import i18n
17 _ = i18n._
17 _ = i18n._
18 import error, osutil, encoding, parsers
18 import error, osutil, encoding, parsers
19 import errno, shutil, sys, tempfile, traceback
19 import errno, shutil, sys, tempfile, traceback
20 import re as remod
20 import re as remod
21 import os, time, datetime, calendar, textwrap, signal, collections
21 import os, time, datetime, calendar, textwrap, signal, collections
22 import stat
22 import stat
23 import imp, socket, urllib
23 import imp, socket, urllib
24 import gc
24 import gc
25 import bz2
25 import bz2
26 import zlib
26 import zlib
27
27
28 if os.name == 'nt':
28 if os.name == 'nt':
29 import windows as platform
29 import windows as platform
30 else:
30 else:
31 import posix as platform
31 import posix as platform
32
32
# Re-export the platform-specific implementations (the 'windows' or 'posix'
# module selected above) at util module level, so callers can write e.g.
# util.rename() without caring about the host OS.
cachestat = platform.cachestat
checkexec = platform.checkexec
checklink = platform.checklink
copymode = platform.copymode
executablepath = platform.executablepath
expandglobs = platform.expandglobs
explainexit = platform.explainexit
findexe = platform.findexe
gethgcmd = platform.gethgcmd
getuser = platform.getuser
groupmembers = platform.groupmembers
groupname = platform.groupname
hidewindow = platform.hidewindow
isexec = platform.isexec
isowner = platform.isowner
localpath = platform.localpath
lookupreg = platform.lookupreg
makedir = platform.makedir
nlinks = platform.nlinks
normpath = platform.normpath
normcase = platform.normcase
normcasespec = platform.normcasespec
normcasefallback = platform.normcasefallback
openhardlinks = platform.openhardlinks
oslink = platform.oslink
parsepatchoutput = platform.parsepatchoutput
pconvert = platform.pconvert
poll = platform.poll
popen = platform.popen
posixfile = platform.posixfile
quotecommand = platform.quotecommand
readpipe = platform.readpipe
rename = platform.rename
removedirs = platform.removedirs
samedevice = platform.samedevice
samefile = platform.samefile
samestat = platform.samestat
setbinary = platform.setbinary
setflags = platform.setflags
setsignalhandler = platform.setsignalhandler
shellquote = platform.shellquote
spawndetached = platform.spawndetached
split = platform.split
sshargs = platform.sshargs
# prefer the C implementation from osutil when it provides one
statfiles = getattr(osutil, 'statfiles', platform.statfiles)
statisexec = platform.statisexec
statislink = platform.statislink
termwidth = platform.termwidth
testpid = platform.testpid
umask = platform.umask
unlink = platform.unlink
unlinkpath = platform.unlinkpath
username = platform.username
86
86
# Python compatibility

_notset = object()

def safehasattr(thing, attr):
    """Check whether `thing` has an attribute named `attr`.

    A private sentinel object as getattr's default distinguishes a
    missing attribute from any real attribute value, including None.
    """
    found = getattr(thing, attr, _notset)
    return found is not _notset
93
93
def sha1(s=''):
    '''
    Low-overhead wrapper around Python's SHA support

    >>> f = _fastsha1
    >>> a = sha1()
    >>> a = f()
    >>> a.hexdigest()
    'da39a3ee5e6b4b0d3255bfef95601890afd80709'
    '''

    # _fastsha1 rebinds both itself and the module-level 'sha1' name on
    # its first call, so this wrapper body only ever runs once.
    return _fastsha1(s)
106
106
def _fastsha1(s=''):
    # This function will import sha1 from hashlib or sha (whichever is
    # available) and overwrite itself with it on the first call.
    # Subsequent calls will go directly to the imported function.
    if sys.version_info >= (2, 5):
        from hashlib import sha1 as _sha1
    else:
        # hashlib is new in Python 2.5; older interpreters ship 'sha'
        from sha import sha as _sha1
    global _fastsha1, sha1
    # rebind both public names so future callers skip this bootstrap
    _fastsha1 = sha1 = _sha1
    return _sha1(s)
118
118
def md5(s=''):
    # Lazily resolve the md5 constructor, then rebind the module-level
    # 'md5' name so subsequent calls go straight to the implementation.
    try:
        from hashlib import md5 as _md5
    except ImportError:
        # Python < 2.5 fallback: the standalone md5 module
        from md5 import md5 as _md5
    global md5
    md5 = _md5
    return _md5(s)
127
127
# map of digest name -> constructor (the lazy md5/sha1 wrappers above)
DIGESTS = {
    'md5': md5,
    'sha1': sha1,
}
# List of digest types from strongest to weakest
DIGESTS_BY_STRENGTH = ['sha1', 'md5']

try:
    # hashlib is only available on Python >= 2.5; register sha512 when
    # we have it
    import hashlib
    DIGESTS.update({
        'sha512': hashlib.sha512,
    })
    DIGESTS_BY_STRENGTH.insert(0, 'sha512')
except ImportError:
    pass

# sanity check: every entry in the strength ranking must be registered
for k in DIGESTS_BY_STRENGTH:
    assert k in DIGESTS
146
146
class digester(object):
    """helper to compute digests.

    This helper can be used to compute one or more digests given their name.

    >>> d = digester(['md5', 'sha1'])
    >>> d.update('foo')
    >>> [k for k in sorted(d)]
    ['md5', 'sha1']
    >>> d['md5']
    'acbd18db4cc2f85cedef654fccc4a4d8'
    >>> d['sha1']
    '0beec7b5ea3f0fdbc95d0dd47f3c5bc275da8a33'
    >>> digester.preferred(['md5', 'sha1'])
    'sha1'
    """

    def __init__(self, digests, s=''):
        self._hashes = {}
        for k in digests:
            if k not in DIGESTS:
                raise Abort(_('unknown digest type: %s') % k)
            self._hashes[k] = DIGESTS[k]()
        if s:
            self.update(s)

    def update(self, data):
        # feed the data to every tracked digest
        for h in self._hashes.values():
            h.update(data)

    def __getitem__(self, key):
        if key not in DIGESTS:
            # bug fix: this previously interpolated the undefined name 'k'
            # (copy-pasted from __init__), raising NameError instead of
            # the intended Abort
            raise Abort(_('unknown digest type: %s') % key)
        return self._hashes[key].hexdigest()

    def __iter__(self):
        return iter(self._hashes)

    @staticmethod
    def preferred(supported):
        """returns the strongest digest type in both supported and DIGESTS."""

        for k in DIGESTS_BY_STRENGTH:
            if k in supported:
                return k
        return None
193
193
class digestchecker(object):
    """file handle wrapper that additionally checks content against a given
    size and digests.

    d = digestchecker(fh, size, {'md5': '...'})

    When multiple digests are given, all of them are validated.
    """

    def __init__(self, fh, size, digests):
        self._fh = fh
        self._size = size
        self._got = 0
        self._digests = dict(digests)
        self._digester = digester(self._digests.keys())

    def read(self, length=-1):
        # feed everything we hand out through the digester and keep a
        # running byte count for the final size check
        content = self._fh.read(length)
        self._digester.update(content)
        self._got += len(content)
        return content

    def validate(self):
        # check the total byte count first, then every configured digest
        if self._got != self._size:
            raise Abort(_('size mismatch: expected %d, got %d') %
                        (self._size, self._got))
        for name, expected in self._digests.items():
            actual = self._digester[name]
            if expected != actual:
                # i18n: first parameter is a digest name
                raise Abort(_('%s mismatch: expected %s, got %s') %
                            (name, expected, actual))
225
225
try:
    # keep the builtin buffer where it exists (CPython 2)
    buffer = buffer
except NameError:
    # no 'buffer' builtin: emulate it.  NOTE(review): the < 3 branch is
    # presumably for non-CPython 2.x interpreters lacking the builtin —
    # confirm; on Python 3 a zero-copy memoryview slice is used instead.
    if sys.version_info[0] < 3:
        def buffer(sliceable, offset=0):
            return sliceable[offset:]
    else:
        def buffer(sliceable, offset=0):
            return memoryview(sliceable)[offset:]
235
235
236 import subprocess
236 import subprocess
# close inherited file descriptors in children only on POSIX; passed as
# subprocess.Popen's close_fds argument throughout this module
closefds = os.name == 'posix'

# read granularity (bytes) used by bufferedinputpipe._fillbuffer
_chunksize = 4096
240
240
class bufferedinputpipe(object):
    """a manually buffered input pipe

    Python will not let us use buffered IO and lazy reading with 'polling' at
    the same time. We cannot probe the buffer state and select will not detect
    that data are ready to read if they are already buffered.

    This class let us work around that by implementing its own buffering
    (allowing efficient readline) while offering a way to know if the buffer is
    empty from the output (allowing collaboration of the buffer with polling).

    This class lives in the 'util' module because it makes use of the 'os'
    module from the python stdlib.
    """

    def __init__(self, input):
        # underlying file-like object; reads go through its fileno()
        self._input = input
        # list of pending chunks, in read order, not yet handed out
        self._buffer = []
        # set once os.read returns no data (pipe closed/exhausted)
        self._eof = False
        # total number of bytes currently held in _buffer
        self._lenbuf = 0

    @property
    def hasbuffer(self):
        """True is any data is currently buffered

        This will be used externally a pre-step for polling IO. If there is
        already data then no polling should be set in place."""
        return bool(self._buffer)

    @property
    def closed(self):
        return self._input.closed

    def fileno(self):
        return self._input.fileno()

    def close(self):
        return self._input.close()

    def read(self, size):
        # keep filling until we hold 'size' bytes or the pipe hits EOF
        while (not self._eof) and (self._lenbuf < size):
            self._fillbuffer()
        return self._frombuffer(size)

    def readline(self, *args, **kwargs):
        if 1 < len(self._buffer):
            # this should not happen because both read and readline end with a
            # _frombuffer call that collapse it.
            self._buffer = [''.join(self._buffer)]
            self._lenbuf = len(self._buffer[0])
        # lfi: offset of the first newline in the last chunk, -1 if none
        lfi = -1
        if self._buffer:
            lfi = self._buffer[-1].find('\n')
        while (not self._eof) and lfi < 0:
            # only the freshly appended chunk can contain the newline
            self._fillbuffer()
            if self._buffer:
                lfi = self._buffer[-1].find('\n')
        size = lfi + 1
        if lfi < 0: # end of file
            size = self._lenbuf
        elif 1 < len(self._buffer):
            # we need to take previous chunks into account
            size += self._lenbuf - len(self._buffer[-1])
        return self._frombuffer(size)

    def _frombuffer(self, size):
        """return at most 'size' data from the buffer

        The data are removed from the buffer."""
        if size == 0 or not self._buffer:
            return ''
        buf = self._buffer[0]
        if 1 < len(self._buffer):
            # multiple chunks pending: collapse them into one string
            buf = ''.join(self._buffer)

        data = buf[:size]
        buf = buf[len(data):]
        if buf:
            # keep the unconsumed tail as a single chunk
            self._buffer = [buf]
            self._lenbuf = len(buf)
        else:
            self._buffer = []
            self._lenbuf = 0
        return data

    def _fillbuffer(self):
        """read data to the buffer"""
        # raw os.read (no stdio buffering) so select/poll on the fd stays
        # in agreement with our own buffer state
        data = os.read(self._input.fileno(), _chunksize)
        if not data:
            self._eof = True
        else:
            self._lenbuf += len(data)
            self._buffer.append(data)
334
334
def popen2(cmd, env=None, newlines=False):
    """Spawn cmd through a shell; return its (stdin, stdout) pipes.

    Setting bufsize to -1 lets the system decide the buffer size.  The
    default for bufsize is 0, meaning unbuffered, which leads to poor
    performance on Mac OS X: http://bugs.python.org/issue4194
    """
    proc = subprocess.Popen(cmd,
                            shell=True,
                            bufsize=-1,
                            close_fds=closefds,
                            stdin=subprocess.PIPE,
                            stdout=subprocess.PIPE,
                            universal_newlines=newlines,
                            env=env)
    return proc.stdin, proc.stdout
345
345
def popen3(cmd, env=None, newlines=False):
    """Spawn cmd through a shell; return (stdin, stdout, stderr) pipes.

    Same as popen4, but without exposing the process object."""
    return popen4(cmd, env, newlines)[:3]
349
349
def popen4(cmd, env=None, newlines=False, bufsize=-1):
    """Spawn cmd through a shell.

    Returns the child's (stdin, stdout, stderr) pipes together with the
    Popen object itself, so callers can wait on or poll the process."""
    proc = subprocess.Popen(cmd,
                            shell=True,
                            bufsize=bufsize,
                            close_fds=closefds,
                            stdin=subprocess.PIPE,
                            stdout=subprocess.PIPE,
                            stderr=subprocess.PIPE,
                            universal_newlines=newlines,
                            env=env)
    return proc.stdin, proc.stdout, proc.stderr, proc
358
358
def version():
    """Return version information if available."""
    try:
        # the __version__ module is generated at build time; it is absent
        # when running from a source checkout
        import __version__
    except ImportError:
        return 'unknown'
    return __version__.version
366
366
# used by parsedate
# strptime-style formats tried in order when parsing a user-supplied date
defaultdateformats = (
    '%Y-%m-%d %H:%M:%S',
    '%Y-%m-%d %I:%M:%S%p',
    '%Y-%m-%d %H:%M',
    '%Y-%m-%d %I:%M%p',
    '%Y-%m-%d',
    '%m-%d',
    '%m/%d',
    '%m/%d/%y',
    '%m/%d/%Y',
    '%a %b %d %H:%M:%S %Y',
    '%a %b %d %I:%M:%S%p %Y',
    '%a, %d %b %Y %H:%M:%S', #  GNU coreutils "/bin/date --rfc-2822"
    '%b %d %H:%M:%S %Y',
    '%b %d %I:%M:%S%p %Y',
    '%b %d %H:%M:%S',
    '%b %d %I:%M:%S%p',
    '%b %d %H:%M',
    '%b %d %I:%M%p',
    '%b %d %Y',
    '%b %d',
    '%H:%M:%S',
    '%I:%M:%S%p',
    '%H:%M',
    '%I:%M%p',
)

# year- and month-only formats, accepted in addition to the defaults
extendeddateformats = defaultdateformats + (
    "%Y",
    "%Y-%m",
    "%b",
    "%b %Y",
    )
401
401
def cachefunc(func):
    '''cache the result of function calls

    Results are memoized forever, keyed on positional arguments.
    XXX doesn't handle keywords args
    '''
    # func.__code__ is available on Python 2.6+ and Python 3 alike,
    # unlike the legacy Python-2-only func.func_code spelling; also
    # hoists the attribute chain out of the two tests below.
    argcount = func.__code__.co_argcount
    if argcount == 0:
        # nullary function: a one-slot list is the cheapest cache
        cache = []
        def f():
            if len(cache) == 0:
                cache.append(func())
            return cache[0]
        return f
    cache = {}
    if argcount == 1:
        # we gain a small amount of time because
        # we don't need to pack/unpack the list
        def f(arg):
            if arg not in cache:
                cache[arg] = func(arg)
            return cache[arg]
    else:
        def f(*args):
            if args not in cache:
                cache[args] = func(*args)
            return cache[args]

    return f
427
427
class sortdict(dict):
    '''a simple sorted dictionary

    Iteration follows insertion order; re-assigning an existing key moves
    it to the end, as if newly inserted.
    '''
    def __init__(self, data=None):
        # _list holds the keys in insertion order
        self._list = []
        if data:
            self.update(data)
    def copy(self):
        return sortdict(self)
    def __setitem__(self, key, val):
        if key in self:
            # re-inserting moves the key to the end of the ordering
            self._list.remove(key)
        self._list.append(key)
        dict.__setitem__(self, key, val)
    def __iter__(self):
        return self._list.__iter__()
    def update(self, src):
        if isinstance(src, dict):
            src = src.iteritems()
        for k, v in src:
            self[k] = v
    def clear(self):
        dict.clear(self)
        self._list = []
    def items(self):
        return [(k, self[k]) for k in self._list]
    def __delitem__(self, key):
        dict.__delitem__(self, key)
        self._list.remove(key)
    def pop(self, key, *args, **kwargs):
        # bug fix: dict.pop's result was previously discarded, so pop()
        # always returned None instead of the removed value (or default)
        value = dict.pop(self, key, *args, **kwargs)
        try:
            self._list.remove(key)
        except ValueError:
            # key was absent and a default was returned
            pass
        return value
    def keys(self):
        return self._list
    def iterkeys(self):
        return self._list.__iter__()
    def iteritems(self):
        for k in self._list:
            yield k, self[k]
    def insert(self, index, key, val):
        # place 'key' at position 'index' in the ordering
        self._list.insert(index, key)
        dict.__setitem__(self, key, val)
472
472
class lrucachedict(object):
    '''cache most recent gets from or sets to this dictionary'''

    def __init__(self, maxsize):
        self._cache = {}
        self._maxsize = maxsize
        # keys ordered least- to most-recently used
        self._order = collections.deque()

    def __getitem__(self, key):
        value = self._cache[key]
        # move the key to the most-recently-used end
        self._order.remove(key)
        self._order.append(key)
        return value

    def __setitem__(self, key, value):
        if key in self._cache:
            # existing key: only its recency changes
            self._order.remove(key)
        elif len(self._cache) >= self._maxsize:
            # full: drop the least recently used entry
            del self._cache[self._order.popleft()]
        self._cache[key] = value
        self._order.append(key)

    def __contains__(self, key):
        return key in self._cache

    def clear(self):
        self._cache.clear()
        self._order = collections.deque()
501
501
def lrucachefunc(func):
    '''cache most recent results of function calls

    The least recently used entry is evicted once the cache grows past
    20 entries, so at most 21 results are retained.
    '''
    cache = {}
    order = collections.deque()
    # func.__code__ works on Python 2.6+ and Python 3; the legacy
    # func.func_code spelling is Python 2 only.
    if func.__code__.co_argcount == 1:
        def f(arg):
            if arg not in cache:
                if len(cache) > 20:
                    del cache[order.popleft()]
                cache[arg] = func(arg)
            else:
                order.remove(arg)
            order.append(arg)
            return cache[arg]
    else:
        def f(*args):
            if args not in cache:
                if len(cache) > 20:
                    del cache[order.popleft()]
                cache[args] = func(*args)
            else:
                order.remove(args)
            order.append(args)
            return cache[args]

    return f
528
528
class propertycache(object):
    """Descriptor caching the decorated method's result on the instance.

    The first attribute access computes the value and stores it in the
    instance __dict__ under the same name; being a non-data descriptor,
    later lookups then bypass the descriptor entirely.
    """

    def __init__(self, func):
        self.func = func
        self.name = func.__name__

    def __get__(self, obj, type=None):
        value = self.func(obj)
        self.cachevalue(obj, value)
        return value

    def cachevalue(self, obj, value):
        # __dict__ assignment required to bypass __setattr__ (eg: repoview)
        obj.__dict__[self.name] = value
541
541
def pipefilter(s, cmd):
    '''filter string S through command CMD, returning its output'''
    proc = subprocess.Popen(cmd, shell=True, close_fds=closefds,
                            stdin=subprocess.PIPE, stdout=subprocess.PIPE)
    out, _err = proc.communicate(s)
    return out
548
548
def tempfilter(s, cmd):
    '''filter string S through a pair of temporary files with CMD.
    CMD is used as a template to create the real command to be run,
    with the strings INFILE and OUTFILE replaced by the real names of
    the temporary files generated.'''
    inname, outname = None, None
    try:
        # write S to a temp input file for the command to consume
        infd, inname = tempfile.mkstemp(prefix='hg-filter-in-')
        infp = os.fdopen(infd, 'wb')
        infp.write(s)
        infp.close()
        # pre-create the output file; the command overwrites it
        outfd, outname = tempfile.mkstemp(prefix='hg-filter-out-')
        os.close(outfd)
        cmd = cmd.replace('INFILE', inname)
        cmd = cmd.replace('OUTFILE', outname)
        code = os.system(cmd)
        if sys.platform == 'OpenVMS' and code & 1:
            # on OpenVMS an odd status means success
            code = 0
        if code:
            raise Abort(_("command '%s' failed: %s") %
                        (cmd, explainexit(code)))
        outfp = open(outname, 'rb')
        result = outfp.read()
        outfp.close()
        return result
    finally:
        # best-effort removal of both temp files
        for name in (inname, outname):
            try:
                if name:
                    os.unlink(name)
            except OSError:
                pass
585
585
# map filter-spec prefixes to their implementations; specs without a
# recognized prefix fall through to pipefilter (see filter() below)
filtertable = {
    'tempfile:': tempfilter,
    'pipe:': pipefilter,
}
590
590
def filter(s, cmd):
    "filter a string through a command that transforms its input to its output"
    # NOTE: shadows the builtin 'filter'; kept for API compatibility
    for prefix, fn in filtertable.iteritems():
        if cmd.startswith(prefix):
            return fn(s, cmd[len(prefix):].lstrip())
    return pipefilter(s, cmd)
597
597
def binary(s):
    """return true if a string is binary data"""
    # an empty string is not binary; otherwise the NUL byte decides
    if not s:
        return False
    return '\0' in s
601
601
def increasingchunks(source, min=1024, max=65536):
    '''return no less than min bytes per chunk while data remains,
    doubling min after each chunk until it reaches max'''
    def log2(x):
        # index of the highest set bit (0 for x == 0)
        if not x:
            return 0
        bits = 0
        while x:
            x >>= 1
            bits += 1
        return bits - 1

    pending = []
    pendingsize = 0
    for chunk in source:
        pending.append(chunk)
        pendingsize += len(chunk)
        if pendingsize >= min:
            if min < max:
                # at least double the threshold, and jump straight to
                # the largest power of two not above what we buffered
                min = min << 1
                nmin = 1 << log2(pendingsize)
                if nmin > min:
                    min = nmin
                if min > max:
                    min = max
            yield ''.join(pending)
            pending = []
            pendingsize = 0
    if pending:
        # flush whatever is left, even if below the threshold
        yield ''.join(pending)
632
632
# convenience alias so util users need not import the error module
Abort = error.Abort
634
634
def always(fn):
    """matcher predicate that accepts everything"""
    return True
637
637
def never(fn):
    """matcher predicate that rejects everything"""
    return False
640
640
def nogc(func):
    """disable garbage collector

    Python's garbage collector triggers a GC each time a certain number of
    container objects (the number being defined by gc.get_threshold()) are
    allocated even when marked not to be tracked by the collector. Tracking
    has no effect on when GCs are triggered, only on what objects the GC
    looks into. As a workaround, disable GC while building complex (huge)
    containers.

    This garbage collector issue has been fixed in 2.7.
    """
    def wrapper(*args, **kwargs):
        wasenabled = gc.isenabled()
        gc.disable()
        try:
            return func(*args, **kwargs)
        finally:
            # restore the collector only if it was on when we started
            if wasenabled:
                gc.enable()
    return wrapper
662
662
def pathto(root, n1, n2):
    '''return the relative path from one place to another.
    root should use os.sep to separate directories
    n1 should use os.sep to separate directories
    n2 should use "/" to separate directories
    returns an os.sep-separated path.

    If n1 is a relative path, it's assumed it's
    relative to root.
    n2 should always be relative to root.
    '''
    if not n1:
        return localpath(n2)
    if os.path.isabs(n1):
        if os.path.splitdrive(root)[0] != os.path.splitdrive(n1)[0]:
            # different drives: no relative path exists, go via root
            return os.path.join(root, localpath(n2))
        n2 = '/'.join((pconvert(root), n2))
    comps1, comps2 = splitpath(n1), n2.split('/')
    comps1.reverse()
    comps2.reverse()
    # strip the common prefix (compared from the original front,
    # i.e. the reversed tail)
    while comps1 and comps2 and comps1[-1] == comps2[-1]:
        comps1.pop()
        comps2.pop()
    comps2.reverse()
    # climb out of what remains of n1, then descend into n2
    return os.sep.join(['..'] * len(comps1) + comps2) or '.'
688
688
def mainfrozen():
    """return True if we are a frozen executable.

    The code supports py2exe (most common, Windows only) and tools/freeze
    (portable, not much used).
    """
    return (safehasattr(sys, "frozen")      # new py2exe
            or safehasattr(sys, "importers")  # old py2exe
            or imp.is_frozen("__main__"))   # tools/freeze
698
698
# the location of data files matching the source code
if mainfrozen():
    # executable version (py2exe) doesn't support __file__
    datapath = os.path.dirname(sys.executable)
else:
    datapath = os.path.dirname(__file__)

i18n.setdatapath(datapath)
707
707
# cached path of the 'hg' executable; resolved lazily by hgexecutable()
_hgexecutable = None

def hgexecutable():
    """return location of the 'hg' executable.

    Defaults to $HG or 'hg' in the search path.
    """
    if _hgexecutable is None:
        # resolution priority: $HG, frozen interpreter, a __main__
        # script named 'hg', then the search path / argv[0]
        hgenv = os.environ.get('HG')
        mainmod = sys.modules['__main__']
        if hgenv:
            _sethgexecutable(hgenv)
        elif mainfrozen():
            _sethgexecutable(sys.executable)
        elif os.path.basename(getattr(mainmod, '__file__', '')) == 'hg':
            _sethgexecutable(mainmod.__file__)
        else:
            fallback = findexe('hg') or os.path.basename(sys.argv[0])
            _sethgexecutable(fallback)
    return _hgexecutable
728
728
def _sethgexecutable(path):
    """set location of the 'hg' executable"""
    # stored in a module global so hgexecutable() can cache it
    global _hgexecutable
    _hgexecutable = path
733
733
def _isstdout(f):
    '''return whether file-like object f refers to the process stdout'''
    # objects without fileno() (e.g. StringIO) are never stdout
    getfd = getattr(f, 'fileno', None)
    return getfd and getfd() == sys.__stdout__.fileno()
737
737
def system(cmd, environ=None, cwd=None, onerr=None, errprefix=None, out=None):
    '''enhanced shell command execution.
    run with environment maybe modified, maybe in different dir.

    if command fails and onerr is None, return status, else raise onerr
    object as exception.

    if out is specified, it is assumed to be a file-like object that has a
    write() method. stdout and stderr will be redirected to out.'''
    if environ is None:
        environ = {}
    try:
        # keep our own buffered output from interleaving with the child's
        sys.stdout.flush()
    except Exception:
        pass
    def py2shell(val):
        'convert python object into string that is useful to shell'
        if val is None or val is False:
            return '0'
        if val is True:
            return '1'
        return str(val)
    origcmd = cmd
    cmd = quotecommand(cmd)
    if sys.platform == 'plan9' and (sys.version_info[0] == 2
                                    and sys.version_info[1] < 7):
        # subprocess kludge to work around issues in half-baked Python
        # ports, notably bichued/python:
        if not cwd is None:
            os.chdir(cwd)
        rc = os.system(cmd)
    else:
        env = dict(os.environ)
        env.update((k, py2shell(v)) for k, v in environ.iteritems())
        env['HG'] = hgexecutable()
        if out is None or _isstdout(out):
            # let the child write straight to our stdout
            rc = subprocess.call(cmd, shell=True, close_fds=closefds,
                                 env=env, cwd=cwd)
        else:
            # capture combined stdout/stderr and forward it line by line
            proc = subprocess.Popen(cmd, shell=True, close_fds=closefds,
                                    env=env, cwd=cwd, stdout=subprocess.PIPE,
                                    stderr=subprocess.STDOUT)
            for line in iter(proc.stdout.readline, ''):
                out.write(line)
            proc.wait()
            rc = proc.returncode
    if sys.platform == 'OpenVMS' and rc & 1:
        rc = 0
    if rc and onerr:
        errmsg = '%s %s' % (os.path.basename(origcmd.split(None, 1)[0]),
                            explainexit(rc)[0])
        if errprefix:
            errmsg = '%s: %s' % (errprefix, errmsg)
        raise onerr(errmsg)
    return rc
796
796
def checksignature(func):
    '''wrap a function with code to check for calling errors'''
    def check(*args, **kwargs):
        try:
            return func(*args, **kwargs)
        except TypeError:
            # a traceback of depth 1 means the TypeError came from the
            # call itself (bad arguments), not from inside func
            if len(traceback.extract_tb(sys.exc_info()[2])) == 1:
                raise error.SignatureError
            raise

    return check
808
808
def copyfile(src, dest, hardlink=False, copystat=False):
    '''copy a file, preserving mode and optionally other stat info like
    atime/mtime

    The new keyword-only-style parameter copystat defaults to False, so
    existing callers keep the old "mode only" behavior; pass
    copystat=True to also copy atime/mtime (and other stat data).
    '''
    if os.path.lexists(dest):
        unlink(dest)
    # hardlinks are problematic on CIFS, quietly ignore this flag
    # until we find a way to work around it cleanly (issue4546)
    if False and hardlink:
        try:
            oslink(src, dest)
            return
        except (IOError, OSError):
            pass # fall back to normal copy
    if os.path.islink(src):
        os.symlink(os.readlink(src), dest)
        # copytime is ignored for symlinks, but in general copytime isn't
        # needed for them anyway
    else:
        try:
            shutil.copyfile(src, dest)
            if copystat:
                # copystat also copies mode
                shutil.copystat(src, dest)
            else:
                shutil.copymode(src, dest)
        except shutil.Error as inst:
            raise Abort(str(inst))
829
836
def copyfiles(src, dst, hardlink=None, progress=lambda t, pos: None):
    """Copy a directory tree using hardlinks if possible."""
    num = 0

    if hardlink is None:
        # default to hardlinking when src and dst share a device
        hardlink = (os.stat(src).st_dev ==
                    os.stat(os.path.dirname(dst)).st_dev)
    topic = _('linking') if hardlink else _('copying')

    if os.path.isdir(src):
        os.mkdir(dst)
        for name, kind in osutil.listdir(src):
            srcname = os.path.join(src, name)
            dstname = os.path.join(dst, name)
            def nprog(t, pos):
                # offset child progress by what we've already copied
                if pos is not None:
                    return progress(t, pos + num)
            hardlink, n = copyfiles(srcname, dstname, hardlink, progress=nprog)
            num += n
    else:
        if hardlink:
            try:
                oslink(src, dst)
            except (IOError, OSError):
                # linking failed once: stop trying for the rest of the tree
                hardlink = False
                shutil.copy(src, dst)
        else:
            shutil.copy(src, dst)
        num += 1
        progress(topic, num)
    progress(topic, None)

    return hardlink, num
866
873
# file names the Windows kernel reserves regardless of extension
_winreservednames = [
    'con', 'prn', 'aux', 'nul',
    'com1', 'com2', 'com3', 'com4', 'com5', 'com6', 'com7', 'com8', 'com9',
    'lpt1', 'lpt2', 'lpt3', 'lpt4', 'lpt5', 'lpt6', 'lpt7', 'lpt8', 'lpt9',
]
# characters that may never appear in a Windows file name
_winreservedchars = ':*?"<>|'
def checkwinfilename(path):
    r'''Check that the base-relative path is a valid filename on Windows.
    Returns None if the path is ok, or a UI string describing the problem.

    >>> checkwinfilename("just/a/normal/path")
    >>> checkwinfilename("foo/bar/con.xml")
    "filename contains 'con', which is reserved on Windows"
    >>> checkwinfilename("foo/con.xml/bar")
    "filename contains 'con', which is reserved on Windows"
    >>> checkwinfilename("foo/bar/xml.con")
    >>> checkwinfilename("foo/bar/AUX/bla.txt")
    "filename contains 'AUX', which is reserved on Windows"
    >>> checkwinfilename("foo/bar/bla:.txt")
    "filename contains ':', which is reserved on Windows"
    >>> checkwinfilename("foo/bar/b\07la.txt")
    "filename contains '\\x07', which is invalid on Windows"
    >>> checkwinfilename("foo/bar/bla ")
    "filename ends with ' ', which is not allowed on Windows"
    >>> checkwinfilename("../bar")
    >>> checkwinfilename("foo\\")
    "filename ends with '\\', which is invalid on Windows"
    >>> checkwinfilename("foo\\/bar")
    "directory name ends with '\\', which is invalid on Windows"
    '''
    if path.endswith('\\'):
        return _("filename ends with '\\', which is invalid on Windows")
    if '\\/' in path:
        return _("directory name ends with '\\', which is invalid on Windows")
    for part in path.replace('\\', '/').split('/'):
        if not part:
            continue
        for c in part:
            if c in _winreservedchars:
                return _("filename contains '%s', which is reserved "
                         "on Windows") % c
            if ord(c) <= 31:
                return _("filename contains %r, which is invalid "
                         "on Windows") % c
        # only the part before the first dot counts for reserved names
        base = part.split('.')[0]
        if base and base.lower() in _winreservednames:
            return _("filename contains '%s', which is reserved "
                     "on Windows") % base
        last = part[-1]
        if last in '. ' and part not in '..':
            return _("filename ends with '%s', which is not allowed "
                     "on Windows") % last
917
924
# pick the host-appropriate filename validator: the Windows rules above,
# or whatever the platform module provides elsewhere
if os.name == 'nt':
    checkosfilename = checkwinfilename
else:
    checkosfilename = platform.checkosfilename
922
929
def makelock(info, pathname):
    """atomically create a lock at pathname holding info

    A symlink whose target is info is preferred; where symlinks are
    unavailable, fall back to an exclusively-created regular file.
    """
    try:
        return os.symlink(info, pathname)
    except OSError as why:
        if why.errno == errno.EEXIST:
            raise
        # other OSError: fall through to the regular-file strategy
    except AttributeError: # no symlink in os
        pass

    fd = os.open(pathname, os.O_CREAT | os.O_WRONLY | os.O_EXCL)
    os.write(fd, info)
    os.close(fd)
935
942
def readlock(pathname):
    """return the contents of the lock at pathname

    Reads the symlink target when the lock is a symlink; otherwise reads
    the regular-file fallback written by makelock.
    """
    try:
        return os.readlink(pathname)
    except OSError as why:
        # EINVAL: not a symlink; ENOSYS: symlinks unsupported
        if why.errno not in (errno.EINVAL, errno.ENOSYS):
            raise
    except AttributeError: # no symlink in os
        pass
    fp = posixfile(pathname)
    contents = fp.read()
    fp.close()
    return contents
948
955
def fstat(fp):
    '''stat file object that may not have fileno method.'''
    try:
        return os.fstat(fp.fileno())
    except AttributeError:
        # no fileno(): fall back to stat'ing by name
        return os.stat(fp.name)
955
962
def statmtimesec(st):
    """Get mtime as integer of seconds

    'int(st.st_mtime)' cannot be used because st.st_mtime is computed as
    'sec + 1e-9 * nsec' and double-precision floating-point type is too narrow
    to represent nanoseconds. If 'nsec' is close to 1 sec, 'int(st.st_mtime)'
    can be 'sec + 1'. (issue4836)
    """
    try:
        # index access on os.stat results yields the integer seconds
        return st[stat.ST_MTIME]
    except (TypeError, IndexError):
        # osutil.stat doesn't allow index access and its st_mtime is int
        return st.st_mtime
969
976
970 # File system features
977 # File system features
971
978
def checkcase(path):
    """
    Return true if the given path is on a case-sensitive filesystem

    Requires a path (like /foo/.hg) ending with a foldable final
    directory component.
    """
    st1 = os.lstat(path)
    d, b = os.path.split(path)
    folded = b.upper()
    if b == folded:
        folded = b.lower()
        if b == folded:
            # name has no case variants at all; nothing to test
            return True # no evidence against case sensitivity
    variant = os.path.join(d, folded)
    try:
        st2 = os.lstat(variant)
    except OSError:
        # the folded variant doesn't resolve: case-sensitive
        return True
    # same underlying file under both spellings => case-insensitive
    if st2 == st1:
        return False
    return True
994
1001
# _re2 is a tristate: None = importable but not yet verified,
# False = unavailable, True = verified working (see _re._checkre2)
try:
    import re2
    _re2 = None
except ImportError:
    _re2 = False
1000
1007
class _re(object):
    def _checkre2(self):
        # verify re2 actually works before trusting it
        global _re2
        try:
            # check if match works, see issue3964
            _re2 = bool(re2.match(r'\[([^\[]+)\]', '[ui]'))
        except ImportError:
            _re2 = False

    def compile(self, pat, flags=0):
        '''Compile a regular expression, using re2 if possible

        For best performance, use only re2-compatible regexp features. The
        only flags from the re module that are re2-compatible are
        IGNORECASE and MULTILINE.'''
        if _re2 is None:
            self._checkre2()
        if _re2 and (flags & ~(remod.IGNORECASE | remod.MULTILINE)) == 0:
            # re2 takes flags inline rather than as an argument
            if flags & remod.IGNORECASE:
                pat = '(?i)' + pat
            if flags & remod.MULTILINE:
                pat = '(?m)' + pat
            try:
                return re2.compile(pat)
            except re2.error:
                # pattern uses features re2 doesn't support
                pass
        return remod.compile(pat, flags)

    @propertycache
    def escape(self):
        '''Return the version of escape corresponding to self.compile.

        This is imperfect because whether re2 or re is used for a particular
        function depends on the flags, etc, but it's the best we can do.
        '''
        global _re2
        if _re2 is None:
            self._checkre2()
        return re2.escape if _re2 else remod.escape

re = _re()
1045
1052
# per-directory cache of {normcased name: on-disk name}, used by fspath()
_fspathcache = {}
def fspath(name, root):
    '''Get name in the case stored in the filesystem

    The name should be relative to root, and be normcase-ed for efficiency.

    Note that this function is unnecessary, and should not be
    called, for case-sensitive filesystems (simply because it's expensive).

    The root should be normcase-ed, too.
    '''
    def _makefspathcacheentry(dir):
        return dict((normcase(n), n) for n in os.listdir(dir))

    seps = os.sep
    if os.altsep:
        seps = seps + os.altsep
    # Protect backslashes. This gets silly very quickly.
    # NOTE(review): the result of this replace() is discarded, so it is
    # currently a no-op -- TODO confirm whether escaping was intended
    seps.replace('\\','\\\\')
    pattern = remod.compile(r'([^%s]+)|([%s]+)' % (seps, seps))
    curdir = os.path.normpath(root)
    result = []
    for part, sep in pattern.findall(name):
        if sep:
            # separator runs are passed through untouched
            result.append(sep)
            continue

        if curdir not in _fspathcache:
            _fspathcache[curdir] = _makefspathcacheentry(curdir)
        contents = _fspathcache[curdir]

        found = contents.get(part)
        if not found:
            # retry "once per directory" per "dirstate.walk" which
            # may take place for each patches of "hg qpush", for example
            _fspathcache[curdir] = contents = _makefspathcacheentry(curdir)
            found = contents.get(part)

        result.append(found or part)
        curdir = os.path.join(curdir, part)

    return ''.join(result)
1088
1095
def checknlink(testfile):
    '''check whether hardlink count reporting works properly'''

    # testfile may be open, so we need a separate file for checking to
    # work around issue2543 (or testfile may get lost on Samba shares)
    f1 = testfile + ".hgtmp1"
    if os.path.lexists(f1):
        return False
    try:
        posixfile(f1, 'w').close()
    except IOError:
        return False

    f2 = testfile + ".hgtmp2"
    fd = None
    try:
        oslink(f1, f2)
        # nlinks() may behave differently for files on Windows shares if
        # the file is open.
        fd = posixfile(f2)
        return nlinks(f2) > 1
    except OSError:
        return False
    finally:
        if fd is not None:
            fd.close()
        # best-effort removal of both probe files
        for f in (f1, f2):
            try:
                os.unlink(f)
            except OSError:
                pass
1120
1127
def endswithsep(path):
    '''Check path ends with os.sep or os.altsep.'''
    if path.endswith(os.sep):
        return True
    # when os.altsep is None this yields None (falsy), matching the
    # original short-circuit expression
    return os.altsep and path.endswith(os.altsep)
1124
1131
def splitpath(path):
    '''Split path by os.sep.
    Note that this function does not use os.altsep because this is
    an alternative of simple "xxx.split(os.sep)".
    It is recommended to use os.path.normpath() before using this
    function if need.'''
    separator = os.sep
    return path.split(separator)
1132
1139
def gui():
    '''Are we running in a GUI?

    NOTE(review): the non-darwin branch may return a truthy string (the
    DISPLAY value) rather than a bool -- callers appear to treat the
    result as truthy only.
    '''
    if sys.platform == 'darwin':
        if 'SSH_CONNECTION' in os.environ:
            # handle SSH access to a box where the user is logged in
            return False
        elif getattr(osutil, 'isgui', None):
            # check if a CoreGraphics session is available
            return osutil.isgui()
        else:
            # pure build; use a safe default
            return True
    else:
        # Windows is assumed to always have a GUI; elsewhere require an
        # X11 DISPLAY to be set.
        return os.name == "nt" or os.environ.get("DISPLAY")
1147
1154
def mktempcopy(name, emptyok=False, createmode=None):
    """Create a temporary file with the same contents from name

    The permission bits are copied from the original file.

    If the temporary file is going to be truncated immediately, you
    can use emptyok=True as an optimization.

    Returns the name of the temporary file.
    """
    # Create the temp file in the same directory as the target so the
    # eventual rename stays on one filesystem.
    d, fn = os.path.split(name)
    fd, temp = tempfile.mkstemp(prefix='.%s-' % fn, dir=d)
    os.close(fd)
    # Temporary files are created with mode 0600, which is usually not
    # what we want. If the original file already exists, just copy
    # its mode. Otherwise, manually obey umask.
    copymode(name, temp, createmode)
    if emptyok:
        return temp
    try:
        try:
            ifp = posixfile(name, "rb")
        except IOError as inst:
            if inst.errno == errno.ENOENT:
                # Source does not exist: the empty temp file stands in
                # for it.
                return temp
            if not getattr(inst, 'filename', None):
                inst.filename = name
            raise
        ofp = posixfile(temp, "wb")
        # Copy in bounded chunks to avoid loading the file into memory.
        for chunk in filechunkiter(ifp):
            ofp.write(chunk)
        ifp.close()
        ofp.close()
    except: # re-raises
        # Deliberate bare except: remove the half-written temp file even
        # on KeyboardInterrupt/SystemExit, then re-raise.
        try: os.unlink(temp)
        except OSError: pass
        raise
    return temp
1186
1193
class atomictempfile(object):
    '''writable file object that atomically updates a file

    All writes will go to a temporary copy of the original file. Call
    close() when you are done writing, and atomictempfile will rename
    the temporary copy to the original name, making the changes
    visible. If the object is destroyed without being closed, all your
    writes are discarded.
    '''
    def __init__(self, name, mode='w+b', createmode=None):
        self.__name = name # permanent name
        # 'w' in mode means the caller will overwrite everything, so the
        # copy of the original contents can be skipped (emptyok).
        self._tempname = mktempcopy(name, emptyok=('w' in mode),
                                    createmode=createmode)
        self._fp = posixfile(self._tempname, mode)

        # delegated methods
        self.write = self._fp.write
        self.seek = self._fp.seek
        self.tell = self._fp.tell
        self.fileno = self._fp.fileno

    def close(self):
        # Only commit (rename over the target) if not already closed,
        # making close() safe to call once after writes.
        if not self._fp.closed:
            self._fp.close()
            rename(self._tempname, localpath(self.__name))

    def discard(self):
        # Abandon all writes: remove the temp file (best effort) and
        # close the handle without touching the permanent name.
        if not self._fp.closed:
            try:
                os.unlink(self._tempname)
            except OSError:
                pass
            self._fp.close()

    def __del__(self):
        if safehasattr(self, '_fp'): # constructor actually did something
            self.discard()
1224
1231
def makedirs(name, mode=None, notindexed=False):
    """recursive directory creation with parent mode inheritance

    Like os.makedirs, but goes through makedir() so newly created
    directories can be marked not-indexed on Windows.  ``mode``, if
    given, is chmod'ed onto ``name`` (and recursively onto created
    parents) after creation.
    """
    try:
        makedir(name, notindexed)
    except OSError as err:
        if err.errno == errno.EEXIST:
            # NOTE(review): EEXIST is also raised when ``name`` exists
            # as a non-directory; this returns silently either way.
            return
        if err.errno != errno.ENOENT or not name:
            raise
        # Missing parent: create it first, then retry this level.
        parent = os.path.dirname(os.path.abspath(name))
        if parent == name:
            # reached the filesystem root; nothing more to create
            raise
        makedirs(parent, mode, notindexed)
        makedir(name, notindexed)
    if mode is not None:
        os.chmod(name, mode)
1241
1248
def ensuredirs(name, mode=None, notindexed=False):
    """race-safe recursive directory creation

    Newly created directories are marked as "not to be indexed by
    the content indexing service", if ``notindexed`` is specified
    for "write" mode access.
    """
    if os.path.isdir(name):
        return
    # Create parents top-down before attempting this level.
    parent = os.path.dirname(os.path.abspath(name))
    if parent != name:
        ensuredirs(parent, mode, notindexed)
    try:
        makedir(name, notindexed)
    except OSError as err:
        # EEXIST plus isdir() re-check makes this safe against a
        # concurrent creator; any other failure propagates.
        if err.errno == errno.EEXIST and os.path.isdir(name):
            # someone else seems to have won a directory creation race
            return
        raise
    if mode is not None:
        os.chmod(name, mode)
1263
1270
def readfile(path):
    """Return the entire binary contents of ``path``."""
    with open(path, 'rb') as fp:
        return fp.read()
1270
1277
def writefile(path, text):
    """Write ``text`` to ``path``, truncating any existing contents."""
    with open(path, 'wb') as fp:
        fp.write(text)
1277
1284
def appendfile(path, text):
    """Append ``text`` to ``path``, creating the file if needed."""
    with open(path, 'ab') as fp:
        fp.write(text)
1284
1291
class chunkbuffer(object):
    """Allow arbitrary sized chunks of data to be efficiently read from an
    iterator over chunks of arbitrary size."""

    def __init__(self, in_iter):
        """in_iter is the iterator that's iterating over the input chunks.
        targetsize is how big a buffer to try to maintain."""
        # NOTE(review): there is no ``targetsize`` parameter; the 2**18
        # refill target below is hard-coded.
        def splitbig(chunks):
            # Re-yield any chunk over 1MB in 256KB slices so read() never
            # has to hold and slice an oversized string.
            for chunk in chunks:
                if len(chunk) > 2**20:
                    pos = 0
                    while pos < len(chunk):
                        end = pos + 2 ** 18
                        yield chunk[pos:end]
                        pos = end
                else:
                    yield chunk
        self.iter = splitbig(in_iter)
        self._queue = collections.deque()
        # Offset of the first unconsumed byte within self._queue[0].
        self._chunkoffset = 0

    def read(self, l=None):
        """Read L bytes of data from the iterator of chunks of data.
        Returns less than L bytes if the iterator runs dry.

        If size parameter is omitted, read everything"""
        if l is None:
            return ''.join(self.iter)

        left = l
        buf = []
        queue = self._queue
        while left > 0:
            # refill the queue
            if not queue:
                target = 2**18
                for chunk in self.iter:
                    queue.append(chunk)
                    target -= len(chunk)
                    if target <= 0:
                        break
                if not queue:
                    # Source iterator exhausted: return what we have.
                    break

            # The easy way to do this would be to queue.popleft(), modify the
            # chunk (if necessary), then queue.appendleft(). However, for cases
            # where we read partial chunk content, this incurs 2 dequeue
            # mutations and creates a new str for the remaining chunk in the
            # queue. Our code below avoids this overhead.

            chunk = queue[0]
            chunkl = len(chunk)
            offset = self._chunkoffset

            # Use full chunk.
            if offset == 0 and left >= chunkl:
                left -= chunkl
                queue.popleft()
                buf.append(chunk)
                # self._chunkoffset remains at 0.
                continue

            chunkremaining = chunkl - offset

            # Use all of unconsumed part of chunk.
            if left >= chunkremaining:
                left -= chunkremaining
                queue.popleft()
                # offset == 0 is enabled by block above, so this won't merely
                # copy via ``chunk[0:]``.
                buf.append(chunk[offset:])
                self._chunkoffset = 0

            # Partial chunk needed.
            else:
                buf.append(chunk[offset:offset + left])
                self._chunkoffset += left
                # left goes negative here (left < chunkremaining), which
                # terminates the while loop -- the request is satisfied.
                left -= chunkremaining

        return ''.join(buf)
1365
1372
def filechunkiter(f, size=65536, limit=None):
    """Create a generator that produces the data in the file size
    (default 65536) bytes at a time, up to optional limit (default is
    to read all data). Chunks may be less than size bytes if the
    chunk is the last chunk in the file, or the file is a socket or
    some other type of file that sometimes reads less data than is
    requested."""
    assert size >= 0
    assert limit is None or limit >= 0
    while True:
        # Never request more than the remaining limit allows.
        nbytes = size if limit is None else min(limit, size)
        # A zero-byte request short-circuits without touching the file.
        s = nbytes and f.read(nbytes)
        if not s:
            break
        if limit:
            limit -= len(s)
        yield s
1386
1393
def makedate(timestamp=None):
    '''Return a unix timestamp (or the current time) as a (unixtime,
    offset) tuple based off the local timezone.'''
    if timestamp is None:
        timestamp = time.time()
    if timestamp < 0:
        hint = _("check your clock")
        raise Abort(_("negative timestamp: %d") % timestamp, hint=hint)
    # The local offset is derived from the difference between the UTC
    # and local renderings of the same instant.
    utc = datetime.datetime.utcfromtimestamp(timestamp)
    local = datetime.datetime.fromtimestamp(timestamp)
    delta = utc - local
    tz = delta.days * 86400 + delta.seconds
    return timestamp, tz
1399
1406
def datestr(date=None, format='%a %b %d %H:%M:%S %Y %1%2'):
    """represent a (unixtime, offset) tuple as a localized time.
    unixtime is seconds since the epoch, and offset is the time zone's
    number of seconds away from UTC. if timezone is false, do not
    append time zone to string."""
    t, tz = date or makedate()
    if t < 0:
        t = 0 # time.gmtime(lt) fails on Windows for lt < -43200
        tz = 0
    if "%1" in format or "%2" in format or "%z" in format:
        # %1/%2 are Mercurial extensions: tz hours and tz minutes.
        # Positive offsets are *west* of UTC, hence the inverted sign.
        sign = "-" if tz > 0 else "+"
        minutes = abs(tz) // 60
        format = format.replace("%z", "%1%2")
        format = format.replace("%1", "%c%02d" % (sign, minutes // 60))
        format = format.replace("%2", "%02d" % (minutes % 60))
    try:
        t = time.gmtime(float(t) - tz)
    except ValueError:
        # time was out of range
        t = time.gmtime(sys.maxint)
    return time.strftime(format, t)
1422
1429
def shortdate(date=None):
    """turn (timestamp, tzoff) tuple into iso 8631 date."""
    iso_format = '%Y-%m-%d'
    return datestr(date, format=iso_format)
1426
1433
def parsetimezone(tz):
    """parse a timezone string and return an offset integer

    Accepts numeric offsets of the form "+HHMM"/"-HHMM" and the names
    "GMT" and "UTC".  Returns the offset in seconds west of UTC (the
    sign convention used by makedate/datestr), or None if the string is
    not a recognized timezone.
    """
    # Check the length before indexing so an empty (or short) string
    # returns None instead of raising IndexError on tz[0].
    if len(tz) == 5 and tz[0] in "+-" and tz[1:].isdigit():
        sign = (tz[0] == "+") and 1 or -1
        hours = int(tz[1:3])
        minutes = int(tz[3:5])
        # offsets east of UTC are negative in this convention
        return -sign * (hours * 60 + minutes) * 60
    if tz == "GMT" or tz == "UTC":
        return 0
    return None
1437
1444
def strdate(string, format, defaults=[]):
    """parse a localized time string and return a (unixtime, offset) tuple.
    if the string cannot be parsed, ValueError is raised.

    ``defaults`` maps specificity keys ("S", "M", "HI", "d", "mb", "yY")
    to (zero-biased, today-biased) value pairs used to fill in fields
    missing from ``format``.  NOTE(review): the declared default ``[]``
    is not usable (it would fail on ``defaults[part]`` below); callers
    always pass a dict -- confirm before relying on the default.
    """
    # NOTE: unixtime = localunixtime + offset
    offset, date = parsetimezone(string.split()[-1]), string
    if offset is not None:
        # an explicit timezone was given; strip it before strptime
        date = " ".join(string.split()[:-1])

    # add missing elements from defaults
    usenow = False # default to using biased defaults
    for part in ("S", "M", "HI", "d", "mb", "yY"): # decreasing specificity
        found = [True for p in part if ("%"+p) in format]
        if not found:
            # append the default via a '@' separator that cannot appear
            # in any of the supported date formats
            date += "@" + defaults[part][usenow]
            format += "@%" + part[0]
        else:
            # We've found a specific time element, less specific time
            # elements are relative to today
            usenow = True

    timetuple = time.strptime(date, format)
    localunixtime = int(calendar.timegm(timetuple))
    if offset is None:
        # local timezone
        unixtime = int(time.mktime(timetuple))
        offset = unixtime - localunixtime
    else:
        unixtime = localunixtime + offset
    return unixtime, offset
1467
1474
def parsedate(date, formats=None, bias=None):
    """parse a localized date/time and return a (unixtime, offset) tuple.

    The date may be a "unixtime offset" string or in one of the specified
    formats. If the date already is a (unixtime, offset) tuple, it is returned.

    >>> parsedate(' today ') == parsedate(\
                                  datetime.date.today().strftime('%b %d'))
    True
    >>> parsedate( 'yesterday ') == parsedate((datetime.date.today() -\
                                               datetime.timedelta(days=1)\
                                              ).strftime('%b %d'))
    True
    >>> now, tz = makedate()
    >>> strnow, strtz = parsedate('now')
    >>> (strnow - now) < 1
    True
    >>> tz == strtz
    True
    """
    if bias is None:
        bias = {}
    if not date:
        # empty/None date means the epoch with no offset
        return 0, 0
    if isinstance(date, tuple) and len(date) == 2:
        # already parsed; pass through unchanged
        return date
    if not formats:
        formats = defaultdateformats
    date = date.strip()

    # symbolic dates (both the English literals and their translations)
    if date == 'now' or date == _('now'):
        return makedate()
    if date == 'today' or date == _('today'):
        date = datetime.date.today().strftime('%b %d')
    elif date == 'yesterday' or date == _('yesterday'):
        date = (datetime.date.today() -
                datetime.timedelta(days=1)).strftime('%b %d')

    try:
        # fast path: "unixtime offset" pair of integers
        when, offset = map(int, date.split(' '))
    except ValueError:
        # fill out defaults
        now = makedate()
        defaults = {}
        for part in ("d", "mb", "yY", "HI", "M", "S"):
            # this piece is for rounding the specific end of unknowns
            b = bias.get(part)
            if b is None:
                if part[0] in "HMS":
                    b = "00"
                else:
                    b = "0"

            # this piece is for matching the generic end to today's date
            n = datestr(now, "%" + part[0])

            defaults[part] = (b, n)

        # try each candidate format; for-else raises only if none parse
        for format in formats:
            try:
                when, offset = strdate(date, format, defaults)
            except (ValueError, OverflowError):
                pass
            else:
                break
        else:
            raise Abort(_('invalid date: %r') % date)
    # validate explicit (probably user-specified) date and
    # time zone offset. values must fit in signed 32 bits for
    # current 32-bit linux runtimes. timezones go from UTC-12
    # to UTC+14
    if abs(when) > 0x7fffffff:
        raise Abort(_('date exceeds 32 bits: %d') % when)
    if when < 0:
        raise Abort(_('negative date value: %d') % when)
    if offset < -50400 or offset > 43200:
        raise Abort(_('impossible time zone offset: %d') % offset)
    return when, offset
1546
1553
def matchdate(date):
    """Return a function that matches a given date match specifier

    Formats include:

    '{date}' match a given date to the accuracy provided

    '<{date}' on or before a given date

    '>{date}' on or after a given date

    >>> p1 = parsedate("10:29:59")
    >>> p2 = parsedate("10:30:00")
    >>> p3 = parsedate("10:30:59")
    >>> p4 = parsedate("10:31:00")
    >>> p5 = parsedate("Sep 15 10:30:00 1999")
    >>> f = matchdate("10:30")
    >>> f(p1[0])
    False
    >>> f(p2[0])
    True
    >>> f(p3[0])
    True
    >>> f(p4[0])
    False
    >>> f(p5[0])
    False
    """

    def lower(date):
        # bias unspecified fields toward the earliest possible moment
        d = {'mb': "1", 'd': "1"}
        return parsedate(date, extendeddateformats, d)[0]

    def upper(date):
        # bias unspecified fields toward the latest possible moment;
        # probe month lengths 31 -> 30 -> 29 -> 28 until one parses
        d = {'mb': "12", 'HI': "23", 'M': "59", 'S': "59"}
        for days in ("31", "30", "29"):
            try:
                d["d"] = days
                return parsedate(date, extendeddateformats, d)[0]
            except Abort:
                pass
        d["d"] = "28"
        return parsedate(date, extendeddateformats, d)[0]

    date = date.strip()

    if not date:
        raise Abort(_("dates cannot consist entirely of whitespace"))
    elif date[0] == "<":
        if not date[1:]:
            raise Abort(_("invalid day spec, use '<DATE'"))
        when = upper(date[1:])
        return lambda x: x <= when
    elif date[0] == ">":
        if not date[1:]:
            raise Abort(_("invalid day spec, use '>DATE'"))
        when = lower(date[1:])
        return lambda x: x >= when
    elif date[0] == "-":
        # "-N": everything within the last N days
        try:
            days = int(date[1:])
        except ValueError:
            raise Abort(_("invalid day spec: %s") % date[1:])
        if days < 0:
            raise Abort(_('%s must be nonnegative (see "hg help dates")')
                % date[1:])
        when = makedate()[0] - days * 3600 * 24
        return lambda x: x >= when
    elif " to " in date:
        # explicit inclusive range "A to B"
        a, b = date.split(" to ")
        start, stop = lower(a), upper(b)
        return lambda x: x >= start and x <= stop
    else:
        # bare date: match the whole span it denotes
        start, stop = lower(date), upper(date)
        return lambda x: x >= start and x <= stop
1622
1629
def stringmatcher(pattern):
    """
    accepts a string, possibly starting with 're:' or 'literal:' prefix.
    returns the matcher name, pattern, and matcher function.
    missing or unknown prefixes are treated as literal matches.

    The returned matcher is ``regex.search`` for 're:' patterns and
    string equality otherwise.
    """
    if pattern.startswith('re:'):
        regexsrc = pattern[3:]
        try:
            compiled = remod.compile(regexsrc)
        except remod.error as e:
            raise error.ParseError(_('invalid regular expression: %s')
                                   % e)
        return 're', regexsrc, compiled.search
    # 'literal:' is an explicit opt-out of regex interpretation; any
    # other (or no) prefix falls through to a literal match unchanged.
    if pattern.startswith('literal:'):
        pattern = pattern[8:]
    return 'literal', pattern, pattern.__eq__
1661
1668
def shortuser(user):
    """Return a short representation of a user name or email address."""
    # drop the mail domain, if any
    pos = user.find('@')
    if pos != -1:
        user = user[:pos]
    # for "Name <local" forms, keep what follows the '<'
    pos = user.find('<')
    if pos != -1:
        user = user[pos + 1:]
    # keep only the first word, then only the part before the first dot
    for sep in (' ', '.'):
        pos = user.find(sep)
        if pos != -1:
            user = user[:pos]
    return user
1677
1684
def emailuser(user):
    """Return the user portion of an email address."""
    # strip everything from the first '@' onwards (no-op when absent)
    user = user.partition('@')[0]
    # strip a leading "Name <" display part, if present
    lt = user.find('<')
    if lt != -1:
        user = user[lt + 1:]
    return user
1687
1694
def email(author):
    '''get email of author.'''
    # when there is no '<', find() returns -1 and start becomes 0,
    # so the whole string is treated as the address
    start = author.find('<') + 1
    end = author.find('>')
    if end == -1:
        return author[start:]
    return author[start:end]
1694
1701
def ellipsis(text, maxlength=400):
    """Trim string to at most maxlength (default: 400) columns in display.

    Width accounting and the '...' suffix are delegated to encoding.trim,
    which works in display columns rather than bytes or characters.
    """
    return encoding.trim(text, maxlength, ellipsis='...')
1698
1705
def unitcountfn(*unittable):
    '''return a function that renders a readable count of some quantity'''

    def go(count):
        # entries are ordered largest-first; use the first unit whose
        # threshold (scale * unitsize) the count reaches
        for scale, unitsize, fmt in unittable:
            if count >= unitsize * scale:
                return fmt % (count / float(unitsize))
        # below every threshold: render with the smallest unit's format
        smallestfmt = unittable[-1][2]
        return smallestfmt % count

    return go
1709
1716
# human-readable byte-count formatter built on unitcountfn; entries are
# tried top-down, so the largest unit whose threshold the value reaches
# wins, and the displayed precision shrinks as the magnitude grows
bytecount = unitcountfn(
    (100, 1 << 30, _('%.0f GB')),
    (10, 1 << 30, _('%.1f GB')),
    (1, 1 << 30, _('%.2f GB')),
    (100, 1 << 20, _('%.0f MB')),
    (10, 1 << 20, _('%.1f MB')),
    (1, 1 << 20, _('%.2f MB')),
    (100, 1 << 10, _('%.0f KB')),
    (10, 1 << 10, _('%.1f KB')),
    (1, 1 << 10, _('%.2f KB')),
    (1, 1, _('%.0f bytes')),
    )
1722
1729
def uirepr(s):
    """Return repr(s) with doubled backslashes collapsed to single ones.

    Keeps Windows paths readable in user-facing output.
    """
    escaped = repr(s)
    # repr() doubles every backslash; undo that for display
    return escaped.replace('\\\\', '\\')
1726
1733
# delay import of textwrap; on first call this factory rebinds its own
# module-level name to the wrapper class it defines (see bottom)
def MBTextWrapper(**kwargs):
    class tw(textwrap.TextWrapper):
        """
        Extend TextWrapper for width-awareness.

        Neither number of 'bytes' in any encoding nor 'characters' is
        appropriate to calculate terminal columns for specified string.

        Original TextWrapper implementation uses built-in 'len()' directly,
        so overriding is needed to use width information of each characters.

        In addition, characters classified into 'ambiguous' width are
        treated as wide in East Asian area, but as narrow in other.

        This requires use decision to determine width of such characters.
        """
        def _cutdown(self, ucstr, space_left):
            # split ucstr so the first piece fits in space_left display
            # columns (as measured by encoding.ucolwidth)
            l = 0
            colwidth = encoding.ucolwidth
            for i in xrange(len(ucstr)):
                l += colwidth(ucstr[i])
                if space_left < l:
                    return (ucstr[:i], ucstr[i:])
            return ucstr, ''

        # overriding of base class
        def _handle_long_word(self, reversed_chunks, cur_line, cur_len, width):
            space_left = max(width - cur_len, 1)

            if self.break_long_words:
                cut, res = self._cutdown(reversed_chunks[-1], space_left)
                cur_line.append(cut)
                reversed_chunks[-1] = res
            elif not cur_line:
                cur_line.append(reversed_chunks.pop())

        # this overriding code is imported from TextWrapper of Python 2.6
        # to calculate columns of string by 'encoding.ucolwidth()'
        def _wrap_chunks(self, chunks):
            colwidth = encoding.ucolwidth

            lines = []
            if self.width <= 0:
                raise ValueError("invalid width %r (must be > 0)" % self.width)

            # Arrange in reverse order so items can be efficiently popped
            # from a stack of chunks.
            chunks.reverse()

            while chunks:

                # Start the list of chunks that will make up the current line.
                # cur_len is just the length of all the chunks in cur_line.
                cur_line = []
                cur_len = 0

                # Figure out which static string will prefix this line.
                if lines:
                    indent = self.subsequent_indent
                else:
                    indent = self.initial_indent

                # Maximum width for this line.
                width = self.width - len(indent)

                # First chunk on line is whitespace -- drop it, unless this
                # is the very beginning of the text (i.e. no lines started yet).
                if self.drop_whitespace and chunks[-1].strip() == '' and lines:
                    del chunks[-1]

                while chunks:
                    l = colwidth(chunks[-1])

                    # Can at least squeeze this chunk onto the current line.
                    if cur_len + l <= width:
                        cur_line.append(chunks.pop())
                        cur_len += l

                    # Nope, this line is full.
                    else:
                        break

                # The current line is full, and the next chunk is too big to
                # fit on *any* line (not just this one).
                if chunks and colwidth(chunks[-1]) > width:
                    self._handle_long_word(chunks, cur_line, cur_len, width)

                # If the last chunk on this line is all whitespace, drop it.
                if (self.drop_whitespace and
                    cur_line and cur_line[-1].strip() == ''):
                    del cur_line[-1]

                # Convert current line back to a string and store it in list
                # of all lines (return value).
                if cur_line:
                    lines.append(indent + ''.join(cur_line))

            return lines

    # memoize: replace this factory with the class itself, so subsequent
    # MBTextWrapper(...) calls construct tw directly without re-running
    # the class definition above
    global MBTextWrapper
    MBTextWrapper = tw
    return tw(**kwargs)
1830
1837
def wrap(line, width, initindent='', hangindent=''):
    """Wrap a locally-encoded byte string to the given display width.

    initindent prefixes the first output line, hangindent all later ones;
    the result is re-encoded to the local encoding.
    """
    maxindent = max(len(hangindent), len(initindent))
    if width <= maxindent:
        # adjust for weird terminal size: the indent alone would fill
        # (or overflow) the requested width
        width = max(78, maxindent + 1)
    enc = encoding.encoding
    mode = encoding.encodingmode
    uline = line.decode(enc, mode)
    uinit = initindent.decode(enc, mode)
    uhang = hangindent.decode(enc, mode)
    wrapper = MBTextWrapper(width=width,
                            initial_indent=uinit,
                            subsequent_indent=uhang)
    return wrapper.fill(uline).encode(enc)
1843
1850
def iterlines(iterator):
    """Flatten an iterable of text chunks into individual lines."""
    for block in iterator:
        for single in block.splitlines():
            yield single
1848
1855
def expandpath(path):
    """Expand environment variables, then '~' constructs, in path."""
    path = os.path.expandvars(path)
    return os.path.expanduser(path)
1851
1858
def hgcmd():
    """Return the command used to execute current hg

    This is different from hgexecutable() because on Windows we want
    to avoid things opening new shell windows like batch files, so we
    get either the python call or current executable.
    """
    if mainfrozen():
        # NOTE(review): in a frozen build sys.executable appears to be the
        # hg binary itself, so it is returned directly -- confirm
        return [sys.executable]
    # otherwise ask the platform layer how hg was invoked
    return gethgcmd()
1862
1869
def rundetached(args, condfn):
    """Execute the argument list in a detached process.

    condfn is a callable which is called repeatedly and should return
    True once the child process is known to have started successfully.
    At this point, the child process PID is returned. If the child
    process fails to start or finishes before condfn() evaluates to
    True, return -1.
    """
    # Windows case is easier because the child process is either
    # successfully starting and validating the condition or exiting
    # on failure. We just poll on its PID. On Unix, if the child
    # process fails to start, it will be left in a zombie state until
    # the parent wait on it, which we cannot do since we expect a long
    # running process on success. Instead we listen for SIGCHLD telling
    # us our child process terminated.
    terminated = set()
    def handler(signum, frame):
        # reap the dead child; os.wait() returns a (pid, status) tuple.
        # NOTE(review): the check below tests 'pid in terminated' with a
        # bare int against these tuples, so it can never match -- liveness
        # detection effectively relies on testpid(); consider storing
        # os.wait()[0] instead.
        terminated.add(os.wait())
    prevhandler = None
    # SIGCHLD does not exist on all platforms (e.g. Windows)
    SIGCHLD = getattr(signal, 'SIGCHLD', None)
    if SIGCHLD is not None:
        prevhandler = signal.signal(SIGCHLD, handler)
    try:
        pid = spawndetached(args)
        while not condfn():
            # re-check condfn() to avoid a race: the child may have
            # satisfied the condition just before terminating
            if ((pid in terminated or not testpid(pid))
                and not condfn()):
                return -1
            time.sleep(0.1)
        return pid
    finally:
        if prevhandler is not None:
            # prevhandler is only set when SIGCHLD exists, so referencing
            # signal.SIGCHLD here is safe
            signal.signal(signal.SIGCHLD, prevhandler)
1897
1904
def interpolate(prefix, mapping, s, fn=None, escape_prefix=False):
    """Return the result of interpolating items in the mapping into string s.

    prefix is a single character string, or a two character string with
    a backslash as the first character if the prefix needs to be escaped in
    a regular expression.

    fn is an optional function that will be applied to the replacement text
    just before replacement.

    escape_prefix is an optional flag that allows using doubled prefix for
    its escaping.
    """
    fn = fn or (lambda s: s)
    patterns = '|'.join(mapping.keys())
    if escape_prefix:
        patterns += '|' + prefix
        if len(prefix) > 1:
            prefix_char = prefix[1:]
        else:
            prefix_char = prefix
        # register the escape entry ("%%" -> "%") on a copy so the
        # caller's dict is not mutated as a side effect
        mapping = dict(mapping)
        mapping[prefix_char] = prefix_char
    r = remod.compile(r'%s(%s)' % (prefix, patterns))
    # group() includes the prefix character; strip it before lookup
    return r.sub(lambda x: fn(mapping[x.group()[1:]]), s)
1922
1929
def getport(port):
    """Return the port for a given network service.

    If port is an integer, it's returned as is. If it's a string, it's
    looked up using socket.getservbyname(). If there's no matching
    service, error.Abort is raised.
    """
    try:
        return int(port)
    except ValueError:
        # not a number: fall through to a service-name lookup
        try:
            return socket.getservbyname(port)
        except socket.error:
            raise Abort(_("no port number associated with service '%s'")
                        % port)
1939
1946
# recognized spellings for config-style boolean values
_booleans = dict.fromkeys(['1', 'yes', 'true', 'on', 'always'], True)
_booleans.update(dict.fromkeys(['0', 'no', 'false', 'off', 'never'], False))

def parsebool(s):
    """Parse s into a boolean.

    If s is not a valid boolean, returns None.
    """
    # case-insensitive lookup; unknown spellings yield None
    return _booleans.get(s.lower())
1950
1957
1951 _hexdig = '0123456789ABCDEFabcdef'
1958 _hexdig = '0123456789ABCDEFabcdef'
1952 _hextochr = dict((a + b, chr(int(a + b, 16)))
1959 _hextochr = dict((a + b, chr(int(a + b, 16)))
1953 for a in _hexdig for b in _hexdig)
1960 for a in _hexdig for b in _hexdig)
1954
1961
1955 def _urlunquote(s):
1962 def _urlunquote(s):
1956 """Decode HTTP/HTML % encoding.
1963 """Decode HTTP/HTML % encoding.
1957
1964
1958 >>> _urlunquote('abc%20def')
1965 >>> _urlunquote('abc%20def')
1959 'abc def'
1966 'abc def'
1960 """
1967 """
1961 res = s.split('%')
1968 res = s.split('%')
1962 # fastpath
1969 # fastpath
1963 if len(res) == 1:
1970 if len(res) == 1:
1964 return s
1971 return s
1965 s = res[0]
1972 s = res[0]
1966 for item in res[1:]:
1973 for item in res[1:]:
1967 try:
1974 try:
1968 s += _hextochr[item[:2]] + item[2:]
1975 s += _hextochr[item[:2]] + item[2:]
1969 except KeyError:
1976 except KeyError:
1970 s += '%' + item
1977 s += '%' + item
1971 except UnicodeDecodeError:
1978 except UnicodeDecodeError:
1972 s += unichr(int(item[:2], 16)) + item[2:]
1979 s += unichr(int(item[:2], 16)) + item[2:]
1973 return s
1980 return s
1974
1981
1975 class url(object):
1982 class url(object):
1976 r"""Reliable URL parser.
1983 r"""Reliable URL parser.
1977
1984
1978 This parses URLs and provides attributes for the following
1985 This parses URLs and provides attributes for the following
1979 components:
1986 components:
1980
1987
1981 <scheme>://<user>:<passwd>@<host>:<port>/<path>?<query>#<fragment>
1988 <scheme>://<user>:<passwd>@<host>:<port>/<path>?<query>#<fragment>
1982
1989
1983 Missing components are set to None. The only exception is
1990 Missing components are set to None. The only exception is
1984 fragment, which is set to '' if present but empty.
1991 fragment, which is set to '' if present but empty.
1985
1992
1986 If parsefragment is False, fragment is included in query. If
1993 If parsefragment is False, fragment is included in query. If
1987 parsequery is False, query is included in path. If both are
1994 parsequery is False, query is included in path. If both are
1988 False, both fragment and query are included in path.
1995 False, both fragment and query are included in path.
1989
1996
1990 See http://www.ietf.org/rfc/rfc2396.txt for more information.
1997 See http://www.ietf.org/rfc/rfc2396.txt for more information.
1991
1998
1992 Note that for backward compatibility reasons, bundle URLs do not
1999 Note that for backward compatibility reasons, bundle URLs do not
1993 take host names. That means 'bundle://../' has a path of '../'.
2000 take host names. That means 'bundle://../' has a path of '../'.
1994
2001
1995 Examples:
2002 Examples:
1996
2003
1997 >>> url('http://www.ietf.org/rfc/rfc2396.txt')
2004 >>> url('http://www.ietf.org/rfc/rfc2396.txt')
1998 <url scheme: 'http', host: 'www.ietf.org', path: 'rfc/rfc2396.txt'>
2005 <url scheme: 'http', host: 'www.ietf.org', path: 'rfc/rfc2396.txt'>
1999 >>> url('ssh://[::1]:2200//home/joe/repo')
2006 >>> url('ssh://[::1]:2200//home/joe/repo')
2000 <url scheme: 'ssh', host: '[::1]', port: '2200', path: '/home/joe/repo'>
2007 <url scheme: 'ssh', host: '[::1]', port: '2200', path: '/home/joe/repo'>
2001 >>> url('file:///home/joe/repo')
2008 >>> url('file:///home/joe/repo')
2002 <url scheme: 'file', path: '/home/joe/repo'>
2009 <url scheme: 'file', path: '/home/joe/repo'>
2003 >>> url('file:///c:/temp/foo/')
2010 >>> url('file:///c:/temp/foo/')
2004 <url scheme: 'file', path: 'c:/temp/foo/'>
2011 <url scheme: 'file', path: 'c:/temp/foo/'>
2005 >>> url('bundle:foo')
2012 >>> url('bundle:foo')
2006 <url scheme: 'bundle', path: 'foo'>
2013 <url scheme: 'bundle', path: 'foo'>
2007 >>> url('bundle://../foo')
2014 >>> url('bundle://../foo')
2008 <url scheme: 'bundle', path: '../foo'>
2015 <url scheme: 'bundle', path: '../foo'>
2009 >>> url(r'c:\foo\bar')
2016 >>> url(r'c:\foo\bar')
2010 <url path: 'c:\\foo\\bar'>
2017 <url path: 'c:\\foo\\bar'>
2011 >>> url(r'\\blah\blah\blah')
2018 >>> url(r'\\blah\blah\blah')
2012 <url path: '\\\\blah\\blah\\blah'>
2019 <url path: '\\\\blah\\blah\\blah'>
2013 >>> url(r'\\blah\blah\blah#baz')
2020 >>> url(r'\\blah\blah\blah#baz')
2014 <url path: '\\\\blah\\blah\\blah', fragment: 'baz'>
2021 <url path: '\\\\blah\\blah\\blah', fragment: 'baz'>
2015 >>> url(r'file:///C:\users\me')
2022 >>> url(r'file:///C:\users\me')
2016 <url scheme: 'file', path: 'C:\\users\\me'>
2023 <url scheme: 'file', path: 'C:\\users\\me'>
2017
2024
2018 Authentication credentials:
2025 Authentication credentials:
2019
2026
2020 >>> url('ssh://joe:xyz@x/repo')
2027 >>> url('ssh://joe:xyz@x/repo')
2021 <url scheme: 'ssh', user: 'joe', passwd: 'xyz', host: 'x', path: 'repo'>
2028 <url scheme: 'ssh', user: 'joe', passwd: 'xyz', host: 'x', path: 'repo'>
2022 >>> url('ssh://joe@x/repo')
2029 >>> url('ssh://joe@x/repo')
2023 <url scheme: 'ssh', user: 'joe', host: 'x', path: 'repo'>
2030 <url scheme: 'ssh', user: 'joe', host: 'x', path: 'repo'>
2024
2031
2025 Query strings and fragments:
2032 Query strings and fragments:
2026
2033
2027 >>> url('http://host/a?b#c')
2034 >>> url('http://host/a?b#c')
2028 <url scheme: 'http', host: 'host', path: 'a', query: 'b', fragment: 'c'>
2035 <url scheme: 'http', host: 'host', path: 'a', query: 'b', fragment: 'c'>
2029 >>> url('http://host/a?b#c', parsequery=False, parsefragment=False)
2036 >>> url('http://host/a?b#c', parsequery=False, parsefragment=False)
2030 <url scheme: 'http', host: 'host', path: 'a?b#c'>
2037 <url scheme: 'http', host: 'host', path: 'a?b#c'>
2031 """
2038 """
2032
2039
2033 _safechars = "!~*'()+"
2040 _safechars = "!~*'()+"
2034 _safepchars = "/!~*'()+:\\"
2041 _safepchars = "/!~*'()+:\\"
2035 _matchscheme = remod.compile(r'^[a-zA-Z0-9+.\-]+:').match
2042 _matchscheme = remod.compile(r'^[a-zA-Z0-9+.\-]+:').match
2036
2043
2037 def __init__(self, path, parsequery=True, parsefragment=True):
2044 def __init__(self, path, parsequery=True, parsefragment=True):
2038 # We slowly chomp away at path until we have only the path left
2045 # We slowly chomp away at path until we have only the path left
2039 self.scheme = self.user = self.passwd = self.host = None
2046 self.scheme = self.user = self.passwd = self.host = None
2040 self.port = self.path = self.query = self.fragment = None
2047 self.port = self.path = self.query = self.fragment = None
2041 self._localpath = True
2048 self._localpath = True
2042 self._hostport = ''
2049 self._hostport = ''
2043 self._origpath = path
2050 self._origpath = path
2044
2051
2045 if parsefragment and '#' in path:
2052 if parsefragment and '#' in path:
2046 path, self.fragment = path.split('#', 1)
2053 path, self.fragment = path.split('#', 1)
2047 if not path:
2054 if not path:
2048 path = None
2055 path = None
2049
2056
2050 # special case for Windows drive letters and UNC paths
2057 # special case for Windows drive letters and UNC paths
2051 if hasdriveletter(path) or path.startswith(r'\\'):
2058 if hasdriveletter(path) or path.startswith(r'\\'):
2052 self.path = path
2059 self.path = path
2053 return
2060 return
2054
2061
2055 # For compatibility reasons, we can't handle bundle paths as
2062 # For compatibility reasons, we can't handle bundle paths as
2056 # normal URLS
2063 # normal URLS
2057 if path.startswith('bundle:'):
2064 if path.startswith('bundle:'):
2058 self.scheme = 'bundle'
2065 self.scheme = 'bundle'
2059 path = path[7:]
2066 path = path[7:]
2060 if path.startswith('//'):
2067 if path.startswith('//'):
2061 path = path[2:]
2068 path = path[2:]
2062 self.path = path
2069 self.path = path
2063 return
2070 return
2064
2071
2065 if self._matchscheme(path):
2072 if self._matchscheme(path):
2066 parts = path.split(':', 1)
2073 parts = path.split(':', 1)
2067 if parts[0]:
2074 if parts[0]:
2068 self.scheme, path = parts
2075 self.scheme, path = parts
2069 self._localpath = False
2076 self._localpath = False
2070
2077
2071 if not path:
2078 if not path:
2072 path = None
2079 path = None
2073 if self._localpath:
2080 if self._localpath:
2074 self.path = ''
2081 self.path = ''
2075 return
2082 return
2076 else:
2083 else:
2077 if self._localpath:
2084 if self._localpath:
2078 self.path = path
2085 self.path = path
2079 return
2086 return
2080
2087
2081 if parsequery and '?' in path:
2088 if parsequery and '?' in path:
2082 path, self.query = path.split('?', 1)
2089 path, self.query = path.split('?', 1)
2083 if not path:
2090 if not path:
2084 path = None
2091 path = None
2085 if not self.query:
2092 if not self.query:
2086 self.query = None
2093 self.query = None
2087
2094
2088 # // is required to specify a host/authority
2095 # // is required to specify a host/authority
2089 if path and path.startswith('//'):
2096 if path and path.startswith('//'):
2090 parts = path[2:].split('/', 1)
2097 parts = path[2:].split('/', 1)
2091 if len(parts) > 1:
2098 if len(parts) > 1:
2092 self.host, path = parts
2099 self.host, path = parts
2093 else:
2100 else:
2094 self.host = parts[0]
2101 self.host = parts[0]
2095 path = None
2102 path = None
2096 if not self.host:
2103 if not self.host:
2097 self.host = None
2104 self.host = None
2098 # path of file:///d is /d
2105 # path of file:///d is /d
2099 # path of file:///d:/ is d:/, not /d:/
2106 # path of file:///d:/ is d:/, not /d:/
2100 if path and not hasdriveletter(path):
2107 if path and not hasdriveletter(path):
2101 path = '/' + path
2108 path = '/' + path
2102
2109
2103 if self.host and '@' in self.host:
2110 if self.host and '@' in self.host:
2104 self.user, self.host = self.host.rsplit('@', 1)
2111 self.user, self.host = self.host.rsplit('@', 1)
2105 if ':' in self.user:
2112 if ':' in self.user:
2106 self.user, self.passwd = self.user.split(':', 1)
2113 self.user, self.passwd = self.user.split(':', 1)
2107 if not self.host:
2114 if not self.host:
2108 self.host = None
2115 self.host = None
2109
2116
2110 # Don't split on colons in IPv6 addresses without ports
2117 # Don't split on colons in IPv6 addresses without ports
2111 if (self.host and ':' in self.host and
2118 if (self.host and ':' in self.host and
2112 not (self.host.startswith('[') and self.host.endswith(']'))):
2119 not (self.host.startswith('[') and self.host.endswith(']'))):
2113 self._hostport = self.host
2120 self._hostport = self.host
2114 self.host, self.port = self.host.rsplit(':', 1)
2121 self.host, self.port = self.host.rsplit(':', 1)
2115 if not self.host:
2122 if not self.host:
2116 self.host = None
2123 self.host = None
2117
2124
2118 if (self.host and self.scheme == 'file' and
2125 if (self.host and self.scheme == 'file' and
2119 self.host not in ('localhost', '127.0.0.1', '[::1]')):
2126 self.host not in ('localhost', '127.0.0.1', '[::1]')):
2120 raise Abort(_('file:// URLs can only refer to localhost'))
2127 raise Abort(_('file:// URLs can only refer to localhost'))
2121
2128
2122 self.path = path
2129 self.path = path
2123
2130
2124 # leave the query string escaped
2131 # leave the query string escaped
2125 for a in ('user', 'passwd', 'host', 'port',
2132 for a in ('user', 'passwd', 'host', 'port',
2126 'path', 'fragment'):
2133 'path', 'fragment'):
2127 v = getattr(self, a)
2134 v = getattr(self, a)
2128 if v is not None:
2135 if v is not None:
2129 setattr(self, a, _urlunquote(v))
2136 setattr(self, a, _urlunquote(v))
2130
2137
def __repr__(self):
    """Return a debug string listing every component that is set."""
    shown = ['%s: %r' % (name, getattr(self, name))
             for name in ('scheme', 'user', 'passwd', 'host', 'port',
                          'path', 'query', 'fragment')
             if getattr(self, name) is not None]
    return '<url %s>' % ', '.join(shown)
2139
2146
def __str__(self):
    r"""Join the URL's components back into a URL string.

    Examples:

    >>> str(url('http://user:pw@host:80/c:/bob?fo:oo#ba:ar'))
    'http://user:pw@host:80/c:/bob?fo:oo#ba:ar'
    >>> str(url('http://user:pw@host:80/?foo=bar&baz=42'))
    'http://user:pw@host:80/?foo=bar&baz=42'
    >>> str(url('http://user:pw@host:80/?foo=bar%3dbaz'))
    'http://user:pw@host:80/?foo=bar%3dbaz'
    >>> str(url('ssh://user:pw@[::1]:2200//home/joe#'))
    'ssh://user:pw@[::1]:2200//home/joe#'
    >>> str(url('http://localhost:80//'))
    'http://localhost:80//'
    >>> str(url('http://localhost:80/'))
    'http://localhost:80/'
    >>> str(url('http://localhost:80'))
    'http://localhost:80/'
    >>> str(url('bundle:foo'))
    'bundle:foo'
    >>> str(url('bundle://../foo'))
    'bundle:../foo'
    >>> str(url('path'))
    'path'
    >>> str(url('file:///tmp/foo/bar'))
    'file:///tmp/foo/bar'
    >>> str(url('file:///c:/tmp/foo/bar'))
    'file:///c:/tmp/foo/bar'
    >>> print url(r'bundle:foo\bar')
    bundle:foo\bar
    >>> print url(r'file:///D:\data\hg')
    file:///D:\data\hg
    """
    # Plain local paths (and bundle:paths) round-trip without any
    # escaping; only the scheme prefix and fragment are re-attached.
    if self._localpath:
        s = self.path
        if self.scheme == 'bundle':
            s = 'bundle:' + s
        if self.fragment:
            s += '#' + self.fragment
        return s

    s = self.scheme + ':'
    if self.user or self.passwd or self.host:
        s += '//'
    elif self.scheme and (not self.path or self.path.startswith('/')
                          or hasdriveletter(self.path)):
        s += '//'
        if hasdriveletter(self.path):
            # file:///c:/... needs an extra '/' before the drive letter
            s += '/'
    if self.user:
        s += urllib.quote(self.user, safe=self._safechars)
    if self.passwd:
        s += ':' + urllib.quote(self.passwd, safe=self._safechars)
    if self.user or self.passwd:
        s += '@'
    if self.host:
        if not (self.host.startswith('[') and self.host.endswith(']')):
            s += urllib.quote(self.host)
        else:
            # bracketed IPv6 literal: keep brackets and colons verbatim
            s += self.host
    if self.port:
        s += ':' + urllib.quote(self.port)
    if self.host:
        s += '/'
    if self.path:
        # TODO: similar to the query string, we should not unescape the
        # path when we store it, the path might contain '%2f' = '/',
        # which we should *not* escape.
        s += urllib.quote(self.path, safe=self._safepchars)
    if self.query:
        # we store the query in escaped form.
        s += '?' + self.query
    if self.fragment is not None:
        s += '#' + urllib.quote(self.fragment, safe=self._safepchars)
    return s
2216
2223
def authinfo(self):
    """Return (url_without_credentials, auth) for this URL.

    *auth* is None when no user is set; otherwise it is a 4-tuple
    (realm, uris, user, password) suitable for a urllib2 password
    manager.
    """
    user, passwd = self.user, self.passwd
    try:
        # Temporarily blank the credentials so str(self) omits them.
        self.user, self.passwd = None, None
        s = str(self)
    finally:
        self.user, self.passwd = user, passwd
    if not self.user:
        return (s, None)
    # authinfo[1] is passed to urllib2 password manager, and its
    # URIs must not contain credentials. The host is passed in the
    # URIs list because Python < 2.4.3 uses only that to search for
    # a password.
    return (s, (None, (s, self.host),
                self.user, self.passwd or ''))
2232
2239
def isabs(self):
    """Report whether this URL/path is absolute, i.e. cannot be joined
    onto a base path."""
    if self.scheme and self.scheme != 'file':
        return True # remote URL
    if hasdriveletter(self.path):
        return True # absolute for our purposes - can't be joined()
    # Windows UNC paths (\\server\share) and POSIX absolute paths
    return self.path.startswith(r'\\') or self.path.startswith('/')
2243
2250
def localpath(self):
    """Return the filesystem path for 'file:' and 'bundle:' URLs.

    For any other scheme, return the original string this url object
    was parsed from.
    """
    if self.scheme == 'file' or self.scheme == 'bundle':
        path = self.path or '/'
        # For Windows, we need to promote hosts containing drive
        # letters to paths with drive letters.
        if hasdriveletter(self._hostport):
            path = self._hostport + '/' + self.path
        elif (self.host is not None and self.path
              and not hasdriveletter(path)):
            path = '/' + path
        return path
    return self._origpath
2256
2263
def islocal(self):
    '''whether localpath will return something that posixfile can open'''
    return (not self.scheme) or self.scheme in ('file', 'bundle')
2261
2268
def hasscheme(path):
    """Report whether *path* parses as a URL with an explicit scheme."""
    parsed = url(path)
    return bool(parsed.scheme)
2264
2271
def hasdriveletter(path):
    """Report whether *path* starts with a Windows drive letter ('c:...').

    The check is a single ASCII letter followed by ':', matching paths
    such as 'c:\\tmp' or 'C:/tmp'. Empty or None input yields False.
    """
    # bool() normalizes the short-circuit result: the original expression
    # leaked '' / None back to the caller for falsy input instead of a
    # proper bool (truthiness is unchanged).
    return bool(path) and path[1:2] == ':' and path[0:1].isalpha()
2267
2274
def urllocalpath(path):
    """Parse *path* (leaving query and fragment untouched) and return
    its local filesystem form."""
    u = url(path, parsequery=False, parsefragment=False)
    return u.localpath()
2270
2277
def hidepassword(u):
    '''hide user credential in a url string'''
    parsed = url(u)
    if parsed.passwd:
        parsed.passwd = '***'
    return str(parsed)
2277
2284
def removeauth(u):
    '''remove all authentication information from a url string'''
    parsed = url(u)
    parsed.user = None
    parsed.passwd = None
    return str(parsed)
2283
2290
def isatty(fd):
    """Best-effort check: does *fd* claim to be a terminal?

    Returns False for objects without a working isatty() method.
    """
    try:
        result = fd.isatty()
    except AttributeError:
        return False
    return result
2289
2296
# Duration pretty-printer: picks a unit (s/ms/us/ns) and 1-3 decimal
# places based on magnitude. Built by unitcountfn — presumably defined
# earlier in this file; not visible here, TODO confirm.
timecount = unitcountfn(
    (1, 1e3, _('%.0f s')),
    (100, 1, _('%.1f s')),
    (10, 1, _('%.2f s')),
    (1, 1, _('%.3f s')),
    (100, 0.001, _('%.1f ms')),
    (10, 0.001, _('%.2f ms')),
    (1, 0.001, _('%.3f ms')),
    (100, 0.000001, _('%.1f us')),
    (10, 0.000001, _('%.2f us')),
    (1, 0.000001, _('%.3f us')),
    (100, 0.000000001, _('%.1f ns')),
    (10, 0.000000001, _('%.2f ns')),
    (1, 0.000000001, _('%.3f ns')),
    )
2305
2312
# current indentation (in spaces) of nested @timed reports on stderr
_timenesting = [0]

def timed(func):
    '''Report the execution time of a function call to stderr.

    During development, use as a decorator when you need to measure
    the cost of a function, e.g. as follows:

    @util.timed
    def foo(a, b, c):
        pass
    '''

    def wrapper(*args, **kwargs):
        step = 2
        _timenesting[0] += step
        begin = time.time()
        try:
            return func(*args, **kwargs)
        finally:
            # report even when func raises, after restoring the nesting
            elapsed = time.time() - begin
            _timenesting[0] -= step
            sys.stderr.write('%s%s: %s\n' %
                             (' ' * _timenesting[0], func.__name__,
                              timecount(elapsed)))
    return wrapper
2332
2339
# suffix -> byte multiplier for sizetoint; checked in order, so the
# two-letter suffixes must be tried before the bare 'b'
_sizeunits = (('m', 2**20), ('k', 2**10), ('g', 2**30),
              ('kb', 2**10), ('mb', 2**20), ('gb', 2**30), ('b', 1))

def sizetoint(s):
    '''Convert a space specifier to a byte count.

    >>> sizetoint('30')
    30
    >>> sizetoint('2.2kb')
    2252
    >>> sizetoint('6M')
    6291456
    '''
    spec = s.strip().lower()
    try:
        for suffix, multiplier in _sizeunits:
            if spec.endswith(suffix):
                return int(float(spec[:-len(suffix)]) * multiplier)
        # no recognized suffix: a plain integer byte count
        return int(spec)
    except ValueError:
        raise error.ParseError(_("couldn't parse size: %s") % s)
2354
2361
class hooks(object):
    '''A collection of hook functions that can be used to extend a
    function's behavior. Hooks are called in lexicographic order,
    based on the names of their sources.'''

    def __init__(self):
        # list of (source, hook) pairs; kept unsorted until invoked
        self._hooks = []

    def add(self, source, hook):
        self._hooks.append((source, hook))

    def __call__(self, *args):
        # sort by source name so invocation order is deterministic
        self._hooks.sort(key=lambda pair: pair[0])
        return [hookfn(*args) for _source, hookfn in self._hooks]
2372
2379
def debugstacktrace(msg='stacktrace', skip=0, f=sys.stderr, otherf=sys.stdout):
    '''Writes a message to f (stderr) with a nicely formatted stacktrace.
    Skips the 'skip' last entries. By default it will flush stdout first.
    It can be used everywhere and do intentionally not require an ui object.
    Not be used in production code but very convenient while developing.
    '''
    if otherf:
        otherf.flush()
    f.write('%s at:\n' % msg)
    # drop the last `skip` frames plus this function's own frame
    stack = traceback.extract_stack()[:-skip - 1]
    entries = [('%s:%s' % (filename, lineno), funcname)
               for filename, lineno, funcname, _line in stack]
    if entries:
        # align the "in <func>" column on the longest location string
        width = max(len(location) for location, _func in entries)
        for location, funcname in entries:
            f.write(' %-*s in %s\n' % (width, location, funcname))
    f.flush()
2389
2396
class dirs(object):
    '''a multiset of directory names from a dirstate or manifest'''

    def __init__(self, map, skip=None):
        # directory name -> number of tracked paths beneath it
        self._dirs = {}
        addpath = self.addpath
        if safehasattr(map, 'iteritems') and skip is not None:
            # dirstate-style mapping: skip entries whose state char
            # equals `skip` (e.g. removed files)
            for f, s in map.iteritems():
                if s[0] != skip:
                    addpath(f)
        else:
            # plain iterable of paths (e.g. a manifest)
            for f in map:
                addpath(f)

    def addpath(self, path):
        dirs = self._dirs
        for base in finddirs(path):
            if base in dirs:
                # this ancestor is known, so all shallower ancestors
                # are already counted too - stop early
                dirs[base] += 1
                return
            dirs[base] = 1

    def delpath(self, path):
        # mirror image of addpath; raises KeyError if `path` was never
        # added
        dirs = self._dirs
        for base in finddirs(path):
            if dirs[base] > 1:
                dirs[base] -= 1
                return
            del dirs[base]

    def __iter__(self):
        return self._dirs.iterkeys()

    def __contains__(self, d):
        return d in self._dirs
2425
2432
# prefer the C implementation of `dirs` when the parsers extension
# module provides one
if safehasattr(parsers, 'dirs'):
    dirs = parsers.dirs
2428
2435
def finddirs(path):
    """Yield each ancestor directory of *path*, deepest first.

    'a/b/c' yields 'a/b' then 'a'; a path without '/' yields nothing.
    """
    sep = path.rfind('/')
    while sep != -1:
        yield path[:sep]
        sep = path.rfind('/', 0, sep)
2434
2441
2435 # compression utility
2442 # compression utility
2436
2443
class nocompress(object):
    """Pass-through 'compressor' used when no compression is wanted."""

    def compress(self, x):
        # identity: hand the chunk back untouched
        return x

    def flush(self):
        # nothing is buffered, so there is nothing left to emit
        return ""
2442
2449
# compression-type marker -> compressor factory
compressors = {
    None: nocompress,
    # lambda to prevent early import
    'BZ': lambda: bz2.BZ2Compressor(),
    'GZ': lambda: zlib.compressobj(),
    }
# also support the old form by courtesies
compressors['UN'] = compressors[None]
2451
2458
def _makedecompressor(decompcls):
    """Return a function mapping a file handle to a chunkbuffer over the
    stream decompressed with instances of *decompcls*."""
    def _chunks(fh):
        # the decompressor is created lazily, on first iteration
        decompressor = decompcls()
        for chunk in filechunkiter(fh):
            yield decompressor.decompress(chunk)
    def func(fh):
        return chunkbuffer(_chunks(fh))
    return func
2460
2467
def _bz2():
    """Create a bz2 decompressor primed for a header-stripped stream."""
    decomp = bz2.BZ2Decompressor()
    # Bzip2 stream start with BZ, but we stripped it.
    # we put it back for good measure.
    decomp.decompress('BZ')
    return decomp
2467
2474
# compression-type marker -> function(fh) returning a readable handle on
# the decompressed stream. '_truncatedBZ' handles bzip2 data whose 'BZ'
# header was stripped off by the caller.
decompressors = {None: lambda fh: fh,
                 '_truncatedBZ': _makedecompressor(_bz2),
                 'BZ': _makedecompressor(lambda: bz2.BZ2Decompressor()),
                 'GZ': _makedecompressor(lambda: zlib.decompressobj()),
                 }
# also support the old form by courtesies
decompressors['UN'] = decompressors[None]
2475
2482
# convenient shortcut, cheap to type while debugging
dst = debugstacktrace
General Comments 0
You need to be logged in to leave comments. Login now