date: fix boundary check of negative integer
Florent Gallaire
r28864:b0811a9f default
mercurial/util.py
@@ -1,2741 +1,2741 @@
# util.py - Mercurial utility functions and platform specific implementations
#
# Copyright 2005 K. Thananchayan <thananck@yahoo.com>
# Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
# Copyright 2006 Vadim Gelfer <vadim.gelfer@gmail.com>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.

"""Mercurial utility functions and platform specific implementations.

This contains helper routines that are independent of the SCM core and
hide platform-specific details from the core.
"""

from __future__ import absolute_import

import bz2
import calendar
import collections
import datetime
import errno
import gc
import hashlib
import imp
import os
import re as remod
import shutil
import signal
import socket
import subprocess
import sys
import tempfile
import textwrap
import time
import traceback
import urllib
import zlib

from . import (
    encoding,
    error,
    i18n,
    osutil,
    parsers,
    pycompat,
)

for attr in (
    'empty',
    'queue',
    'stringio',
):
    globals()[attr] = getattr(pycompat, attr)

if os.name == 'nt':
    from . import windows as platform
else:
    from . import posix as platform

md5 = hashlib.md5
sha1 = hashlib.sha1
sha512 = hashlib.sha512
_ = i18n._

cachestat = platform.cachestat
checkexec = platform.checkexec
checklink = platform.checklink
copymode = platform.copymode
executablepath = platform.executablepath
expandglobs = platform.expandglobs
explainexit = platform.explainexit
findexe = platform.findexe
gethgcmd = platform.gethgcmd
getuser = platform.getuser
getpid = os.getpid
groupmembers = platform.groupmembers
groupname = platform.groupname
hidewindow = platform.hidewindow
isexec = platform.isexec
isowner = platform.isowner
localpath = platform.localpath
lookupreg = platform.lookupreg
makedir = platform.makedir
nlinks = platform.nlinks
normpath = platform.normpath
normcase = platform.normcase
normcasespec = platform.normcasespec
normcasefallback = platform.normcasefallback
openhardlinks = platform.openhardlinks
oslink = platform.oslink
parsepatchoutput = platform.parsepatchoutput
pconvert = platform.pconvert
poll = platform.poll
popen = platform.popen
posixfile = platform.posixfile
quotecommand = platform.quotecommand
readpipe = platform.readpipe
rename = platform.rename
removedirs = platform.removedirs
samedevice = platform.samedevice
samefile = platform.samefile
samestat = platform.samestat
setbinary = platform.setbinary
setflags = platform.setflags
setsignalhandler = platform.setsignalhandler
shellquote = platform.shellquote
spawndetached = platform.spawndetached
split = platform.split
sshargs = platform.sshargs
statfiles = getattr(osutil, 'statfiles', platform.statfiles)
statisexec = platform.statisexec
statislink = platform.statislink
termwidth = platform.termwidth
testpid = platform.testpid
umask = platform.umask
unlink = platform.unlink
unlinkpath = platform.unlinkpath
username = platform.username

# Python compatibility

_notset = object()

# disable Python's problematic floating point timestamps (issue4836)
# (Python hypocritically says you shouldn't change this behavior in
# libraries, and sure enough Mercurial is not a library.)
os.stat_float_times(False)

def safehasattr(thing, attr):
    return getattr(thing, attr, _notset) is not _notset

DIGESTS = {
    'md5': md5,
    'sha1': sha1,
    'sha512': sha512,
}
# List of digest types from strongest to weakest
DIGESTS_BY_STRENGTH = ['sha512', 'sha1', 'md5']

for k in DIGESTS_BY_STRENGTH:
    assert k in DIGESTS

class digester(object):
    """helper to compute digests.

    This helper can be used to compute one or more digests given their name.

    >>> d = digester(['md5', 'sha1'])
    >>> d.update('foo')
    >>> [k for k in sorted(d)]
    ['md5', 'sha1']
    >>> d['md5']
    'acbd18db4cc2f85cedef654fccc4a4d8'
    >>> d['sha1']
    '0beec7b5ea3f0fdbc95d0dd47f3c5bc275da8a33'
    >>> digester.preferred(['md5', 'sha1'])
    'sha1'
    """

    def __init__(self, digests, s=''):
        self._hashes = {}
        for k in digests:
            if k not in DIGESTS:
                raise Abort(_('unknown digest type: %s') % k)
            self._hashes[k] = DIGESTS[k]()
        if s:
            self.update(s)

    def update(self, data):
        for h in self._hashes.values():
            h.update(data)

    def __getitem__(self, key):
        if key not in DIGESTS:
            raise Abort(_('unknown digest type: %s') % key)
        return self._hashes[key].hexdigest()

    def __iter__(self):
        return iter(self._hashes)

    @staticmethod
    def preferred(supported):
        """returns the strongest digest type in both supported and DIGESTS."""

        for k in DIGESTS_BY_STRENGTH:
            if k in supported:
                return k
        return None

class digestchecker(object):
    """file handle wrapper that additionally checks content against a given
    size and digests.

        d = digestchecker(fh, size, {'md5': '...'})

    When multiple digests are given, all of them are validated.
    """

    def __init__(self, fh, size, digests):
        self._fh = fh
        self._size = size
        self._got = 0
        self._digests = dict(digests)
        self._digester = digester(self._digests.keys())

    def read(self, length=-1):
        content = self._fh.read(length)
        self._digester.update(content)
        self._got += len(content)
        return content

    def validate(self):
        if self._size != self._got:
            raise Abort(_('size mismatch: expected %d, got %d') %
                        (self._size, self._got))
        for k, v in self._digests.items():
            if v != self._digester[k]:
                # i18n: first parameter is a digest name
                raise Abort(_('%s mismatch: expected %s, got %s') %
                            (k, v, self._digester[k]))

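# Illustrative sketch (not part of util.py): one way digestchecker is meant to
# be driven when verifying downloaded content, assuming 'fh' is an open binary
# file object and 'expectedsize'/'expectedsha1' are known in advance:
#
#     d = digestchecker(fh, expectedsize, {'sha1': expectedsha1})
#     while d.read(4096):
#         pass
#     d.validate()   # raises Abort on a size or digest mismatch
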
try:
    buffer = buffer
except NameError:
    if sys.version_info[0] < 3:
        def buffer(sliceable, offset=0):
            return sliceable[offset:]
    else:
        def buffer(sliceable, offset=0):
            return memoryview(sliceable)[offset:]

closefds = os.name == 'posix'

_chunksize = 4096

class bufferedinputpipe(object):
    """a manually buffered input pipe

    Python will not let us use buffered IO and lazy reading with 'polling' at
    the same time. We cannot probe the buffer state and select will not detect
    that data are ready to read if they are already buffered.

    This class lets us work around that by implementing its own buffering
    (allowing efficient readline) while offering a way to know if the buffer is
    empty from the output (allowing collaboration of the buffer with polling).

    This class lives in the 'util' module because it makes use of the 'os'
    module from the python stdlib.
    """

    def __init__(self, input):
        self._input = input
        self._buffer = []
        self._eof = False
        self._lenbuf = 0

    @property
    def hasbuffer(self):
        """True if any data is currently buffered

        This will be used externally as a pre-step for polling IO. If there is
        already data then no polling should be set in place."""
        return bool(self._buffer)

    @property
    def closed(self):
        return self._input.closed

    def fileno(self):
        return self._input.fileno()

    def close(self):
        return self._input.close()

    def read(self, size):
        while (not self._eof) and (self._lenbuf < size):
            self._fillbuffer()
        return self._frombuffer(size)

    def readline(self, *args, **kwargs):
        if 1 < len(self._buffer):
            # this should not happen because both read and readline end with a
            # _frombuffer call that collapses it.
            self._buffer = [''.join(self._buffer)]
            self._lenbuf = len(self._buffer[0])
        lfi = -1
        if self._buffer:
            lfi = self._buffer[-1].find('\n')
        while (not self._eof) and lfi < 0:
            self._fillbuffer()
            if self._buffer:
                lfi = self._buffer[-1].find('\n')
        size = lfi + 1
        if lfi < 0: # end of file
            size = self._lenbuf
        elif 1 < len(self._buffer):
            # we need to take previous chunks into account
            size += self._lenbuf - len(self._buffer[-1])
        return self._frombuffer(size)

    def _frombuffer(self, size):
        """return at most 'size' data from the buffer

        The data are removed from the buffer."""
        if size == 0 or not self._buffer:
            return ''
        buf = self._buffer[0]
        if 1 < len(self._buffer):
            buf = ''.join(self._buffer)

        data = buf[:size]
        buf = buf[len(data):]
        if buf:
            self._buffer = [buf]
            self._lenbuf = len(buf)
        else:
            self._buffer = []
            self._lenbuf = 0
        return data

    def _fillbuffer(self):
        """read data to the buffer"""
        data = os.read(self._input.fileno(), _chunksize)
        if not data:
            self._eof = True
        else:
            self._lenbuf += len(data)
            self._buffer.append(data)

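# Illustrative sketch (not part of util.py): wrapping a subprocess pipe so the
# caller can check for buffered data before blocking in a poll-style loop.
# 'proc' is assumed to be a subprocess.Popen created with stdout=PIPE:
#
#     pipe = bufferedinputpipe(proc.stdout)
#     if not pipe.hasbuffer:
#         pass   # nothing buffered yet: safe to wait on pipe.fileno() first
#     line = pipe.readline()
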
def popen2(cmd, env=None, newlines=False):
    # Setting bufsize to -1 lets the system decide the buffer size.
    # The default for bufsize is 0, meaning unbuffered. This leads to
    # poor performance on Mac OS X: http://bugs.python.org/issue4194
    p = subprocess.Popen(cmd, shell=True, bufsize=-1,
                         close_fds=closefds,
                         stdin=subprocess.PIPE, stdout=subprocess.PIPE,
                         universal_newlines=newlines,
                         env=env)
    return p.stdin, p.stdout

def popen3(cmd, env=None, newlines=False):
    stdin, stdout, stderr, p = popen4(cmd, env, newlines)
    return stdin, stdout, stderr

def popen4(cmd, env=None, newlines=False, bufsize=-1):
    p = subprocess.Popen(cmd, shell=True, bufsize=bufsize,
                         close_fds=closefds,
                         stdin=subprocess.PIPE, stdout=subprocess.PIPE,
                         stderr=subprocess.PIPE,
                         universal_newlines=newlines,
                         env=env)
    return p.stdin, p.stdout, p.stderr, p

def version():
    """Return version information if available."""
    try:
        from . import __version__
        return __version__.version
    except ImportError:
        return 'unknown'

def versiontuple(v=None, n=4):
    """Parses a Mercurial version string into an N-tuple.

    The version string to be parsed is specified with the ``v`` argument.
    If it isn't defined, the current Mercurial version string will be parsed.

    ``n`` can be 2, 3, or 4. Here is how some version strings map to
    returned values:

    >>> v = '3.6.1+190-df9b73d2d444'
    >>> versiontuple(v, 2)
    (3, 6)
    >>> versiontuple(v, 3)
    (3, 6, 1)
    >>> versiontuple(v, 4)
    (3, 6, 1, '190-df9b73d2d444')

    >>> versiontuple('3.6.1+190-df9b73d2d444+20151118')
    (3, 6, 1, '190-df9b73d2d444+20151118')

    >>> v = '3.6'
    >>> versiontuple(v, 2)
    (3, 6)
    >>> versiontuple(v, 3)
    (3, 6, None)
    >>> versiontuple(v, 4)
    (3, 6, None, None)
    """
    if not v:
        v = version()
    parts = v.split('+', 1)
    if len(parts) == 1:
        vparts, extra = parts[0], None
    else:
        vparts, extra = parts

    vints = []
    for i in vparts.split('.'):
        try:
            vints.append(int(i))
        except ValueError:
            break
    # (3, 6) -> (3, 6, None)
    while len(vints) < 3:
        vints.append(None)

    if n == 2:
        return (vints[0], vints[1])
    if n == 3:
        return (vints[0], vints[1], vints[2])
    if n == 4:
        return (vints[0], vints[1], vints[2], extra)

# used by parsedate
defaultdateformats = (
    '%Y-%m-%d %H:%M:%S',
    '%Y-%m-%d %I:%M:%S%p',
    '%Y-%m-%d %H:%M',
    '%Y-%m-%d %I:%M%p',
    '%Y-%m-%d',
    '%m-%d',
    '%m/%d',
    '%m/%d/%y',
    '%m/%d/%Y',
    '%a %b %d %H:%M:%S %Y',
    '%a %b %d %I:%M:%S%p %Y',
    '%a, %d %b %Y %H:%M:%S', # GNU coreutils "/bin/date --rfc-2822"
    '%b %d %H:%M:%S %Y',
    '%b %d %I:%M:%S%p %Y',
    '%b %d %H:%M:%S',
    '%b %d %I:%M:%S%p',
    '%b %d %H:%M',
    '%b %d %I:%M%p',
    '%b %d %Y',
    '%b %d',
    '%H:%M:%S',
    '%I:%M:%S%p',
    '%H:%M',
    '%I:%M%p',
)

extendeddateformats = defaultdateformats + (
    "%Y",
    "%Y-%m",
    "%b",
    "%b %Y",
)

def cachefunc(func):
    '''cache the result of function calls'''
    # XXX doesn't handle keyword args
    if func.__code__.co_argcount == 0:
        cache = []
        def f():
            if len(cache) == 0:
                cache.append(func())
            return cache[0]
        return f
    cache = {}
    if func.__code__.co_argcount == 1:
        # we gain a small amount of time because
        # we don't need to pack/unpack the list
        def f(arg):
            if arg not in cache:
                cache[arg] = func(arg)
            return cache[arg]
    else:
        def f(*args):
            if args not in cache:
                cache[args] = func(*args)
            return cache[args]

    return f

class sortdict(dict):
    '''a simple sorted dictionary'''
    def __init__(self, data=None):
        self._list = []
        if data:
            self.update(data)
    def copy(self):
        return sortdict(self)
    def __setitem__(self, key, val):
        if key in self:
            self._list.remove(key)
        self._list.append(key)
        dict.__setitem__(self, key, val)
    def __iter__(self):
        return self._list.__iter__()
    def update(self, src):
        if isinstance(src, dict):
            src = src.iteritems()
        for k, v in src:
            self[k] = v
    def clear(self):
        dict.clear(self)
        self._list = []
    def items(self):
        return [(k, self[k]) for k in self._list]
    def __delitem__(self, key):
        dict.__delitem__(self, key)
        self._list.remove(key)
    def pop(self, key, *args, **kwargs):
        dict.pop(self, key, *args, **kwargs)
        try:
            self._list.remove(key)
        except ValueError:
            pass
    def keys(self):
        return self._list
    def iterkeys(self):
        return self._list.__iter__()
    def iteritems(self):
        for k in self._list:
            yield k, self[k]
    def insert(self, index, key, val):
        self._list.insert(index, key)
        dict.__setitem__(self, key, val)

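# Illustrative sketch (not part of util.py): sortdict keeps keys in insertion
# order, and re-setting a key moves it to the end:
#
#     d = sortdict()
#     d['b'] = 1
#     d['a'] = 2
#     d.keys()    # ['b', 'a']
#     d['b'] = 3
#     d.keys()    # ['a', 'b']
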
class _lrucachenode(object):
    """A node in a doubly linked list.

    Holds a reference to nodes on either side as well as a key-value
    pair for the dictionary entry.
    """
    __slots__ = ('next', 'prev', 'key', 'value')

    def __init__(self):
        self.next = None
        self.prev = None

        self.key = _notset
        self.value = None

    def markempty(self):
        """Mark the node as emptied."""
        self.key = _notset

class lrucachedict(object):
    """Dict that caches most recent accesses and sets.

    The dict consists of an actual backing dict - indexed by original
    key - and a doubly linked circular list defining the order of entries in
    the cache.

    The head node is the newest entry in the cache. If the cache is full,
    we recycle head.prev and make it the new head. Cache accesses result in
    the node being moved to before the existing head and being marked as the
    new head node.
    """
    def __init__(self, max):
        self._cache = {}

        self._head = head = _lrucachenode()
        head.prev = head
        head.next = head
        self._size = 1
        self._capacity = max

    def __len__(self):
        return len(self._cache)

    def __contains__(self, k):
        return k in self._cache

    def __iter__(self):
        # We don't have to iterate in cache order, but why not.
        n = self._head
        for i in range(len(self._cache)):
            yield n.key
            n = n.next

    def __getitem__(self, k):
        node = self._cache[k]
        self._movetohead(node)
        return node.value

    def __setitem__(self, k, v):
        node = self._cache.get(k)
        # Replace existing value and mark as newest.
        if node is not None:
            node.value = v
            self._movetohead(node)
            return

        if self._size < self._capacity:
            node = self._addcapacity()
        else:
            # Grab the last/oldest item.
            node = self._head.prev

        # At capacity. Kill the old entry.
        if node.key is not _notset:
            del self._cache[node.key]

        node.key = k
        node.value = v
        self._cache[k] = node
        # And mark it as newest entry. No need to adjust order since it
        # is already self._head.prev.
        self._head = node

    def __delitem__(self, k):
        node = self._cache.pop(k)
        node.markempty()

        # Temporarily mark as newest item before re-adjusting head to make
        # this node the oldest item.
        self._movetohead(node)
        self._head = node.next

    # Additional dict methods.

    def get(self, k, default=None):
        try:
            return self._cache[k].value
        except KeyError:
            return default

    def clear(self):
        n = self._head
        while n.key is not _notset:
            n.markempty()
            n = n.next

        self._cache.clear()

    def copy(self):
        result = lrucachedict(self._capacity)
        n = self._head.prev
        # Iterate in oldest-to-newest order, so the copy has the right ordering
        for i in range(len(self._cache)):
            result[n.key] = n.value
            n = n.prev
        return result

    def _movetohead(self, node):
        """Mark a node as the newest, making it the new head.

        When a node is accessed, it becomes the freshest entry in the LRU
        list, which is denoted by self._head.

        Visually, let's make ``N`` the new head node (* denotes head):

            previous/oldest <-> head <-> next/next newest

            ----<->--- A* ---<->-----
            |                       |
            E <-> D <-> N <-> C <-> B

        To:

            ----<->--- N* ---<->-----
            |                       |
            E <-> D <-> C <-> B <-> A

        This requires the following moves:

           C.next = D  (node.prev.next = node.next)
           D.prev = C  (node.next.prev = node.prev)
           E.next = N  (head.prev.next = node)
           N.prev = E  (node.prev = head.prev)
           N.next = A  (node.next = head)
           A.prev = N  (head.prev = node)
        """
        head = self._head
        # C.next = D
        node.prev.next = node.next
        # D.prev = C
        node.next.prev = node.prev
        # N.prev = E
        node.prev = head.prev
        # N.next = A
        # It is tempting to do just "head" here, however if node is
        # adjacent to head, this will do bad things.
        node.next = head.prev.next
        # E.next = N
        node.next.prev = node
        # A.prev = N
        node.prev.next = node

        self._head = node

    def _addcapacity(self):
        """Add a node to the circular linked list.

        The new node is inserted before the head node.
        """
        head = self._head
        node = _lrucachenode()
        head.prev.next = node
        node.prev = head.prev
        node.next = head
        head.prev = node
        self._size += 1
        return node

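# Illustrative sketch (not part of util.py): basic lrucachedict behavior with a
# capacity of two entries:
#
#     c = lrucachedict(2)
#     c['a'] = 1
#     c['b'] = 2
#     c['a']         # access refreshes 'a'
#     c['c'] = 3     # evicts 'b', the least recently used entry
#     'b' in c       # False
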
def lrucachefunc(func):
    '''cache most recent results of function calls'''
    cache = {}
    order = collections.deque()
    if func.__code__.co_argcount == 1:
        def f(arg):
            if arg not in cache:
                if len(cache) > 20:
                    del cache[order.popleft()]
                cache[arg] = func(arg)
            else:
                order.remove(arg)
            order.append(arg)
            return cache[arg]
    else:
        def f(*args):
            if args not in cache:
                if len(cache) > 20:
                    del cache[order.popleft()]
                cache[args] = func(*args)
            else:
                order.remove(args)
            order.append(args)
            return cache[args]

    return f

class propertycache(object):
    def __init__(self, func):
        self.func = func
        self.name = func.__name__
    def __get__(self, obj, type=None):
        result = self.func(obj)
        self.cachevalue(obj, result)
        return result

    def cachevalue(self, obj, value):
        # __dict__ assignment required to bypass __setattr__ (eg: repoview)
        obj.__dict__[self.name] = value

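# Illustrative sketch (not part of util.py): propertycache computes an
# attribute once, then stores the result on the instance so later lookups
# bypass the descriptor ('loadstate' is a hypothetical helper):
#
#     class example(object):
#         @propertycache
#         def state(self):
#             return loadstate()
#
#     e = example()
#     e.state   # computed and cached in e.__dict__
#     e.state   # served from the instance dict, no recomputation
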
def pipefilter(s, cmd):
    '''filter string S through command CMD, returning its output'''
    p = subprocess.Popen(cmd, shell=True, close_fds=closefds,
                         stdin=subprocess.PIPE, stdout=subprocess.PIPE)
    pout, perr = p.communicate(s)
    return pout

def tempfilter(s, cmd):
    '''filter string S through a pair of temporary files with CMD.
    CMD is used as a template to create the real command to be run,
    with the strings INFILE and OUTFILE replaced by the real names of
    the temporary files generated.'''
    inname, outname = None, None
    try:
        infd, inname = tempfile.mkstemp(prefix='hg-filter-in-')
        fp = os.fdopen(infd, 'wb')
        fp.write(s)
        fp.close()
        outfd, outname = tempfile.mkstemp(prefix='hg-filter-out-')
        os.close(outfd)
        cmd = cmd.replace('INFILE', inname)
        cmd = cmd.replace('OUTFILE', outname)
        code = os.system(cmd)
        if sys.platform == 'OpenVMS' and code & 1:
            code = 0
        if code:
            raise Abort(_("command '%s' failed: %s") %
                        (cmd, explainexit(code)))
        return readfile(outname)
    finally:
        try:
            if inname:
                os.unlink(inname)
        except OSError:
            pass
        try:
            if outname:
                os.unlink(outname)
        except OSError:
            pass

filtertable = {
    'tempfile:': tempfilter,
    'pipe:': pipefilter,
}

def filter(s, cmd):
    "filter a string through a command that transforms its input to its output"
    for name, fn in filtertable.iteritems():
        if cmd.startswith(name):
            return fn(s, cmd[len(name):].lstrip())
    return pipefilter(s, cmd)

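# Illustrative sketch (not part of util.py): filter() dispatches on the command
# prefixes registered in filtertable; the shell commands here are only examples:
#
#     filter(s, 'pipe: tr a-z A-Z')                           # explicit pipefilter
#     filter(s, 'tempfile: sed -e s/a/b/ INFILE > OUTFILE')   # tempfilter
#     filter(s, 'tr a-z A-Z')                                 # no prefix: pipefilter
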
def binary(s):
    """return true if a string is binary data"""
    return bool(s and '\0' in s)

def increasingchunks(source, min=1024, max=65536):
    '''return no less than min bytes per chunk while data remains,
    doubling min after each chunk until it reaches max'''
    def log2(x):
        if not x:
            return 0
        i = 0
        while x:
            x >>= 1
            i += 1
        return i - 1

    buf = []
    blen = 0
    for chunk in source:
        buf.append(chunk)
        blen += len(chunk)
        if blen >= min:
            if min < max:
                min = min << 1
                nmin = 1 << log2(blen)
                if nmin > min:
                    min = nmin
                if min > max:
                    min = max
            yield ''.join(buf)
            blen = 0
            buf = []
    if buf:
        yield ''.join(buf)

Abort = error.Abort

def always(fn):
    return True

def never(fn):
    return False

def nogc(func):
    """disable garbage collector

    Python's garbage collector triggers a GC each time a certain number of
    container objects (the number being defined by gc.get_threshold()) are
    allocated even when marked not to be tracked by the collector. Tracking has
    no effect on when GCs are triggered, only on what objects the GC looks
    into. As a workaround, disable GC while building complex (huge)
    containers.

    This garbage collector issue has been fixed in 2.7.
    """
    def wrapper(*args, **kwargs):
        gcenabled = gc.isenabled()
        gc.disable()
        try:
            return func(*args, **kwargs)
        finally:
            if gcenabled:
                gc.enable()
    return wrapper

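# Illustrative sketch (not part of util.py): nogc is applied as a decorator
# around code that builds large containers in one go:
#
#     @nogc
#     def buildindex(items):
#         # the collector stays disabled for the duration of this call
#         return dict((i, True) for i in items)
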
def pathto(root, n1, n2):
    '''return the relative path from one place to another.
    root should use os.sep to separate directories
    n1 should use os.sep to separate directories
    n2 should use "/" to separate directories
    returns an os.sep-separated path.

    If n1 is a relative path, it's assumed it's
    relative to root.
    n2 should always be relative to root.
    '''
    if not n1:
        return localpath(n2)
    if os.path.isabs(n1):
        if os.path.splitdrive(root)[0] != os.path.splitdrive(n1)[0]:
            return os.path.join(root, localpath(n2))
        n2 = '/'.join((pconvert(root), n2))
    a, b = splitpath(n1), n2.split('/')
    a.reverse()
    b.reverse()
    while a and b and a[-1] == b[-1]:
        a.pop()
        b.pop()
    b.reverse()
    return os.sep.join((['..'] * len(a)) + b) or '.'

def mainfrozen():
    """return True if we are a frozen executable.

    The code supports py2exe (most common, Windows only) and tools/freeze
    (portable, not much used).
    """
    return (safehasattr(sys, "frozen") or # new py2exe
            safehasattr(sys, "importers") or # old py2exe
            imp.is_frozen("__main__")) # tools/freeze

# the location of data files matching the source code
if mainfrozen() and getattr(sys, 'frozen', None) != 'macosx_app':
    # executable version (py2exe) doesn't support __file__
    datapath = os.path.dirname(sys.executable)
else:
    datapath = os.path.dirname(__file__)

i18n.setdatapath(datapath)

_hgexecutable = None

def hgexecutable():
    """return location of the 'hg' executable.

    Defaults to $HG or 'hg' in the search path.
    """
    if _hgexecutable is None:
        hg = os.environ.get('HG')
        mainmod = sys.modules['__main__']
        if hg:
            _sethgexecutable(hg)
        elif mainfrozen():
            if getattr(sys, 'frozen', None) == 'macosx_app':
                # Env variable set by py2app
                _sethgexecutable(os.environ['EXECUTABLEPATH'])
            else:
                _sethgexecutable(sys.executable)
        elif os.path.basename(getattr(mainmod, '__file__', '')) == 'hg':
            _sethgexecutable(mainmod.__file__)
        else:
            exe = findexe('hg') or os.path.basename(sys.argv[0])
            _sethgexecutable(exe)
    return _hgexecutable

def _sethgexecutable(path):
    """set location of the 'hg' executable"""
    global _hgexecutable
    _hgexecutable = path

def _isstdout(f):
    fileno = getattr(f, 'fileno', None)
    return fileno and fileno() == sys.__stdout__.fileno()

def system(cmd, environ=None, cwd=None, onerr=None, errprefix=None, out=None):
    '''enhanced shell command execution.
    run with environment maybe modified, maybe in different dir.

    if command fails and onerr is None, return status, else raise onerr
    object as exception.

    if out is specified, it is assumed to be a file-like object that has a
    write() method. stdout and stderr will be redirected to out.'''
    if environ is None:
        environ = {}
    try:
        sys.stdout.flush()
    except Exception:
        pass
    def py2shell(val):
        'convert python object into string that is useful to shell'
        if val is None or val is False:
            return '0'
        if val is True:
            return '1'
        return str(val)
    origcmd = cmd
    cmd = quotecommand(cmd)
    if sys.platform == 'plan9' and (sys.version_info[0] == 2
                                    and sys.version_info[1] < 7):
        # subprocess kludge to work around issues in half-baked Python
        # ports, notably bichued/python:
        if not cwd is None:
            os.chdir(cwd)
        rc = os.system(cmd)
    else:
        env = dict(os.environ)
        env.update((k, py2shell(v)) for k, v in environ.iteritems())
        env['HG'] = hgexecutable()
        if out is None or _isstdout(out):
            rc = subprocess.call(cmd, shell=True, close_fds=closefds,
                                 env=env, cwd=cwd)
        else:
            proc = subprocess.Popen(cmd, shell=True, close_fds=closefds,
                                    env=env, cwd=cwd, stdout=subprocess.PIPE,
                                    stderr=subprocess.STDOUT)
            while True:
                line = proc.stdout.readline()
                if not line:
                    break
                out.write(line)
            proc.wait()
            rc = proc.returncode
    if sys.platform == 'OpenVMS' and rc & 1:
        rc = 0
    if rc and onerr:
        errmsg = '%s %s' % (os.path.basename(origcmd.split(None, 1)[0]),
                            explainexit(rc)[0])
        if errprefix:
            errmsg = '%s: %s' % (errprefix, errmsg)
        raise onerr(errmsg)
    return rc

def checksignature(func):
    '''wrap a function with code to check for calling errors'''
    def check(*args, **kwargs):
        try:
            return func(*args, **kwargs)
        except TypeError:
            if len(traceback.extract_tb(sys.exc_info()[2])) == 1:
                raise error.SignatureError
            raise

    return check

def copyfile(src, dest, hardlink=False, copystat=False):
    '''copy a file, preserving mode and optionally other stat info like
    atime/mtime'''
    if os.path.lexists(dest):
        unlink(dest)
    # hardlinks are problematic on CIFS, quietly ignore this flag
    # until we find a way to work around it cleanly (issue4546)
    if False and hardlink:
        try:
            oslink(src, dest)
            return
        except (IOError, OSError):
            pass # fall back to normal copy
    if os.path.islink(src):
        os.symlink(os.readlink(src), dest)
        # copytime is ignored for symlinks, but in general copytime isn't needed
        # for them anyway
    else:
        try:
            shutil.copyfile(src, dest)
            if copystat:
                # copystat also copies mode
                shutil.copystat(src, dest)
            else:
                shutil.copymode(src, dest)
        except shutil.Error as inst:
            raise Abort(str(inst))

def copyfiles(src, dst, hardlink=None, progress=lambda t, pos: None):
    """Copy a directory tree using hardlinks if possible."""
    num = 0

    if hardlink is None:
        hardlink = (os.stat(src).st_dev ==
                    os.stat(os.path.dirname(dst)).st_dev)
    if hardlink:
        topic = _('linking')
    else:
        topic = _('copying')

    if os.path.isdir(src):
        os.mkdir(dst)
        for name, kind in osutil.listdir(src):
            srcname = os.path.join(src, name)
            dstname = os.path.join(dst, name)
            def nprog(t, pos):
                if pos is not None:
                    return progress(t, pos + num)
            hardlink, n = copyfiles(srcname, dstname, hardlink, progress=nprog)
            num += n
    else:
        if hardlink:
            try:
                oslink(src, dst)
            except (IOError, OSError):
                hardlink = False
                shutil.copy(src, dst)
        else:
            shutil.copy(src, dst)
        num += 1
        progress(topic, num)
    progress(topic, None)

    return hardlink, num

_winreservednames = '''con prn aux nul
    com1 com2 com3 com4 com5 com6 com7 com8 com9
    lpt1 lpt2 lpt3 lpt4 lpt5 lpt6 lpt7 lpt8 lpt9'''.split()
_winreservedchars = ':*?"<>|'
def checkwinfilename(path):
    r'''Check that the base-relative path is a valid filename on Windows.
    Returns None if the path is ok, or a UI string describing the problem.

    >>> checkwinfilename("just/a/normal/path")
    >>> checkwinfilename("foo/bar/con.xml")
    "filename contains 'con', which is reserved on Windows"
    >>> checkwinfilename("foo/con.xml/bar")
    "filename contains 'con', which is reserved on Windows"
    >>> checkwinfilename("foo/bar/xml.con")
    >>> checkwinfilename("foo/bar/AUX/bla.txt")
    "filename contains 'AUX', which is reserved on Windows"
    >>> checkwinfilename("foo/bar/bla:.txt")
    "filename contains ':', which is reserved on Windows"
    >>> checkwinfilename("foo/bar/b\07la.txt")
1092 "filename contains '\\x07', which is invalid on Windows"
1092 "filename contains '\\x07', which is invalid on Windows"
1093 >>> checkwinfilename("foo/bar/bla ")
1093 >>> checkwinfilename("foo/bar/bla ")
1094 "filename ends with ' ', which is not allowed on Windows"
1094 "filename ends with ' ', which is not allowed on Windows"
1095 >>> checkwinfilename("../bar")
1095 >>> checkwinfilename("../bar")
1096 >>> checkwinfilename("foo\\")
1096 >>> checkwinfilename("foo\\")
1097 "filename ends with '\\', which is invalid on Windows"
1097 "filename ends with '\\', which is invalid on Windows"
1098 >>> checkwinfilename("foo\\/bar")
1098 >>> checkwinfilename("foo\\/bar")
1099 "directory name ends with '\\', which is invalid on Windows"
1099 "directory name ends with '\\', which is invalid on Windows"
1100 '''
1100 '''
1101 if path.endswith('\\'):
1101 if path.endswith('\\'):
1102 return _("filename ends with '\\', which is invalid on Windows")
1102 return _("filename ends with '\\', which is invalid on Windows")
1103 if '\\/' in path:
1103 if '\\/' in path:
1104 return _("directory name ends with '\\', which is invalid on Windows")
1104 return _("directory name ends with '\\', which is invalid on Windows")
1105 for n in path.replace('\\', '/').split('/'):
1105 for n in path.replace('\\', '/').split('/'):
1106 if not n:
1106 if not n:
1107 continue
1107 continue
1108 for c in n:
1108 for c in n:
1109 if c in _winreservedchars:
1109 if c in _winreservedchars:
1110 return _("filename contains '%s', which is reserved "
1110 return _("filename contains '%s', which is reserved "
1111 "on Windows") % c
1111 "on Windows") % c
1112 if ord(c) <= 31:
1112 if ord(c) <= 31:
1113 return _("filename contains %r, which is invalid "
1113 return _("filename contains %r, which is invalid "
1114 "on Windows") % c
1114 "on Windows") % c
1115 base = n.split('.')[0]
1115 base = n.split('.')[0]
1116 if base and base.lower() in _winreservednames:
1116 if base and base.lower() in _winreservednames:
1117 return _("filename contains '%s', which is reserved "
1117 return _("filename contains '%s', which is reserved "
1118 "on Windows") % base
1118 "on Windows") % base
1119 t = n[-1]
1119 t = n[-1]
1120 if t in '. ' and n not in '..':
1120 if t in '. ' and n not in '..':
1121 return _("filename ends with '%s', which is not allowed "
1121 return _("filename ends with '%s', which is not allowed "
1122 "on Windows") % t
1122 "on Windows") % t
1123
1123
1124 if os.name == 'nt':
1124 if os.name == 'nt':
1125 checkosfilename = checkwinfilename
1125 checkosfilename = checkwinfilename
1126 else:
1126 else:
1127 checkosfilename = platform.checkosfilename
1127 checkosfilename = platform.checkosfilename
1128
1128
1129 def makelock(info, pathname):
1129 def makelock(info, pathname):
1130 try:
1130 try:
1131 return os.symlink(info, pathname)
1131 return os.symlink(info, pathname)
1132 except OSError as why:
1132 except OSError as why:
1133 if why.errno == errno.EEXIST:
1133 if why.errno == errno.EEXIST:
1134 raise
1134 raise
1135 except AttributeError: # no symlink in os
1135 except AttributeError: # no symlink in os
1136 pass
1136 pass
1137
1137
1138 ld = os.open(pathname, os.O_CREAT | os.O_WRONLY | os.O_EXCL)
1138 ld = os.open(pathname, os.O_CREAT | os.O_WRONLY | os.O_EXCL)
1139 os.write(ld, info)
1139 os.write(ld, info)
1140 os.close(ld)
1140 os.close(ld)
1141
1141
1142 def readlock(pathname):
1142 def readlock(pathname):
1143 try:
1143 try:
1144 return os.readlink(pathname)
1144 return os.readlink(pathname)
1145 except OSError as why:
1145 except OSError as why:
1146 if why.errno not in (errno.EINVAL, errno.ENOSYS):
1146 if why.errno not in (errno.EINVAL, errno.ENOSYS):
1147 raise
1147 raise
1148 except AttributeError: # no symlink in os
1148 except AttributeError: # no symlink in os
1149 pass
1149 pass
1150 fp = posixfile(pathname)
1150 fp = posixfile(pathname)
1151 r = fp.read()
1151 r = fp.read()
1152 fp.close()
1152 fp.close()
1153 return r
1153 return r
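# Taken together, makelock() above prefers to encode the info string as the
# target of a symlink; on platforms without os.symlink it falls back to an
# exclusively-created regular file, and readlock() mirrors both cases
# (readlink first, then an ordinary read of the file).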
1154
1154
1155 def fstat(fp):
1155 def fstat(fp):
1156 '''stat file object that may not have fileno method.'''
1156 '''stat file object that may not have fileno method.'''
1157 try:
1157 try:
1158 return os.fstat(fp.fileno())
1158 return os.fstat(fp.fileno())
1159 except AttributeError:
1159 except AttributeError:
1160 return os.stat(fp.name)
1160 return os.stat(fp.name)
1161
1161
1162 # File system features
1162 # File system features
1163
1163
1164 def checkcase(path):
1164 def checkcase(path):
1165 """
1165 """
1166 Return true if the given path is on a case-sensitive filesystem
1166 Return true if the given path is on a case-sensitive filesystem
1167
1167
1168 Requires a path (like /foo/.hg) ending with a foldable final
1168 Requires a path (like /foo/.hg) ending with a foldable final
1169 directory component.
1169 directory component.
1170 """
1170 """
1171 s1 = os.lstat(path)
1171 s1 = os.lstat(path)
1172 d, b = os.path.split(path)
1172 d, b = os.path.split(path)
1173 b2 = b.upper()
1173 b2 = b.upper()
1174 if b == b2:
1174 if b == b2:
1175 b2 = b.lower()
1175 b2 = b.lower()
1176 if b == b2:
1176 if b == b2:
1177 return True # no evidence against case sensitivity
1177 return True # no evidence against case sensitivity
1178 p2 = os.path.join(d, b2)
1178 p2 = os.path.join(d, b2)
1179 try:
1179 try:
1180 s2 = os.lstat(p2)
1180 s2 = os.lstat(p2)
1181 if s2 == s1:
1181 if s2 == s1:
1182 return False
1182 return False
1183 return True
1183 return True
1184 except OSError:
1184 except OSError:
1185 return True
1185 return True
1186
1186
1187 try:
1187 try:
1188 import re2
1188 import re2
1189 _re2 = None
1189 _re2 = None
1190 except ImportError:
1190 except ImportError:
1191 _re2 = False
1191 _re2 = False
1192
1192
1193 class _re(object):
1193 class _re(object):
1194 def _checkre2(self):
1194 def _checkre2(self):
1195 global _re2
1195 global _re2
1196 try:
1196 try:
1197 # check if match works, see issue3964
1197 # check if match works, see issue3964
1198 _re2 = bool(re2.match(r'\[([^\[]+)\]', '[ui]'))
1198 _re2 = bool(re2.match(r'\[([^\[]+)\]', '[ui]'))
1199 except ImportError:
1199 except ImportError:
1200 _re2 = False
1200 _re2 = False
1201
1201
1202 def compile(self, pat, flags=0):
1202 def compile(self, pat, flags=0):
1203 '''Compile a regular expression, using re2 if possible
1203 '''Compile a regular expression, using re2 if possible
1204
1204
1205 For best performance, use only re2-compatible regexp features. The
1205 For best performance, use only re2-compatible regexp features. The
1206 only flags from the re module that are re2-compatible are
1206 only flags from the re module that are re2-compatible are
1207 IGNORECASE and MULTILINE.'''
1207 IGNORECASE and MULTILINE.'''
1208 if _re2 is None:
1208 if _re2 is None:
1209 self._checkre2()
1209 self._checkre2()
1210 if _re2 and (flags & ~(remod.IGNORECASE | remod.MULTILINE)) == 0:
1210 if _re2 and (flags & ~(remod.IGNORECASE | remod.MULTILINE)) == 0:
1211 if flags & remod.IGNORECASE:
1211 if flags & remod.IGNORECASE:
1212 pat = '(?i)' + pat
1212 pat = '(?i)' + pat
1213 if flags & remod.MULTILINE:
1213 if flags & remod.MULTILINE:
1214 pat = '(?m)' + pat
1214 pat = '(?m)' + pat
1215 try:
1215 try:
1216 return re2.compile(pat)
1216 return re2.compile(pat)
1217 except re2.error:
1217 except re2.error:
1218 pass
1218 pass
1219 return remod.compile(pat, flags)
1219 return remod.compile(pat, flags)
1220
1220
1221 @propertycache
1221 @propertycache
1222 def escape(self):
1222 def escape(self):
1223 '''Return the version of escape corresponding to self.compile.
1223 '''Return the version of escape corresponding to self.compile.
1224
1224
1225 This is imperfect because whether re2 or re is used for a particular
1225 This is imperfect because whether re2 or re is used for a particular
1226 function depends on the flags, etc, but it's the best we can do.
1226 function depends on the flags, etc, but it's the best we can do.
1227 '''
1227 '''
1228 global _re2
1228 global _re2
1229 if _re2 is None:
1229 if _re2 is None:
1230 self._checkre2()
1230 self._checkre2()
1231 if _re2:
1231 if _re2:
1232 return re2.escape
1232 return re2.escape
1233 else:
1233 else:
1234 return remod.escape
1234 return remod.escape
1235
1235
1236 re = _re()
1236 re = _re()
1237
1237
1238 _fspathcache = {}
1238 _fspathcache = {}
1239 def fspath(name, root):
1239 def fspath(name, root):
1240 '''Get name in the case stored in the filesystem
1240 '''Get name in the case stored in the filesystem
1241
1241
1242 The name should be relative to root, and be normcase-ed for efficiency.
1242 The name should be relative to root, and be normcase-ed for efficiency.
1243
1243
1244 Note that this function is unnecessary, and should not be
1244 Note that this function is unnecessary, and should not be
1245 called, for case-sensitive filesystems (simply because it's expensive).
1245 called, for case-sensitive filesystems (simply because it's expensive).
1246
1246
1247 The root should be normcase-ed, too.
1247 The root should be normcase-ed, too.
1248 '''
1248 '''
1249 def _makefspathcacheentry(dir):
1249 def _makefspathcacheentry(dir):
1250 return dict((normcase(n), n) for n in os.listdir(dir))
1250 return dict((normcase(n), n) for n in os.listdir(dir))
1251
1251
1252 seps = os.sep
1252 seps = os.sep
1253 if os.altsep:
1253 if os.altsep:
1254 seps = seps + os.altsep
1254 seps = seps + os.altsep
1255 # Protect backslashes. This gets silly very quickly.
1255 # Protect backslashes. This gets silly very quickly.
1256 seps.replace('\\','\\\\')
1256 seps.replace('\\','\\\\')
1257 pattern = remod.compile(r'([^%s]+)|([%s]+)' % (seps, seps))
1257 pattern = remod.compile(r'([^%s]+)|([%s]+)' % (seps, seps))
1258 dir = os.path.normpath(root)
1258 dir = os.path.normpath(root)
1259 result = []
1259 result = []
1260 for part, sep in pattern.findall(name):
1260 for part, sep in pattern.findall(name):
1261 if sep:
1261 if sep:
1262 result.append(sep)
1262 result.append(sep)
1263 continue
1263 continue
1264
1264
1265 if dir not in _fspathcache:
1265 if dir not in _fspathcache:
1266 _fspathcache[dir] = _makefspathcacheentry(dir)
1266 _fspathcache[dir] = _makefspathcacheentry(dir)
1267 contents = _fspathcache[dir]
1267 contents = _fspathcache[dir]
1268
1268
1269 found = contents.get(part)
1269 found = contents.get(part)
1270 if not found:
1270 if not found:
1271 # retry "once per directory" per "dirstate.walk" which
1271 # retry "once per directory" per "dirstate.walk" which
1272 # may take place for each patch of "hg qpush", for example
1272 # may take place for each patch of "hg qpush", for example
1273 _fspathcache[dir] = contents = _makefspathcacheentry(dir)
1273 _fspathcache[dir] = contents = _makefspathcacheentry(dir)
1274 found = contents.get(part)
1274 found = contents.get(part)
1275
1275
1276 result.append(found or part)
1276 result.append(found or part)
1277 dir = os.path.join(dir, part)
1277 dir = os.path.join(dir, part)
1278
1278
1279 return ''.join(result)
1279 return ''.join(result)
1280
1280
1281 def checknlink(testfile):
1281 def checknlink(testfile):
1282 '''check whether hardlink count reporting works properly'''
1282 '''check whether hardlink count reporting works properly'''
1283
1283
1284 # testfile may be open, so we need a separate file for checking to
1284 # testfile may be open, so we need a separate file for checking to
1285 # work around issue2543 (or testfile may get lost on Samba shares)
1285 # work around issue2543 (or testfile may get lost on Samba shares)
1286 f1 = testfile + ".hgtmp1"
1286 f1 = testfile + ".hgtmp1"
1287 if os.path.lexists(f1):
1287 if os.path.lexists(f1):
1288 return False
1288 return False
1289 try:
1289 try:
1290 posixfile(f1, 'w').close()
1290 posixfile(f1, 'w').close()
1291 except IOError:
1291 except IOError:
1292 return False
1292 return False
1293
1293
1294 f2 = testfile + ".hgtmp2"
1294 f2 = testfile + ".hgtmp2"
1295 fd = None
1295 fd = None
1296 try:
1296 try:
1297 oslink(f1, f2)
1297 oslink(f1, f2)
1298 # nlinks() may behave differently for files on Windows shares if
1298 # nlinks() may behave differently for files on Windows shares if
1299 # the file is open.
1299 # the file is open.
1300 fd = posixfile(f2)
1300 fd = posixfile(f2)
1301 return nlinks(f2) > 1
1301 return nlinks(f2) > 1
1302 except OSError:
1302 except OSError:
1303 return False
1303 return False
1304 finally:
1304 finally:
1305 if fd is not None:
1305 if fd is not None:
1306 fd.close()
1306 fd.close()
1307 for f in (f1, f2):
1307 for f in (f1, f2):
1308 try:
1308 try:
1309 os.unlink(f)
1309 os.unlink(f)
1310 except OSError:
1310 except OSError:
1311 pass
1311 pass
1312
1312
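# In short, checknlink() returns True only when the hard link can be created
# and the reported link count of the new name exceeds one; an OSError along
# the way (or a filesystem that always reports a count of 1) yields False.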
1313 def endswithsep(path):
1313 def endswithsep(path):
1314 '''Check path ends with os.sep or os.altsep.'''
1314 '''Check path ends with os.sep or os.altsep.'''
1315 return path.endswith(os.sep) or os.altsep and path.endswith(os.altsep)
1315 return path.endswith(os.sep) or os.altsep and path.endswith(os.altsep)
1316
1316
1317 def splitpath(path):
1317 def splitpath(path):
1318 '''Split path by os.sep.
1318 '''Split path by os.sep.
1319 Note that this function does not use os.altsep because it is
1319 Note that this function does not use os.altsep because it is
1320 meant as an alternative to a simple "xxx.split(os.sep)".
1320 meant as an alternative to a simple "xxx.split(os.sep)".
1321 It is recommended to use os.path.normpath() before using this
1321 It is recommended to use os.path.normpath() before using this
1322 function if needed.'''
1322 function if needed.'''
1323 return path.split(os.sep)
1323 return path.split(os.sep)
1324
1324
1325 def gui():
1325 def gui():
1326 '''Are we running in a GUI?'''
1326 '''Are we running in a GUI?'''
1327 if sys.platform == 'darwin':
1327 if sys.platform == 'darwin':
1328 if 'SSH_CONNECTION' in os.environ:
1328 if 'SSH_CONNECTION' in os.environ:
1329 # handle SSH access to a box where the user is logged in
1329 # handle SSH access to a box where the user is logged in
1330 return False
1330 return False
1331 elif getattr(osutil, 'isgui', None):
1331 elif getattr(osutil, 'isgui', None):
1332 # check if a CoreGraphics session is available
1332 # check if a CoreGraphics session is available
1333 return osutil.isgui()
1333 return osutil.isgui()
1334 else:
1334 else:
1335 # pure build; use a safe default
1335 # pure build; use a safe default
1336 return True
1336 return True
1337 else:
1337 else:
1338 return os.name == "nt" or os.environ.get("DISPLAY")
1338 return os.name == "nt" or os.environ.get("DISPLAY")
1339
1339
1340 def mktempcopy(name, emptyok=False, createmode=None):
1340 def mktempcopy(name, emptyok=False, createmode=None):
1341 """Create a temporary file with the same contents from name
1341 """Create a temporary file with the same contents from name
1342
1342
1343 The permission bits are copied from the original file.
1343 The permission bits are copied from the original file.
1344
1344
1345 If the temporary file is going to be truncated immediately, you
1345 If the temporary file is going to be truncated immediately, you
1346 can use emptyok=True as an optimization.
1346 can use emptyok=True as an optimization.
1347
1347
1348 Returns the name of the temporary file.
1348 Returns the name of the temporary file.
1349 """
1349 """
1350 d, fn = os.path.split(name)
1350 d, fn = os.path.split(name)
1351 fd, temp = tempfile.mkstemp(prefix='.%s-' % fn, dir=d)
1351 fd, temp = tempfile.mkstemp(prefix='.%s-' % fn, dir=d)
1352 os.close(fd)
1352 os.close(fd)
1353 # Temporary files are created with mode 0600, which is usually not
1353 # Temporary files are created with mode 0600, which is usually not
1354 # what we want. If the original file already exists, just copy
1354 # what we want. If the original file already exists, just copy
1355 # its mode. Otherwise, manually obey umask.
1355 # its mode. Otherwise, manually obey umask.
1356 copymode(name, temp, createmode)
1356 copymode(name, temp, createmode)
1357 if emptyok:
1357 if emptyok:
1358 return temp
1358 return temp
1359 try:
1359 try:
1360 try:
1360 try:
1361 ifp = posixfile(name, "rb")
1361 ifp = posixfile(name, "rb")
1362 except IOError as inst:
1362 except IOError as inst:
1363 if inst.errno == errno.ENOENT:
1363 if inst.errno == errno.ENOENT:
1364 return temp
1364 return temp
1365 if not getattr(inst, 'filename', None):
1365 if not getattr(inst, 'filename', None):
1366 inst.filename = name
1366 inst.filename = name
1367 raise
1367 raise
1368 ofp = posixfile(temp, "wb")
1368 ofp = posixfile(temp, "wb")
1369 for chunk in filechunkiter(ifp):
1369 for chunk in filechunkiter(ifp):
1370 ofp.write(chunk)
1370 ofp.write(chunk)
1371 ifp.close()
1371 ifp.close()
1372 ofp.close()
1372 ofp.close()
1373 except: # re-raises
1373 except: # re-raises
1374 try: os.unlink(temp)
1374 try: os.unlink(temp)
1375 except OSError: pass
1375 except OSError: pass
1376 raise
1376 raise
1377 return temp
1377 return temp
1378
1378
1379 class atomictempfile(object):
1379 class atomictempfile(object):
1380 '''writable file object that atomically updates a file
1380 '''writable file object that atomically updates a file
1381
1381
1382 All writes will go to a temporary copy of the original file. Call
1382 All writes will go to a temporary copy of the original file. Call
1383 close() when you are done writing, and atomictempfile will rename
1383 close() when you are done writing, and atomictempfile will rename
1384 the temporary copy to the original name, making the changes
1384 the temporary copy to the original name, making the changes
1385 visible. If the object is destroyed without being closed, all your
1385 visible. If the object is destroyed without being closed, all your
1386 writes are discarded.
1386 writes are discarded.
1387 '''
1387 '''
1388 def __init__(self, name, mode='w+b', createmode=None):
1388 def __init__(self, name, mode='w+b', createmode=None):
1389 self.__name = name # permanent name
1389 self.__name = name # permanent name
1390 self._tempname = mktempcopy(name, emptyok=('w' in mode),
1390 self._tempname = mktempcopy(name, emptyok=('w' in mode),
1391 createmode=createmode)
1391 createmode=createmode)
1392 self._fp = posixfile(self._tempname, mode)
1392 self._fp = posixfile(self._tempname, mode)
1393
1393
1394 # delegated methods
1394 # delegated methods
1395 self.write = self._fp.write
1395 self.write = self._fp.write
1396 self.seek = self._fp.seek
1396 self.seek = self._fp.seek
1397 self.tell = self._fp.tell
1397 self.tell = self._fp.tell
1398 self.fileno = self._fp.fileno
1398 self.fileno = self._fp.fileno
1399
1399
1400 def close(self):
1400 def close(self):
1401 if not self._fp.closed:
1401 if not self._fp.closed:
1402 self._fp.close()
1402 self._fp.close()
1403 rename(self._tempname, localpath(self.__name))
1403 rename(self._tempname, localpath(self.__name))
1404
1404
1405 def discard(self):
1405 def discard(self):
1406 if not self._fp.closed:
1406 if not self._fp.closed:
1407 try:
1407 try:
1408 os.unlink(self._tempname)
1408 os.unlink(self._tempname)
1409 except OSError:
1409 except OSError:
1410 pass
1410 pass
1411 self._fp.close()
1411 self._fp.close()
1412
1412
1413 def __del__(self):
1413 def __del__(self):
1414 if safehasattr(self, '_fp'): # constructor actually did something
1414 if safehasattr(self, '_fp'): # constructor actually did something
1415 self.discard()
1415 self.discard()
1416
1416
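# Typical use of atomictempfile above (a sketch with hypothetical names):
# fp = atomictempfile('somefile'); fp.write(data); fp.close() -- close()
# renames the temporary copy over 'somefile', while discard() (or plain
# garbage collection) throws the buffered writes away.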
1417 def makedirs(name, mode=None, notindexed=False):
1417 def makedirs(name, mode=None, notindexed=False):
1418 """recursive directory creation with parent mode inheritance"""
1418 """recursive directory creation with parent mode inheritance"""
1419 try:
1419 try:
1420 makedir(name, notindexed)
1420 makedir(name, notindexed)
1421 except OSError as err:
1421 except OSError as err:
1422 if err.errno == errno.EEXIST:
1422 if err.errno == errno.EEXIST:
1423 return
1423 return
1424 if err.errno != errno.ENOENT or not name:
1424 if err.errno != errno.ENOENT or not name:
1425 raise
1425 raise
1426 parent = os.path.dirname(os.path.abspath(name))
1426 parent = os.path.dirname(os.path.abspath(name))
1427 if parent == name:
1427 if parent == name:
1428 raise
1428 raise
1429 makedirs(parent, mode, notindexed)
1429 makedirs(parent, mode, notindexed)
1430 makedir(name, notindexed)
1430 makedir(name, notindexed)
1431 if mode is not None:
1431 if mode is not None:
1432 os.chmod(name, mode)
1432 os.chmod(name, mode)
1433
1433
1434 def ensuredirs(name, mode=None, notindexed=False):
1434 def ensuredirs(name, mode=None, notindexed=False):
1435 """race-safe recursive directory creation
1435 """race-safe recursive directory creation
1436
1436
1437 Newly created directories are marked as "not to be indexed by
1437 Newly created directories are marked as "not to be indexed by
1438 the content indexing service", if ``notindexed`` is specified
1438 the content indexing service", if ``notindexed`` is specified
1439 for "write" mode access.
1439 for "write" mode access.
1440 """
1440 """
1441 if os.path.isdir(name):
1441 if os.path.isdir(name):
1442 return
1442 return
1443 parent = os.path.dirname(os.path.abspath(name))
1443 parent = os.path.dirname(os.path.abspath(name))
1444 if parent != name:
1444 if parent != name:
1445 ensuredirs(parent, mode, notindexed)
1445 ensuredirs(parent, mode, notindexed)
1446 try:
1446 try:
1447 makedir(name, notindexed)
1447 makedir(name, notindexed)
1448 except OSError as err:
1448 except OSError as err:
1449 if err.errno == errno.EEXIST and os.path.isdir(name):
1449 if err.errno == errno.EEXIST and os.path.isdir(name):
1450 # someone else seems to have won a directory creation race
1450 # someone else seems to have won a directory creation race
1451 return
1451 return
1452 raise
1452 raise
1453 if mode is not None:
1453 if mode is not None:
1454 os.chmod(name, mode)
1454 os.chmod(name, mode)
1455
1455
1456 def readfile(path):
1456 def readfile(path):
1457 with open(path, 'rb') as fp:
1457 with open(path, 'rb') as fp:
1458 return fp.read()
1458 return fp.read()
1459
1459
1460 def writefile(path, text):
1460 def writefile(path, text):
1461 with open(path, 'wb') as fp:
1461 with open(path, 'wb') as fp:
1462 fp.write(text)
1462 fp.write(text)
1463
1463
1464 def appendfile(path, text):
1464 def appendfile(path, text):
1465 with open(path, 'ab') as fp:
1465 with open(path, 'ab') as fp:
1466 fp.write(text)
1466 fp.write(text)
1467
1467
1468 class chunkbuffer(object):
1468 class chunkbuffer(object):
1469 """Allow arbitrary sized chunks of data to be efficiently read from an
1469 """Allow arbitrary sized chunks of data to be efficiently read from an
1470 iterator over chunks of arbitrary size."""
1470 iterator over chunks of arbitrary size."""
1471
1471
1472 def __init__(self, in_iter):
1472 def __init__(self, in_iter):
1473 """in_iter is the iterator that's iterating over the input chunks.
1473 """in_iter is the iterator that's iterating over the input chunks.
1474 read() refills its internal queue roughly 2**18 bytes at a time."""
1474 read() refills its internal queue roughly 2**18 bytes at a time."""
1475 def splitbig(chunks):
1475 def splitbig(chunks):
1476 for chunk in chunks:
1476 for chunk in chunks:
1477 if len(chunk) > 2**20:
1477 if len(chunk) > 2**20:
1478 pos = 0
1478 pos = 0
1479 while pos < len(chunk):
1479 while pos < len(chunk):
1480 end = pos + 2 ** 18
1480 end = pos + 2 ** 18
1481 yield chunk[pos:end]
1481 yield chunk[pos:end]
1482 pos = end
1482 pos = end
1483 else:
1483 else:
1484 yield chunk
1484 yield chunk
1485 self.iter = splitbig(in_iter)
1485 self.iter = splitbig(in_iter)
1486 self._queue = collections.deque()
1486 self._queue = collections.deque()
1487 self._chunkoffset = 0
1487 self._chunkoffset = 0
1488
1488
1489 def read(self, l=None):
1489 def read(self, l=None):
1490 """Read L bytes of data from the iterator of chunks of data.
1490 """Read L bytes of data from the iterator of chunks of data.
1491 Returns less than L bytes if the iterator runs dry.
1491 Returns less than L bytes if the iterator runs dry.
1492
1492
1493 If the size parameter is omitted, read everything
1493 If the size parameter is omitted, read everything
1494 if l is None:
1494 if l is None:
1495 return ''.join(self.iter)
1495 return ''.join(self.iter)
1496
1496
1497 left = l
1497 left = l
1498 buf = []
1498 buf = []
1499 queue = self._queue
1499 queue = self._queue
1500 while left > 0:
1500 while left > 0:
1501 # refill the queue
1501 # refill the queue
1502 if not queue:
1502 if not queue:
1503 target = 2**18
1503 target = 2**18
1504 for chunk in self.iter:
1504 for chunk in self.iter:
1505 queue.append(chunk)
1505 queue.append(chunk)
1506 target -= len(chunk)
1506 target -= len(chunk)
1507 if target <= 0:
1507 if target <= 0:
1508 break
1508 break
1509 if not queue:
1509 if not queue:
1510 break
1510 break
1511
1511
1512 # The easy way to do this would be to queue.popleft(), modify the
1512 # The easy way to do this would be to queue.popleft(), modify the
1513 # chunk (if necessary), then queue.appendleft(). However, for cases
1513 # chunk (if necessary), then queue.appendleft(). However, for cases
1514 # where we read partial chunk content, this incurs 2 dequeue
1514 # where we read partial chunk content, this incurs 2 dequeue
1515 # mutations and creates a new str for the remaining chunk in the
1515 # mutations and creates a new str for the remaining chunk in the
1516 # queue. Our code below avoids this overhead.
1516 # queue. Our code below avoids this overhead.
1517
1517
1518 chunk = queue[0]
1518 chunk = queue[0]
1519 chunkl = len(chunk)
1519 chunkl = len(chunk)
1520 offset = self._chunkoffset
1520 offset = self._chunkoffset
1521
1521
1522 # Use full chunk.
1522 # Use full chunk.
1523 if offset == 0 and left >= chunkl:
1523 if offset == 0 and left >= chunkl:
1524 left -= chunkl
1524 left -= chunkl
1525 queue.popleft()
1525 queue.popleft()
1526 buf.append(chunk)
1526 buf.append(chunk)
1527 # self._chunkoffset remains at 0.
1527 # self._chunkoffset remains at 0.
1528 continue
1528 continue
1529
1529
1530 chunkremaining = chunkl - offset
1530 chunkremaining = chunkl - offset
1531
1531
1532 # Use all of unconsumed part of chunk.
1532 # Use all of unconsumed part of chunk.
1533 if left >= chunkremaining:
1533 if left >= chunkremaining:
1534 left -= chunkremaining
1534 left -= chunkremaining
1535 queue.popleft()
1535 queue.popleft()
1536 # offset == 0 is enabled by block above, so this won't merely
1536 # offset == 0 is enabled by block above, so this won't merely
1537 # copy via ``chunk[0:]``.
1537 # copy via ``chunk[0:]``.
1538 buf.append(chunk[offset:])
1538 buf.append(chunk[offset:])
1539 self._chunkoffset = 0
1539 self._chunkoffset = 0
1540
1540
1541 # Partial chunk needed.
1541 # Partial chunk needed.
1542 else:
1542 else:
1543 buf.append(chunk[offset:offset + left])
1543 buf.append(chunk[offset:offset + left])
1544 self._chunkoffset += left
1544 self._chunkoffset += left
1545 left -= chunkremaining
1545 left -= chunkremaining
1546
1546
1547 return ''.join(buf)
1547 return ''.join(buf)
1548
1548
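# Illustrative behaviour of chunkbuffer.read() above, derived from the code:
# chunkbuffer(iter(['abc', 'defg'])).read(5) returns 'abcde', a subsequent
# read(5) returns the remaining 'fg', and read() with no argument drains
# whatever is left in the underlying iterator.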
1549 def filechunkiter(f, size=65536, limit=None):
1549 def filechunkiter(f, size=65536, limit=None):
1550 """Create a generator that produces the data in the file size
1550 """Create a generator that produces the data in the file size
1551 (default 65536) bytes at a time, up to optional limit (default is
1551 (default 65536) bytes at a time, up to optional limit (default is
1552 to read all data). Chunks may be less than size bytes if the
1552 to read all data). Chunks may be less than size bytes if the
1553 chunk is the last chunk in the file, or the file is a socket or
1553 chunk is the last chunk in the file, or the file is a socket or
1554 some other type of file that sometimes reads less data than is
1554 some other type of file that sometimes reads less data than is
1555 requested."""
1555 requested."""
1556 assert size >= 0
1556 assert size >= 0
1557 assert limit is None or limit >= 0
1557 assert limit is None or limit >= 0
1558 while True:
1558 while True:
1559 if limit is None:
1559 if limit is None:
1560 nbytes = size
1560 nbytes = size
1561 else:
1561 else:
1562 nbytes = min(limit, size)
1562 nbytes = min(limit, size)
1563 s = nbytes and f.read(nbytes)
1563 s = nbytes and f.read(nbytes)
1564 if not s:
1564 if not s:
1565 break
1565 break
1566 if limit:
1566 if limit:
1567 limit -= len(s)
1567 limit -= len(s)
1568 yield s
1568 yield s
1569
1569
1570 def makedate(timestamp=None):
1570 def makedate(timestamp=None):
1571 '''Return a unix timestamp (or the current time) as a (unixtime,
1571 '''Return a unix timestamp (or the current time) as a (unixtime,
1572 offset) tuple based off the local timezone.'''
1572 offset) tuple based off the local timezone.'''
1573 if timestamp is None:
1573 if timestamp is None:
1574 timestamp = time.time()
1574 timestamp = time.time()
1575 if timestamp < 0:
1575 if timestamp < 0:
1576 hint = _("check your clock")
1576 hint = _("check your clock")
1577 raise Abort(_("negative timestamp: %d") % timestamp, hint=hint)
1577 raise Abort(_("negative timestamp: %d") % timestamp, hint=hint)
1578 delta = (datetime.datetime.utcfromtimestamp(timestamp) -
1578 delta = (datetime.datetime.utcfromtimestamp(timestamp) -
1579 datetime.datetime.fromtimestamp(timestamp))
1579 datetime.datetime.fromtimestamp(timestamp))
1580 tz = delta.days * 86400 + delta.seconds
1580 tz = delta.days * 86400 + delta.seconds
1581 return timestamp, tz
1581 return timestamp, tz
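# Note on the sign convention of makedate(): the offset is UTC minus local
# time, i.e. seconds west of UTC, so a zone two hours east of UTC (UTC+2)
# yields tz == -7200.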
1582
1582
1583 def datestr(date=None, format='%a %b %d %H:%M:%S %Y %1%2'):
1583 def datestr(date=None, format='%a %b %d %H:%M:%S %Y %1%2'):
1584 """represent a (unixtime, offset) tuple as a localized time.
1584 """represent a (unixtime, offset) tuple as a localized time.
1585 unixtime is seconds since the epoch, and offset is the time zone's
1585 unixtime is seconds since the epoch, and offset is the time zone's
1586 number of seconds away from UTC."""
1586 number of seconds away from UTC."""
1587 t, tz = date or makedate()
1587 t, tz = date or makedate()
1588 if "%1" in format or "%2" in format or "%z" in format:
1588 if "%1" in format or "%2" in format or "%z" in format:
1589 sign = (tz > 0) and "-" or "+"
1589 sign = (tz > 0) and "-" or "+"
1590 minutes = abs(tz) // 60
1590 minutes = abs(tz) // 60
1591 q, r = divmod(minutes, 60)
1591 q, r = divmod(minutes, 60)
1592 format = format.replace("%z", "%1%2")
1592 format = format.replace("%z", "%1%2")
1593 format = format.replace("%1", "%c%02d" % (sign, q))
1593 format = format.replace("%1", "%c%02d" % (sign, q))
1594 format = format.replace("%2", "%02d" % r)
1594 format = format.replace("%2", "%02d" % r)
1595 d = t - tz
1595 d = t - tz
1596 if d > 0x7fffffff:
1596 if d > 0x7fffffff:
1597 d = 0x7fffffff
1597 d = 0x7fffffff
1598 elif d < -0x7fffffff:
1598 elif d < -0x80000000:
1599 d = -0x7fffffff
1599 d = -0x80000000
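# The clamp above keeps d inside the signed 32-bit range
# [-0x80000000, 0x7fffffff], i.e. -2147483648 through 2147483647; the previous
# lower bound of -0x7fffffff left the valid minimum value unreachable, which
# is what this revision corrects.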
1600 # Never use time.gmtime() and datetime.datetime.fromtimestamp()
1600 # Never use time.gmtime() and datetime.datetime.fromtimestamp()
1601 # because they use the gmtime() system call which is buggy on Windows
1601 # because they use the gmtime() system call which is buggy on Windows
1602 # for negative values.
1602 # for negative values.
1603 t = datetime.datetime(1970, 1, 1) + datetime.timedelta(seconds=d)
1603 t = datetime.datetime(1970, 1, 1) + datetime.timedelta(seconds=d)
1604 s = t.strftime(format)
1604 s = t.strftime(format)
1605 return s
1605 return s
1606
1606
1607 def shortdate(date=None):
1607 def shortdate(date=None):
1608 """turn (timestamp, tzoff) tuple into iso 8631 date."""
1608 """turn (timestamp, tzoff) tuple into iso 8631 date."""
1609 return datestr(date, format='%Y-%m-%d')
1609 return datestr(date, format='%Y-%m-%d')
1610
1610
1611 def parsetimezone(tz):
1611 def parsetimezone(tz):
1612 """parse a timezone string and return an offset integer"""
1612 """parse a timezone string and return an offset integer"""
1613 if tz[0] in "+-" and len(tz) == 5 and tz[1:].isdigit():
1613 if tz[0] in "+-" and len(tz) == 5 and tz[1:].isdigit():
1614 sign = (tz[0] == "+") and 1 or -1
1614 sign = (tz[0] == "+") and 1 or -1
1615 hours = int(tz[1:3])
1615 hours = int(tz[1:3])
1616 minutes = int(tz[3:5])
1616 minutes = int(tz[3:5])
1617 return -sign * (hours * 60 + minutes) * 60
1617 return -sign * (hours * 60 + minutes) * 60
1618 if tz == "GMT" or tz == "UTC":
1618 if tz == "GMT" or tz == "UTC":
1619 return 0
1619 return 0
1620 return None
1620 return None
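# A few illustrative values for parsetimezone() above: '+0200' gives -7200 and
# '-0500' gives 18000 (matching the seconds-west convention of makedate()),
# 'GMT' and 'UTC' give 0, and anything unrecognized returns None.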
1621
1621
1622 def strdate(string, format, defaults=[]):
1622 def strdate(string, format, defaults=[]):
1623 """parse a localized time string and return a (unixtime, offset) tuple.
1623 """parse a localized time string and return a (unixtime, offset) tuple.
1624 if the string cannot be parsed, ValueError is raised."""
1624 if the string cannot be parsed, ValueError is raised."""
1625 # NOTE: unixtime = localunixtime + offset
1625 # NOTE: unixtime = localunixtime + offset
1626 offset, date = parsetimezone(string.split()[-1]), string
1626 offset, date = parsetimezone(string.split()[-1]), string
1627 if offset is not None:
1627 if offset is not None:
1628 date = " ".join(string.split()[:-1])
1628 date = " ".join(string.split()[:-1])
1629
1629
1630 # add missing elements from defaults
1630 # add missing elements from defaults
1631 usenow = False # default to using biased defaults
1631 usenow = False # default to using biased defaults
1632 for part in ("S", "M", "HI", "d", "mb", "yY"): # decreasing specificity
1632 for part in ("S", "M", "HI", "d", "mb", "yY"): # decreasing specificity
1633 found = [True for p in part if ("%"+p) in format]
1633 found = [True for p in part if ("%"+p) in format]
1634 if not found:
1634 if not found:
1635 date += "@" + defaults[part][usenow]
1635 date += "@" + defaults[part][usenow]
1636 format += "@%" + part[0]
1636 format += "@%" + part[0]
1637 else:
1637 else:
1638 # We've found a specific time element, less specific time
1638 # We've found a specific time element, less specific time
1639 # elements are relative to today
1639 # elements are relative to today
1640 usenow = True
1640 usenow = True
1641
1641
1642 timetuple = time.strptime(date, format)
1642 timetuple = time.strptime(date, format)
1643 localunixtime = int(calendar.timegm(timetuple))
1643 localunixtime = int(calendar.timegm(timetuple))
1644 if offset is None:
1644 if offset is None:
1645 # local timezone
1645 # local timezone
1646 unixtime = int(time.mktime(timetuple))
1646 unixtime = int(time.mktime(timetuple))
1647 offset = unixtime - localunixtime
1647 offset = unixtime - localunixtime
1648 else:
1648 else:
1649 unixtime = localunixtime + offset
1649 unixtime = localunixtime + offset
1650 return unixtime, offset
1650 return unixtime, offset
1651
1651
1652 def parsedate(date, formats=None, bias=None):
1652 def parsedate(date, formats=None, bias=None):
1653 """parse a localized date/time and return a (unixtime, offset) tuple.
1653 """parse a localized date/time and return a (unixtime, offset) tuple.
1654
1654
1655 The date may be a "unixtime offset" string or in one of the specified
1655 The date may be a "unixtime offset" string or in one of the specified
1656 formats. If the date already is a (unixtime, offset) tuple, it is returned.
1656 formats. If the date already is a (unixtime, offset) tuple, it is returned.
1657
1657
1658 >>> parsedate(' today ') == parsedate(\
1658 >>> parsedate(' today ') == parsedate(\
1659 datetime.date.today().strftime('%b %d'))
1659 datetime.date.today().strftime('%b %d'))
1660 True
1660 True
1661 >>> parsedate( 'yesterday ') == parsedate((datetime.date.today() -\
1661 >>> parsedate( 'yesterday ') == parsedate((datetime.date.today() -\
1662 datetime.timedelta(days=1)\
1662 datetime.timedelta(days=1)\
1663 ).strftime('%b %d'))
1663 ).strftime('%b %d'))
1664 True
1664 True
1665 >>> now, tz = makedate()
1665 >>> now, tz = makedate()
1666 >>> strnow, strtz = parsedate('now')
1666 >>> strnow, strtz = parsedate('now')
1667 >>> (strnow - now) < 1
1667 >>> (strnow - now) < 1
1668 True
1668 True
1669 >>> tz == strtz
1669 >>> tz == strtz
1670 True
1670 True
1671 """
1671 """
1672 if bias is None:
1672 if bias is None:
1673 bias = {}
1673 bias = {}
1674 if not date:
1674 if not date:
1675 return 0, 0
1675 return 0, 0
1676 if isinstance(date, tuple) and len(date) == 2:
1676 if isinstance(date, tuple) and len(date) == 2:
1677 return date
1677 return date
1678 if not formats:
1678 if not formats:
1679 formats = defaultdateformats
1679 formats = defaultdateformats
1680 date = date.strip()
1680 date = date.strip()
1681
1681
1682 if date == 'now' or date == _('now'):
1682 if date == 'now' or date == _('now'):
1683 return makedate()
1683 return makedate()
1684 if date == 'today' or date == _('today'):
1684 if date == 'today' or date == _('today'):
1685 date = datetime.date.today().strftime('%b %d')
1685 date = datetime.date.today().strftime('%b %d')
1686 elif date == 'yesterday' or date == _('yesterday'):
1686 elif date == 'yesterday' or date == _('yesterday'):
1687 date = (datetime.date.today() -
1687 date = (datetime.date.today() -
1688 datetime.timedelta(days=1)).strftime('%b %d')
1688 datetime.timedelta(days=1)).strftime('%b %d')
1689
1689
1690 try:
1690 try:
1691 when, offset = map(int, date.split(' '))
1691 when, offset = map(int, date.split(' '))
1692 except ValueError:
1692 except ValueError:
1693 # fill out defaults
1693 # fill out defaults
1694 now = makedate()
1694 now = makedate()
1695 defaults = {}
1695 defaults = {}
1696 for part in ("d", "mb", "yY", "HI", "M", "S"):
1696 for part in ("d", "mb", "yY", "HI", "M", "S"):
1697 # this piece is for rounding the specific end of unknowns
1697 # this piece is for rounding the specific end of unknowns
1698 b = bias.get(part)
1698 b = bias.get(part)
1699 if b is None:
1699 if b is None:
1700 if part[0] in "HMS":
1700 if part[0] in "HMS":
1701 b = "00"
1701 b = "00"
1702 else:
1702 else:
1703 b = "0"
1703 b = "0"
1704
1704
1705 # this piece is for matching the generic end to today's date
1705 # this piece is for matching the generic end to today's date
1706 n = datestr(now, "%" + part[0])
1706 n = datestr(now, "%" + part[0])
1707
1707
1708 defaults[part] = (b, n)
1708 defaults[part] = (b, n)
1709
1709
1710 for format in formats:
1710 for format in formats:
1711 try:
1711 try:
1712 when, offset = strdate(date, format, defaults)
1712 when, offset = strdate(date, format, defaults)
1713 except (ValueError, OverflowError):
1713 except (ValueError, OverflowError):
1714 pass
1714 pass
1715 else:
1715 else:
1716 break
1716 break
1717 else:
1717 else:
1718 raise Abort(_('invalid date: %r') % date)
1718 raise Abort(_('invalid date: %r') % date)
1719 # validate explicit (probably user-specified) date and
1719 # validate explicit (probably user-specified) date and
1720 # time zone offset. values must fit in signed 32 bits for
1720 # time zone offset. values must fit in signed 32 bits for
1721 # current 32-bit linux runtimes. timezones go from UTC-12
1721 # current 32-bit linux runtimes. timezones go from UTC-12
1722 # to UTC+14
1722 # to UTC+14
1723 if abs(when) > 0x7fffffff:
1723 if when < -0x80000000 or when > 0x7fffffff:
1724 raise Abort(_('date exceeds 32 bits: %d') % when)
1724 raise Abort(_('date exceeds 32 bits: %d') % when)
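# The explicit two-sided comparison above (rather than the old
# abs(when) > 0x7fffffff) is what admits the single value -0x80000000, the
# smallest timestamp representable in signed 32 bits.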
1725 if offset < -50400 or offset > 43200:
1725 if offset < -50400 or offset > 43200:
1726 raise Abort(_('impossible time zone offset: %d') % offset)
1726 raise Abort(_('impossible time zone offset: %d') % offset)
1727 return when, offset
1727 return when, offset
1728
1728
1729 def matchdate(date):
1729 def matchdate(date):
1730 """Return a function that matches a given date match specifier
1730 """Return a function that matches a given date match specifier
1731
1731
1732 Formats include:
1732 Formats include:
1733
1733
1734 '{date}' match a given date to the accuracy provided
1734 '{date}' match a given date to the accuracy provided
1735
1735
1736 '<{date}' on or before a given date
1736 '<{date}' on or before a given date
1737
1737
1738 '>{date}' on or after a given date
1738 '>{date}' on or after a given date
1739
1739
1740 >>> p1 = parsedate("10:29:59")
1740 >>> p1 = parsedate("10:29:59")
1741 >>> p2 = parsedate("10:30:00")
1741 >>> p2 = parsedate("10:30:00")
1742 >>> p3 = parsedate("10:30:59")
1742 >>> p3 = parsedate("10:30:59")
1743 >>> p4 = parsedate("10:31:00")
1743 >>> p4 = parsedate("10:31:00")
1744 >>> p5 = parsedate("Sep 15 10:30:00 1999")
1744 >>> p5 = parsedate("Sep 15 10:30:00 1999")
1745 >>> f = matchdate("10:30")
1745 >>> f = matchdate("10:30")
1746 >>> f(p1[0])
1746 >>> f(p1[0])
1747 False
1747 False
1748 >>> f(p2[0])
1748 >>> f(p2[0])
1749 True
1749 True
1750 >>> f(p3[0])
1750 >>> f(p3[0])
1751 True
1751 True
1752 >>> f(p4[0])
1752 >>> f(p4[0])
1753 False
1753 False
1754 >>> f(p5[0])
1754 >>> f(p5[0])
1755 False
1755 False
1756 """
1756 """
1757
1757
1758 def lower(date):
1758 def lower(date):
1759 d = {'mb': "1", 'd': "1"}
1759 d = {'mb': "1", 'd': "1"}
1760 return parsedate(date, extendeddateformats, d)[0]
1760 return parsedate(date, extendeddateformats, d)[0]
1761
1761
1762 def upper(date):
1762 def upper(date):
1763 d = {'mb': "12", 'HI': "23", 'M': "59", 'S': "59"}
1763 d = {'mb': "12", 'HI': "23", 'M': "59", 'S': "59"}
1764 for days in ("31", "30", "29"):
1764 for days in ("31", "30", "29"):
1765 try:
1765 try:
1766 d["d"] = days
1766 d["d"] = days
1767 return parsedate(date, extendeddateformats, d)[0]
1767 return parsedate(date, extendeddateformats, d)[0]
1768 except Abort:
1768 except Abort:
1769 pass
1769 pass
1770 d["d"] = "28"
1770 d["d"] = "28"
1771 return parsedate(date, extendeddateformats, d)[0]
1771 return parsedate(date, extendeddateformats, d)[0]
1772
1772
1773 date = date.strip()
1773 date = date.strip()
1774
1774
1775 if not date:
1775 if not date:
1776 raise Abort(_("dates cannot consist entirely of whitespace"))
1776 raise Abort(_("dates cannot consist entirely of whitespace"))
1777 elif date[0] == "<":
1777 elif date[0] == "<":
1778 if not date[1:]:
1778 if not date[1:]:
1779 raise Abort(_("invalid day spec, use '<DATE'"))
1779 raise Abort(_("invalid day spec, use '<DATE'"))
1780 when = upper(date[1:])
1780 when = upper(date[1:])
1781 return lambda x: x <= when
1781 return lambda x: x <= when
1782 elif date[0] == ">":
1782 elif date[0] == ">":
1783 if not date[1:]:
1783 if not date[1:]:
1784 raise Abort(_("invalid day spec, use '>DATE'"))
1784 raise Abort(_("invalid day spec, use '>DATE'"))
1785 when = lower(date[1:])
1785 when = lower(date[1:])
1786 return lambda x: x >= when
1786 return lambda x: x >= when
1787 elif date[0] == "-":
1787 elif date[0] == "-":
1788 try:
1788 try:
1789 days = int(date[1:])
1789 days = int(date[1:])
1790 except ValueError:
1790 except ValueError:
1791 raise Abort(_("invalid day spec: %s") % date[1:])
1791 raise Abort(_("invalid day spec: %s") % date[1:])
1792 if days < 0:
1792 if days < 0:
1793 raise Abort(_('%s must be nonnegative (see "hg help dates")')
1793 raise Abort(_('%s must be nonnegative (see "hg help dates")')
1794 % date[1:])
1794 % date[1:])
1795 when = makedate()[0] - days * 3600 * 24
1795 when = makedate()[0] - days * 3600 * 24
1796 return lambda x: x >= when
1796 return lambda x: x >= when
1797 elif " to " in date:
1797 elif " to " in date:
1798 a, b = date.split(" to ")
1798 a, b = date.split(" to ")
1799 start, stop = lower(a), upper(b)
1799 start, stop = lower(a), upper(b)
1800 return lambda x: x >= start and x <= stop
1800 return lambda x: x >= start and x <= stop
1801 else:
1801 else:
1802 start, stop = lower(date), upper(date)
1802 start, stop = lower(date), upper(date)
1803 return lambda x: x >= start and x <= stop
1803 return lambda x: x >= start and x <= stop
1804
1804
1805 def stringmatcher(pattern):
1805 def stringmatcher(pattern):
1806 """
1806 """
1807 accepts a string, possibly starting with 're:' or 'literal:' prefix.
1807 accepts a string, possibly starting with 're:' or 'literal:' prefix.
1808 returns the matcher name, pattern, and matcher function.
1808 returns the matcher name, pattern, and matcher function.
1809 missing or unknown prefixes are treated as literal matches.
1809 missing or unknown prefixes are treated as literal matches.
1810
1810
1811 helper for tests:
1811 helper for tests:
1812 >>> def test(pattern, *tests):
1812 >>> def test(pattern, *tests):
1813 ... kind, pattern, matcher = stringmatcher(pattern)
1813 ... kind, pattern, matcher = stringmatcher(pattern)
1814 ... return (kind, pattern, [bool(matcher(t)) for t in tests])
1814 ... return (kind, pattern, [bool(matcher(t)) for t in tests])
1815
1815
1816 exact matching (no prefix):
1816 exact matching (no prefix):
1817 >>> test('abcdefg', 'abc', 'def', 'abcdefg')
1817 >>> test('abcdefg', 'abc', 'def', 'abcdefg')
1818 ('literal', 'abcdefg', [False, False, True])
1818 ('literal', 'abcdefg', [False, False, True])
1819
1819
1820 regex matching ('re:' prefix)
1820 regex matching ('re:' prefix)
1821 >>> test('re:a.+b', 'nomatch', 'fooadef', 'fooadefbar')
1821 >>> test('re:a.+b', 'nomatch', 'fooadef', 'fooadefbar')
1822 ('re', 'a.+b', [False, False, True])
1822 ('re', 'a.+b', [False, False, True])
1823
1823
1824 force exact matches ('literal:' prefix)
1824 force exact matches ('literal:' prefix)
1825 >>> test('literal:re:foobar', 'foobar', 're:foobar')
1825 >>> test('literal:re:foobar', 'foobar', 're:foobar')
1826 ('literal', 're:foobar', [False, True])
1826 ('literal', 're:foobar', [False, True])
1827
1827
1828 unknown prefixes are ignored and treated as literals
1828 unknown prefixes are ignored and treated as literals
1829 >>> test('foo:bar', 'foo', 'bar', 'foo:bar')
1829 >>> test('foo:bar', 'foo', 'bar', 'foo:bar')
1830 ('literal', 'foo:bar', [False, False, True])
1830 ('literal', 'foo:bar', [False, False, True])
1831 """
1831 """
1832 if pattern.startswith('re:'):
1832 if pattern.startswith('re:'):
1833 pattern = pattern[3:]
1833 pattern = pattern[3:]
1834 try:
1834 try:
1835 regex = remod.compile(pattern)
1835 regex = remod.compile(pattern)
1836 except remod.error as e:
1836 except remod.error as e:
1837 raise error.ParseError(_('invalid regular expression: %s')
1837 raise error.ParseError(_('invalid regular expression: %s')
1838 % e)
1838 % e)
1839 return 're', pattern, regex.search
1839 return 're', pattern, regex.search
1840 elif pattern.startswith('literal:'):
1840 elif pattern.startswith('literal:'):
1841 pattern = pattern[8:]
1841 pattern = pattern[8:]
1842 return 'literal', pattern, pattern.__eq__
1842 return 'literal', pattern, pattern.__eq__
1843
1843
1844 def shortuser(user):
1844 def shortuser(user):
1845 """Return a short representation of a user name or email address."""
1845 """Return a short representation of a user name or email address."""
1846 f = user.find('@')
1846 f = user.find('@')
1847 if f >= 0:
1847 if f >= 0:
1848 user = user[:f]
1848 user = user[:f]
1849 f = user.find('<')
1849 f = user.find('<')
1850 if f >= 0:
1850 if f >= 0:
1851 user = user[f + 1:]
1851 user = user[f + 1:]
1852 f = user.find(' ')
1852 f = user.find(' ')
1853 if f >= 0:
1853 if f >= 0:
1854 user = user[:f]
1854 user = user[:f]
1855 f = user.find('.')
1855 f = user.find('.')
1856 if f >= 0:
1856 if f >= 0:
1857 user = user[:f]
1857 user = user[:f]
1858 return user
1858 return user
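# For illustration (hypothetical address): shortuser('John Doe
# <john.doe@example.com>') reduces to 'john', whereas emailuser() below keeps
# 'john.doe', stripping only the display name and the domain.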
1859
1859
1860 def emailuser(user):
1860 def emailuser(user):
1861 """Return the user portion of an email address."""
1861 """Return the user portion of an email address."""
1862 f = user.find('@')
1862 f = user.find('@')
1863 if f >= 0:
1863 if f >= 0:
1864 user = user[:f]
1864 user = user[:f]
1865 f = user.find('<')
1865 f = user.find('<')
1866 if f >= 0:
1866 if f >= 0:
1867 user = user[f + 1:]
1867 user = user[f + 1:]
1868 return user
1868 return user
1869
1869
1870 def email(author):
1870 def email(author):
1871 '''get email of author.'''
1871 '''get email of author.'''
1872 r = author.find('>')
1872 r = author.find('>')
1873 if r == -1:
1873 if r == -1:
1874 r = None
1874 r = None
1875 return author[author.find('<') + 1:r]
1875 return author[author.find('<') + 1:r]
1876
1876
1877 def ellipsis(text, maxlength=400):
1877 def ellipsis(text, maxlength=400):
1878 """Trim string to at most maxlength (default: 400) columns in display."""
1878 """Trim string to at most maxlength (default: 400) columns in display."""
1879 return encoding.trim(text, maxlength, ellipsis='...')
1879 return encoding.trim(text, maxlength, ellipsis='...')
1880
1880
1881 def unitcountfn(*unittable):
1881 def unitcountfn(*unittable):
1882 '''return a function that renders a readable count of some quantity'''
1882 '''return a function that renders a readable count of some quantity'''
1883
1883
1884 def go(count):
1884 def go(count):
1885 for multiplier, divisor, format in unittable:
1885 for multiplier, divisor, format in unittable:
1886 if count >= divisor * multiplier:
1886 if count >= divisor * multiplier:
1887 return format % (count / float(divisor))
1887 return format % (count / float(divisor))
1888 return unittable[-1][2] % count
1888 return unittable[-1][2] % count
1889
1889
1890 return go
1890 return go
1891
1891
1892 bytecount = unitcountfn(
1892 bytecount = unitcountfn(
1893 (100, 1 << 30, _('%.0f GB')),
1893 (100, 1 << 30, _('%.0f GB')),
1894 (10, 1 << 30, _('%.1f GB')),
1894 (10, 1 << 30, _('%.1f GB')),
1895 (1, 1 << 30, _('%.2f GB')),
1895 (1, 1 << 30, _('%.2f GB')),
1896 (100, 1 << 20, _('%.0f MB')),
1896 (100, 1 << 20, _('%.0f MB')),
1897 (10, 1 << 20, _('%.1f MB')),
1897 (10, 1 << 20, _('%.1f MB')),
1898 (1, 1 << 20, _('%.2f MB')),
1898 (1, 1 << 20, _('%.2f MB')),
1899 (100, 1 << 10, _('%.0f KB')),
1899 (100, 1 << 10, _('%.0f KB')),
1900 (10, 1 << 10, _('%.1f KB')),
1900 (10, 1 << 10, _('%.1f KB')),
1901 (1, 1 << 10, _('%.2f KB')),
1901 (1, 1 << 10, _('%.2f KB')),
1902 (1, 1, _('%.0f bytes')),
1902 (1, 1, _('%.0f bytes')),
1903 )
1903 )
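# Sample renderings implied by the table above: bytecount(100) -> '100 bytes',
# bytecount(1024) -> '1.00 KB', and bytecount(15 * (1 << 20)) -> '15.0 MB'.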
1904
1904
1905 def uirepr(s):
1905 def uirepr(s):
1906 # Avoid double backslash in Windows path repr()
1906 # Avoid double backslash in Windows path repr()
1907 return repr(s).replace('\\\\', '\\')
1907 return repr(s).replace('\\\\', '\\')
1908
1908
1909 # delay import of textwrap
1909 # delay import of textwrap
1910 def MBTextWrapper(**kwargs):
1910 def MBTextWrapper(**kwargs):
1911 class tw(textwrap.TextWrapper):
1911 class tw(textwrap.TextWrapper):
1912 """
1912 """
1913 Extend TextWrapper for width-awareness.
1913 Extend TextWrapper for width-awareness.
1914
1914
1915 Neither the number of 'bytes' in any encoding nor the number of
1915 Neither the number of 'bytes' in any encoding nor the number of
1916 'characters' is appropriate for calculating terminal columns of a string.
1916 'characters' is appropriate for calculating terminal columns of a string.
1917
1917
1918 The original TextWrapper implementation uses the built-in 'len()' directly,
1918 The original TextWrapper implementation uses the built-in 'len()' directly,
1919 so overriding is needed to use the width information of each character.
1919 so overriding is needed to use the width information of each character.
1920
1920
1921 In addition, characters classified as having 'ambiguous' width are
1921 In addition, characters classified as having 'ambiguous' width are
1922 treated as wide in East Asian locales, but as narrow elsewhere.
1922 treated as wide in East Asian locales, but as narrow elsewhere.
1923
1923
1924 This requires a user decision to determine the width of such characters.
1924 This requires a user decision to determine the width of such characters.
1925 """
1925 """
1926 def _cutdown(self, ucstr, space_left):
1926 def _cutdown(self, ucstr, space_left):
1927 l = 0
1927 l = 0
1928 colwidth = encoding.ucolwidth
1928 colwidth = encoding.ucolwidth
1929 for i in xrange(len(ucstr)):
1929 for i in xrange(len(ucstr)):
1930 l += colwidth(ucstr[i])
1930 l += colwidth(ucstr[i])
1931 if space_left < l:
1931 if space_left < l:
1932 return (ucstr[:i], ucstr[i:])
1932 return (ucstr[:i], ucstr[i:])
1933 return ucstr, ''
1933 return ucstr, ''
1934
1934
1935 # overriding of base class
1935 # overriding of base class
1936 def _handle_long_word(self, reversed_chunks, cur_line, cur_len, width):
1936 def _handle_long_word(self, reversed_chunks, cur_line, cur_len, width):
1937 space_left = max(width - cur_len, 1)
1937 space_left = max(width - cur_len, 1)
1938
1938
1939 if self.break_long_words:
1939 if self.break_long_words:
1940 cut, res = self._cutdown(reversed_chunks[-1], space_left)
1940 cut, res = self._cutdown(reversed_chunks[-1], space_left)
1941 cur_line.append(cut)
1941 cur_line.append(cut)
1942 reversed_chunks[-1] = res
1942 reversed_chunks[-1] = res
1943 elif not cur_line:
1943 elif not cur_line:
1944 cur_line.append(reversed_chunks.pop())
1944 cur_line.append(reversed_chunks.pop())
1945
1945
1946 # this overriding code is imported from TextWrapper of Python 2.6
1946 # this overriding code is imported from TextWrapper of Python 2.6
1947 # to calculate columns of string by 'encoding.ucolwidth()'
1947 # to calculate columns of string by 'encoding.ucolwidth()'
1948 def _wrap_chunks(self, chunks):
1948 def _wrap_chunks(self, chunks):
1949 colwidth = encoding.ucolwidth
1949 colwidth = encoding.ucolwidth
1950
1950
1951 lines = []
1951 lines = []
1952 if self.width <= 0:
1952 if self.width <= 0:
1953 raise ValueError("invalid width %r (must be > 0)" % self.width)
1953 raise ValueError("invalid width %r (must be > 0)" % self.width)
1954
1954
1955 # Arrange in reverse order so items can be efficiently popped
1955 # Arrange in reverse order so items can be efficiently popped
1956 # from a stack of chunks.
1956 # from a stack of chunks.
1957 chunks.reverse()
1957 chunks.reverse()
1958
1958
1959 while chunks:
1959 while chunks:
1960
1960
1961 # Start the list of chunks that will make up the current line.
1961 # Start the list of chunks that will make up the current line.
1962 # cur_len is just the length of all the chunks in cur_line.
1962 # cur_len is just the length of all the chunks in cur_line.
1963 cur_line = []
1963 cur_line = []
1964 cur_len = 0
1964 cur_len = 0
1965
1965
1966 # Figure out which static string will prefix this line.
1966 # Figure out which static string will prefix this line.
1967 if lines:
1967 if lines:
1968 indent = self.subsequent_indent
1968 indent = self.subsequent_indent
1969 else:
1969 else:
1970 indent = self.initial_indent
1970 indent = self.initial_indent
1971
1971
1972 # Maximum width for this line.
1972 # Maximum width for this line.
1973 width = self.width - len(indent)
1973 width = self.width - len(indent)
1974
1974
1975 # First chunk on line is whitespace -- drop it, unless this
1975 # First chunk on line is whitespace -- drop it, unless this
1976 # is the very beginning of the text (i.e. no lines started yet).
1976 # is the very beginning of the text (i.e. no lines started yet).
1977 if self.drop_whitespace and chunks[-1].strip() == '' and lines:
1977 if self.drop_whitespace and chunks[-1].strip() == '' and lines:
1978 del chunks[-1]
1978 del chunks[-1]
1979
1979
1980 while chunks:
1980 while chunks:
1981 l = colwidth(chunks[-1])
1981 l = colwidth(chunks[-1])
1982
1982
1983 # Can at least squeeze this chunk onto the current line.
1983 # Can at least squeeze this chunk onto the current line.
1984 if cur_len + l <= width:
1984 if cur_len + l <= width:
1985 cur_line.append(chunks.pop())
1985 cur_line.append(chunks.pop())
1986 cur_len += l
1986 cur_len += l
1987
1987
1988 # Nope, this line is full.
1988 # Nope, this line is full.
1989 else:
1989 else:
1990 break
1990 break
1991
1991
1992 # The current line is full, and the next chunk is too big to
1992 # The current line is full, and the next chunk is too big to
1993 # fit on *any* line (not just this one).
1993 # fit on *any* line (not just this one).
1994 if chunks and colwidth(chunks[-1]) > width:
1994 if chunks and colwidth(chunks[-1]) > width:
1995 self._handle_long_word(chunks, cur_line, cur_len, width)
1995 self._handle_long_word(chunks, cur_line, cur_len, width)
1996
1996
1997 # If the last chunk on this line is all whitespace, drop it.
1997 # If the last chunk on this line is all whitespace, drop it.
1998 if (self.drop_whitespace and
1998 if (self.drop_whitespace and
1999 cur_line and cur_line[-1].strip() == ''):
1999 cur_line and cur_line[-1].strip() == ''):
2000 del cur_line[-1]
2000 del cur_line[-1]
2001
2001
2002 # Convert current line back to a string and store it in list
2002 # Convert current line back to a string and store it in list
2003 # of all lines (return value).
2003 # of all lines (return value).
2004 if cur_line:
2004 if cur_line:
2005 lines.append(indent + ''.join(cur_line))
2005 lines.append(indent + ''.join(cur_line))
2006
2006
2007 return lines
2007 return lines
2008
2008
2009 global MBTextWrapper
2009 global MBTextWrapper
2010 MBTextWrapper = tw
2010 MBTextWrapper = tw
2011 return tw(**kwargs)
2011 return tw(**kwargs)
2012
2012
2013 def wrap(line, width, initindent='', hangindent=''):
2013 def wrap(line, width, initindent='', hangindent=''):
2014 maxindent = max(len(hangindent), len(initindent))
2014 maxindent = max(len(hangindent), len(initindent))
2015 if width <= maxindent:
2015 if width <= maxindent:
2016 # adjust for weird terminal size
2016 # adjust for weird terminal size
2017 width = max(78, maxindent + 1)
2017 width = max(78, maxindent + 1)
2018 line = line.decode(encoding.encoding, encoding.encodingmode)
2018 line = line.decode(encoding.encoding, encoding.encodingmode)
2019 initindent = initindent.decode(encoding.encoding, encoding.encodingmode)
2019 initindent = initindent.decode(encoding.encoding, encoding.encodingmode)
2020 hangindent = hangindent.decode(encoding.encoding, encoding.encodingmode)
2020 hangindent = hangindent.decode(encoding.encoding, encoding.encodingmode)
2021 wrapper = MBTextWrapper(width=width,
2021 wrapper = MBTextWrapper(width=width,
2022 initial_indent=initindent,
2022 initial_indent=initindent,
2023 subsequent_indent=hangindent)
2023 subsequent_indent=hangindent)
2024 return wrapper.fill(line).encode(encoding.encoding)
2024 return wrapper.fill(line).encode(encoding.encoding)
2025
2025
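A minimal usage sketch (editorial, not part of the original source): wrap() folds a byte string to the requested column width, using encoding.ucolwidth so wide East Asian characters count as two columns. For plain ASCII input the behaviour matches textwrap:

    >>> wrap('aaa bbb ccc ddd', 7)
    'aaa bbb\nccc ddd'
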
2026 def iterlines(iterator):
2026 def iterlines(iterator):
2027 for chunk in iterator:
2027 for chunk in iterator:
2028 for line in chunk.splitlines():
2028 for line in chunk.splitlines():
2029 yield line
2029 yield line
2030
2030
2031 def expandpath(path):
2031 def expandpath(path):
2032 return os.path.expanduser(os.path.expandvars(path))
2032 return os.path.expanduser(os.path.expandvars(path))
2033
2033
2034 def hgcmd():
2034 def hgcmd():
2035 """Return the command used to execute current hg
2035 """Return the command used to execute current hg
2036
2036
2037 This is different from hgexecutable() because on Windows we want
2037 This is different from hgexecutable() because on Windows we want
2038 to avoid things opening new shell windows like batch files, so we
2038 to avoid things opening new shell windows like batch files, so we
2039 get either the python call or current executable.
2039 get either the python call or current executable.
2040 """
2040 """
2041 if mainfrozen():
2041 if mainfrozen():
2042 if getattr(sys, 'frozen', None) == 'macosx_app':
2042 if getattr(sys, 'frozen', None) == 'macosx_app':
2043 # Env variable set by py2app
2043 # Env variable set by py2app
2044 return [os.environ['EXECUTABLEPATH']]
2044 return [os.environ['EXECUTABLEPATH']]
2045 else:
2045 else:
2046 return [sys.executable]
2046 return [sys.executable]
2047 return gethgcmd()
2047 return gethgcmd()
2048
2048
2049 def rundetached(args, condfn):
2049 def rundetached(args, condfn):
2050 """Execute the argument list in a detached process.
2050 """Execute the argument list in a detached process.
2051
2051
2052 condfn is a callable which is called repeatedly and should return
2052 condfn is a callable which is called repeatedly and should return
2053 True once the child process is known to have started successfully.
2053 True once the child process is known to have started successfully.
2054 At this point, the child process PID is returned. If the child
2054 At this point, the child process PID is returned. If the child
2055 process fails to start or finishes before condfn() evaluates to
2055 process fails to start or finishes before condfn() evaluates to
2056 True, return -1.
2056 True, return -1.
2057 """
2057 """
2058 # Windows case is easier because the child process is either
2058 # Windows case is easier because the child process is either
2059 # successfully starting and validating the condition or exiting
2059 # successfully starting and validating the condition or exiting
2060 # on failure. We just poll on its PID. On Unix, if the child
2060 # on failure. We just poll on its PID. On Unix, if the child
2061 # process fails to start, it will be left in a zombie state until
2061 # process fails to start, it will be left in a zombie state until
2062 # the parent waits on it, which we cannot do since we expect a long
2062 # the parent waits on it, which we cannot do since we expect a long
2063 # running process on success. Instead we listen for SIGCHLD telling
2063 # running process on success. Instead we listen for SIGCHLD telling
2064 # us our child process terminated.
2064 # us our child process terminated.
2065 terminated = set()
2065 terminated = set()
2066 def handler(signum, frame):
2066 def handler(signum, frame):
2067 terminated.add(os.wait())
2067 terminated.add(os.wait())
2068 prevhandler = None
2068 prevhandler = None
2069 SIGCHLD = getattr(signal, 'SIGCHLD', None)
2069 SIGCHLD = getattr(signal, 'SIGCHLD', None)
2070 if SIGCHLD is not None:
2070 if SIGCHLD is not None:
2071 prevhandler = signal.signal(SIGCHLD, handler)
2071 prevhandler = signal.signal(SIGCHLD, handler)
2072 try:
2072 try:
2073 pid = spawndetached(args)
2073 pid = spawndetached(args)
2074 while not condfn():
2074 while not condfn():
2075 if ((pid in terminated or not testpid(pid))
2075 if ((pid in terminated or not testpid(pid))
2076 and not condfn()):
2076 and not condfn()):
2077 return -1
2077 return -1
2078 time.sleep(0.1)
2078 time.sleep(0.1)
2079 return pid
2079 return pid
2080 finally:
2080 finally:
2081 if prevhandler is not None:
2081 if prevhandler is not None:
2082 signal.signal(signal.SIGCHLD, prevhandler)
2082 signal.signal(signal.SIGCHLD, prevhandler)
2083
2083
2084 def interpolate(prefix, mapping, s, fn=None, escape_prefix=False):
2084 def interpolate(prefix, mapping, s, fn=None, escape_prefix=False):
2085 """Return the result of interpolating items in the mapping into string s.
2085 """Return the result of interpolating items in the mapping into string s.
2086
2086
2087 prefix is a single character string, or a two character string with
2087 prefix is a single character string, or a two character string with
2088 a backslash as the first character if the prefix needs to be escaped in
2088 a backslash as the first character if the prefix needs to be escaped in
2089 a regular expression.
2089 a regular expression.
2090
2090
2091 fn is an optional function that will be applied to the replacement text
2091 fn is an optional function that will be applied to the replacement text
2092 just before replacement.
2092 just before replacement.
2093
2093
2094 escape_prefix is an optional flag that allows the prefix to be escaped
2094 escape_prefix is an optional flag that allows the prefix to be escaped
2095 by doubling it.
2095 by doubling it.
2096 """
2096 """
2097 fn = fn or (lambda s: s)
2097 fn = fn or (lambda s: s)
2098 patterns = '|'.join(mapping.keys())
2098 patterns = '|'.join(mapping.keys())
2099 if escape_prefix:
2099 if escape_prefix:
2100 patterns += '|' + prefix
2100 patterns += '|' + prefix
2101 if len(prefix) > 1:
2101 if len(prefix) > 1:
2102 prefix_char = prefix[1:]
2102 prefix_char = prefix[1:]
2103 else:
2103 else:
2104 prefix_char = prefix
2104 prefix_char = prefix
2105 mapping[prefix_char] = prefix_char
2105 mapping[prefix_char] = prefix_char
2106 r = remod.compile(r'%s(%s)' % (prefix, patterns))
2106 r = remod.compile(r'%s(%s)' % (prefix, patterns))
2107 return r.sub(lambda x: fn(mapping[x.group()[1:]]), s)
2107 return r.sub(lambda x: fn(mapping[x.group()[1:]]), s)
2108
2108
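A minimal sketch of interpolate() (editorial, not part of the original source), using a hypothetical '%' prefix and mapping:

    >>> interpolate('%', {'foo': 'bar'}, 'say %foo')
    'say bar'

With escape_prefix=True, a doubled prefix in s is emitted as a single literal prefix character.
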
2109 def getport(port):
2109 def getport(port):
2110 """Return the port for a given network service.
2110 """Return the port for a given network service.
2111
2111
2112 If port is an integer, it's returned as is. If it's a string, it's
2112 If port is an integer, it's returned as is. If it's a string, it's
2113 looked up using socket.getservbyname(). If there's no matching
2113 looked up using socket.getservbyname(). If there's no matching
2114 service, error.Abort is raised.
2114 service, error.Abort is raised.
2115 """
2115 """
2116 try:
2116 try:
2117 return int(port)
2117 return int(port)
2118 except ValueError:
2118 except ValueError:
2119 pass
2119 pass
2120
2120
2121 try:
2121 try:
2122 return socket.getservbyname(port)
2122 return socket.getservbyname(port)
2123 except socket.error:
2123 except socket.error:
2124 raise Abort(_("no port number associated with service '%s'") % port)
2124 raise Abort(_("no port number associated with service '%s'") % port)
2125
2125
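A quick sketch (editorial, not part of the original source): integer-like values pass straight through, while names are resolved via socket.getservbyname(), so the result for a service name depends on the local services database:

    >>> getport(80), getport('6667')
    (80, 6667)
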
2126 _booleans = {'1': True, 'yes': True, 'true': True, 'on': True, 'always': True,
2126 _booleans = {'1': True, 'yes': True, 'true': True, 'on': True, 'always': True,
2127 '0': False, 'no': False, 'false': False, 'off': False,
2127 '0': False, 'no': False, 'false': False, 'off': False,
2128 'never': False}
2128 'never': False}
2129
2129
2130 def parsebool(s):
2130 def parsebool(s):
2131 """Parse s into a boolean.
2131 """Parse s into a boolean.
2132
2132
2133 If s is not a valid boolean, returns None.
2133 If s is not a valid boolean, returns None.
2134 """
2134 """
2135 return _booleans.get(s.lower(), None)
2135 return _booleans.get(s.lower(), None)
2136
2136
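For illustration (editorial, not part of the original source), the recognized spellings come from the _booleans table above and matching is case-insensitive:

    >>> parsebool('on'), parsebool('Never'), parsebool('maybe')
    (True, False, None)
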
2137 _hexdig = '0123456789ABCDEFabcdef'
2137 _hexdig = '0123456789ABCDEFabcdef'
2138 _hextochr = dict((a + b, chr(int(a + b, 16)))
2138 _hextochr = dict((a + b, chr(int(a + b, 16)))
2139 for a in _hexdig for b in _hexdig)
2139 for a in _hexdig for b in _hexdig)
2140
2140
2141 def _urlunquote(s):
2141 def _urlunquote(s):
2142 """Decode HTTP/HTML % encoding.
2142 """Decode HTTP/HTML % encoding.
2143
2143
2144 >>> _urlunquote('abc%20def')
2144 >>> _urlunquote('abc%20def')
2145 'abc def'
2145 'abc def'
2146 """
2146 """
2147 res = s.split('%')
2147 res = s.split('%')
2148 # fastpath
2148 # fastpath
2149 if len(res) == 1:
2149 if len(res) == 1:
2150 return s
2150 return s
2151 s = res[0]
2151 s = res[0]
2152 for item in res[1:]:
2152 for item in res[1:]:
2153 try:
2153 try:
2154 s += _hextochr[item[:2]] + item[2:]
2154 s += _hextochr[item[:2]] + item[2:]
2155 except KeyError:
2155 except KeyError:
2156 s += '%' + item
2156 s += '%' + item
2157 except UnicodeDecodeError:
2157 except UnicodeDecodeError:
2158 s += unichr(int(item[:2], 16)) + item[2:]
2158 s += unichr(int(item[:2], 16)) + item[2:]
2159 return s
2159 return s
2160
2160
2161 class url(object):
2161 class url(object):
2162 r"""Reliable URL parser.
2162 r"""Reliable URL parser.
2163
2163
2164 This parses URLs and provides attributes for the following
2164 This parses URLs and provides attributes for the following
2165 components:
2165 components:
2166
2166
2167 <scheme>://<user>:<passwd>@<host>:<port>/<path>?<query>#<fragment>
2167 <scheme>://<user>:<passwd>@<host>:<port>/<path>?<query>#<fragment>
2168
2168
2169 Missing components are set to None. The only exception is
2169 Missing components are set to None. The only exception is
2170 fragment, which is set to '' if present but empty.
2170 fragment, which is set to '' if present but empty.
2171
2171
2172 If parsefragment is False, fragment is included in query. If
2172 If parsefragment is False, fragment is included in query. If
2173 parsequery is False, query is included in path. If both are
2173 parsequery is False, query is included in path. If both are
2174 False, both fragment and query are included in path.
2174 False, both fragment and query are included in path.
2175
2175
2176 See http://www.ietf.org/rfc/rfc2396.txt for more information.
2176 See http://www.ietf.org/rfc/rfc2396.txt for more information.
2177
2177
2178 Note that for backward compatibility reasons, bundle URLs do not
2178 Note that for backward compatibility reasons, bundle URLs do not
2179 take host names. That means 'bundle://../' has a path of '../'.
2179 take host names. That means 'bundle://../' has a path of '../'.
2180
2180
2181 Examples:
2181 Examples:
2182
2182
2183 >>> url('http://www.ietf.org/rfc/rfc2396.txt')
2183 >>> url('http://www.ietf.org/rfc/rfc2396.txt')
2184 <url scheme: 'http', host: 'www.ietf.org', path: 'rfc/rfc2396.txt'>
2184 <url scheme: 'http', host: 'www.ietf.org', path: 'rfc/rfc2396.txt'>
2185 >>> url('ssh://[::1]:2200//home/joe/repo')
2185 >>> url('ssh://[::1]:2200//home/joe/repo')
2186 <url scheme: 'ssh', host: '[::1]', port: '2200', path: '/home/joe/repo'>
2186 <url scheme: 'ssh', host: '[::1]', port: '2200', path: '/home/joe/repo'>
2187 >>> url('file:///home/joe/repo')
2187 >>> url('file:///home/joe/repo')
2188 <url scheme: 'file', path: '/home/joe/repo'>
2188 <url scheme: 'file', path: '/home/joe/repo'>
2189 >>> url('file:///c:/temp/foo/')
2189 >>> url('file:///c:/temp/foo/')
2190 <url scheme: 'file', path: 'c:/temp/foo/'>
2190 <url scheme: 'file', path: 'c:/temp/foo/'>
2191 >>> url('bundle:foo')
2191 >>> url('bundle:foo')
2192 <url scheme: 'bundle', path: 'foo'>
2192 <url scheme: 'bundle', path: 'foo'>
2193 >>> url('bundle://../foo')
2193 >>> url('bundle://../foo')
2194 <url scheme: 'bundle', path: '../foo'>
2194 <url scheme: 'bundle', path: '../foo'>
2195 >>> url(r'c:\foo\bar')
2195 >>> url(r'c:\foo\bar')
2196 <url path: 'c:\\foo\\bar'>
2196 <url path: 'c:\\foo\\bar'>
2197 >>> url(r'\\blah\blah\blah')
2197 >>> url(r'\\blah\blah\blah')
2198 <url path: '\\\\blah\\blah\\blah'>
2198 <url path: '\\\\blah\\blah\\blah'>
2199 >>> url(r'\\blah\blah\blah#baz')
2199 >>> url(r'\\blah\blah\blah#baz')
2200 <url path: '\\\\blah\\blah\\blah', fragment: 'baz'>
2200 <url path: '\\\\blah\\blah\\blah', fragment: 'baz'>
2201 >>> url(r'file:///C:\users\me')
2201 >>> url(r'file:///C:\users\me')
2202 <url scheme: 'file', path: 'C:\\users\\me'>
2202 <url scheme: 'file', path: 'C:\\users\\me'>
2203
2203
2204 Authentication credentials:
2204 Authentication credentials:
2205
2205
2206 >>> url('ssh://joe:xyz@x/repo')
2206 >>> url('ssh://joe:xyz@x/repo')
2207 <url scheme: 'ssh', user: 'joe', passwd: 'xyz', host: 'x', path: 'repo'>
2207 <url scheme: 'ssh', user: 'joe', passwd: 'xyz', host: 'x', path: 'repo'>
2208 >>> url('ssh://joe@x/repo')
2208 >>> url('ssh://joe@x/repo')
2209 <url scheme: 'ssh', user: 'joe', host: 'x', path: 'repo'>
2209 <url scheme: 'ssh', user: 'joe', host: 'x', path: 'repo'>
2210
2210
2211 Query strings and fragments:
2211 Query strings and fragments:
2212
2212
2213 >>> url('http://host/a?b#c')
2213 >>> url('http://host/a?b#c')
2214 <url scheme: 'http', host: 'host', path: 'a', query: 'b', fragment: 'c'>
2214 <url scheme: 'http', host: 'host', path: 'a', query: 'b', fragment: 'c'>
2215 >>> url('http://host/a?b#c', parsequery=False, parsefragment=False)
2215 >>> url('http://host/a?b#c', parsequery=False, parsefragment=False)
2216 <url scheme: 'http', host: 'host', path: 'a?b#c'>
2216 <url scheme: 'http', host: 'host', path: 'a?b#c'>
2217 """
2217 """
2218
2218
2219 _safechars = "!~*'()+"
2219 _safechars = "!~*'()+"
2220 _safepchars = "/!~*'()+:\\"
2220 _safepchars = "/!~*'()+:\\"
2221 _matchscheme = remod.compile(r'^[a-zA-Z0-9+.\-]+:').match
2221 _matchscheme = remod.compile(r'^[a-zA-Z0-9+.\-]+:').match
2222
2222
2223 def __init__(self, path, parsequery=True, parsefragment=True):
2223 def __init__(self, path, parsequery=True, parsefragment=True):
2224 # We slowly chomp away at path until we have only the path left
2224 # We slowly chomp away at path until we have only the path left
2225 self.scheme = self.user = self.passwd = self.host = None
2225 self.scheme = self.user = self.passwd = self.host = None
2226 self.port = self.path = self.query = self.fragment = None
2226 self.port = self.path = self.query = self.fragment = None
2227 self._localpath = True
2227 self._localpath = True
2228 self._hostport = ''
2228 self._hostport = ''
2229 self._origpath = path
2229 self._origpath = path
2230
2230
2231 if parsefragment and '#' in path:
2231 if parsefragment and '#' in path:
2232 path, self.fragment = path.split('#', 1)
2232 path, self.fragment = path.split('#', 1)
2233 if not path:
2233 if not path:
2234 path = None
2234 path = None
2235
2235
2236 # special case for Windows drive letters and UNC paths
2236 # special case for Windows drive letters and UNC paths
2237 if hasdriveletter(path) or path.startswith(r'\\'):
2237 if hasdriveletter(path) or path.startswith(r'\\'):
2238 self.path = path
2238 self.path = path
2239 return
2239 return
2240
2240
2241 # For compatibility reasons, we can't handle bundle paths as
2241 # For compatibility reasons, we can't handle bundle paths as
2242 # normal URLs
2242 # normal URLs
2243 if path.startswith('bundle:'):
2243 if path.startswith('bundle:'):
2244 self.scheme = 'bundle'
2244 self.scheme = 'bundle'
2245 path = path[7:]
2245 path = path[7:]
2246 if path.startswith('//'):
2246 if path.startswith('//'):
2247 path = path[2:]
2247 path = path[2:]
2248 self.path = path
2248 self.path = path
2249 return
2249 return
2250
2250
2251 if self._matchscheme(path):
2251 if self._matchscheme(path):
2252 parts = path.split(':', 1)
2252 parts = path.split(':', 1)
2253 if parts[0]:
2253 if parts[0]:
2254 self.scheme, path = parts
2254 self.scheme, path = parts
2255 self._localpath = False
2255 self._localpath = False
2256
2256
2257 if not path:
2257 if not path:
2258 path = None
2258 path = None
2259 if self._localpath:
2259 if self._localpath:
2260 self.path = ''
2260 self.path = ''
2261 return
2261 return
2262 else:
2262 else:
2263 if self._localpath:
2263 if self._localpath:
2264 self.path = path
2264 self.path = path
2265 return
2265 return
2266
2266
2267 if parsequery and '?' in path:
2267 if parsequery and '?' in path:
2268 path, self.query = path.split('?', 1)
2268 path, self.query = path.split('?', 1)
2269 if not path:
2269 if not path:
2270 path = None
2270 path = None
2271 if not self.query:
2271 if not self.query:
2272 self.query = None
2272 self.query = None
2273
2273
2274 # // is required to specify a host/authority
2274 # // is required to specify a host/authority
2275 if path and path.startswith('//'):
2275 if path and path.startswith('//'):
2276 parts = path[2:].split('/', 1)
2276 parts = path[2:].split('/', 1)
2277 if len(parts) > 1:
2277 if len(parts) > 1:
2278 self.host, path = parts
2278 self.host, path = parts
2279 else:
2279 else:
2280 self.host = parts[0]
2280 self.host = parts[0]
2281 path = None
2281 path = None
2282 if not self.host:
2282 if not self.host:
2283 self.host = None
2283 self.host = None
2284 # path of file:///d is /d
2284 # path of file:///d is /d
2285 # path of file:///d:/ is d:/, not /d:/
2285 # path of file:///d:/ is d:/, not /d:/
2286 if path and not hasdriveletter(path):
2286 if path and not hasdriveletter(path):
2287 path = '/' + path
2287 path = '/' + path
2288
2288
2289 if self.host and '@' in self.host:
2289 if self.host and '@' in self.host:
2290 self.user, self.host = self.host.rsplit('@', 1)
2290 self.user, self.host = self.host.rsplit('@', 1)
2291 if ':' in self.user:
2291 if ':' in self.user:
2292 self.user, self.passwd = self.user.split(':', 1)
2292 self.user, self.passwd = self.user.split(':', 1)
2293 if not self.host:
2293 if not self.host:
2294 self.host = None
2294 self.host = None
2295
2295
2296 # Don't split on colons in IPv6 addresses without ports
2296 # Don't split on colons in IPv6 addresses without ports
2297 if (self.host and ':' in self.host and
2297 if (self.host and ':' in self.host and
2298 not (self.host.startswith('[') and self.host.endswith(']'))):
2298 not (self.host.startswith('[') and self.host.endswith(']'))):
2299 self._hostport = self.host
2299 self._hostport = self.host
2300 self.host, self.port = self.host.rsplit(':', 1)
2300 self.host, self.port = self.host.rsplit(':', 1)
2301 if not self.host:
2301 if not self.host:
2302 self.host = None
2302 self.host = None
2303
2303
2304 if (self.host and self.scheme == 'file' and
2304 if (self.host and self.scheme == 'file' and
2305 self.host not in ('localhost', '127.0.0.1', '[::1]')):
2305 self.host not in ('localhost', '127.0.0.1', '[::1]')):
2306 raise Abort(_('file:// URLs can only refer to localhost'))
2306 raise Abort(_('file:// URLs can only refer to localhost'))
2307
2307
2308 self.path = path
2308 self.path = path
2309
2309
2310 # leave the query string escaped
2310 # leave the query string escaped
2311 for a in ('user', 'passwd', 'host', 'port',
2311 for a in ('user', 'passwd', 'host', 'port',
2312 'path', 'fragment'):
2312 'path', 'fragment'):
2313 v = getattr(self, a)
2313 v = getattr(self, a)
2314 if v is not None:
2314 if v is not None:
2315 setattr(self, a, _urlunquote(v))
2315 setattr(self, a, _urlunquote(v))
2316
2316
2317 def __repr__(self):
2317 def __repr__(self):
2318 attrs = []
2318 attrs = []
2319 for a in ('scheme', 'user', 'passwd', 'host', 'port', 'path',
2319 for a in ('scheme', 'user', 'passwd', 'host', 'port', 'path',
2320 'query', 'fragment'):
2320 'query', 'fragment'):
2321 v = getattr(self, a)
2321 v = getattr(self, a)
2322 if v is not None:
2322 if v is not None:
2323 attrs.append('%s: %r' % (a, v))
2323 attrs.append('%s: %r' % (a, v))
2324 return '<url %s>' % ', '.join(attrs)
2324 return '<url %s>' % ', '.join(attrs)
2325
2325
2326 def __str__(self):
2326 def __str__(self):
2327 r"""Join the URL's components back into a URL string.
2327 r"""Join the URL's components back into a URL string.
2328
2328
2329 Examples:
2329 Examples:
2330
2330
2331 >>> str(url('http://user:pw@host:80/c:/bob?fo:oo#ba:ar'))
2331 >>> str(url('http://user:pw@host:80/c:/bob?fo:oo#ba:ar'))
2332 'http://user:pw@host:80/c:/bob?fo:oo#ba:ar'
2332 'http://user:pw@host:80/c:/bob?fo:oo#ba:ar'
2333 >>> str(url('http://user:pw@host:80/?foo=bar&baz=42'))
2333 >>> str(url('http://user:pw@host:80/?foo=bar&baz=42'))
2334 'http://user:pw@host:80/?foo=bar&baz=42'
2334 'http://user:pw@host:80/?foo=bar&baz=42'
2335 >>> str(url('http://user:pw@host:80/?foo=bar%3dbaz'))
2335 >>> str(url('http://user:pw@host:80/?foo=bar%3dbaz'))
2336 'http://user:pw@host:80/?foo=bar%3dbaz'
2336 'http://user:pw@host:80/?foo=bar%3dbaz'
2337 >>> str(url('ssh://user:pw@[::1]:2200//home/joe#'))
2337 >>> str(url('ssh://user:pw@[::1]:2200//home/joe#'))
2338 'ssh://user:pw@[::1]:2200//home/joe#'
2338 'ssh://user:pw@[::1]:2200//home/joe#'
2339 >>> str(url('http://localhost:80//'))
2339 >>> str(url('http://localhost:80//'))
2340 'http://localhost:80//'
2340 'http://localhost:80//'
2341 >>> str(url('http://localhost:80/'))
2341 >>> str(url('http://localhost:80/'))
2342 'http://localhost:80/'
2342 'http://localhost:80/'
2343 >>> str(url('http://localhost:80'))
2343 >>> str(url('http://localhost:80'))
2344 'http://localhost:80/'
2344 'http://localhost:80/'
2345 >>> str(url('bundle:foo'))
2345 >>> str(url('bundle:foo'))
2346 'bundle:foo'
2346 'bundle:foo'
2347 >>> str(url('bundle://../foo'))
2347 >>> str(url('bundle://../foo'))
2348 'bundle:../foo'
2348 'bundle:../foo'
2349 >>> str(url('path'))
2349 >>> str(url('path'))
2350 'path'
2350 'path'
2351 >>> str(url('file:///tmp/foo/bar'))
2351 >>> str(url('file:///tmp/foo/bar'))
2352 'file:///tmp/foo/bar'
2352 'file:///tmp/foo/bar'
2353 >>> str(url('file:///c:/tmp/foo/bar'))
2353 >>> str(url('file:///c:/tmp/foo/bar'))
2354 'file:///c:/tmp/foo/bar'
2354 'file:///c:/tmp/foo/bar'
2355 >>> print url(r'bundle:foo\bar')
2355 >>> print url(r'bundle:foo\bar')
2356 bundle:foo\bar
2356 bundle:foo\bar
2357 >>> print url(r'file:///D:\data\hg')
2357 >>> print url(r'file:///D:\data\hg')
2358 file:///D:\data\hg
2358 file:///D:\data\hg
2359 """
2359 """
2360 if self._localpath:
2360 if self._localpath:
2361 s = self.path
2361 s = self.path
2362 if self.scheme == 'bundle':
2362 if self.scheme == 'bundle':
2363 s = 'bundle:' + s
2363 s = 'bundle:' + s
2364 if self.fragment:
2364 if self.fragment:
2365 s += '#' + self.fragment
2365 s += '#' + self.fragment
2366 return s
2366 return s
2367
2367
2368 s = self.scheme + ':'
2368 s = self.scheme + ':'
2369 if self.user or self.passwd or self.host:
2369 if self.user or self.passwd or self.host:
2370 s += '//'
2370 s += '//'
2371 elif self.scheme and (not self.path or self.path.startswith('/')
2371 elif self.scheme and (not self.path or self.path.startswith('/')
2372 or hasdriveletter(self.path)):
2372 or hasdriveletter(self.path)):
2373 s += '//'
2373 s += '//'
2374 if hasdriveletter(self.path):
2374 if hasdriveletter(self.path):
2375 s += '/'
2375 s += '/'
2376 if self.user:
2376 if self.user:
2377 s += urllib.quote(self.user, safe=self._safechars)
2377 s += urllib.quote(self.user, safe=self._safechars)
2378 if self.passwd:
2378 if self.passwd:
2379 s += ':' + urllib.quote(self.passwd, safe=self._safechars)
2379 s += ':' + urllib.quote(self.passwd, safe=self._safechars)
2380 if self.user or self.passwd:
2380 if self.user or self.passwd:
2381 s += '@'
2381 s += '@'
2382 if self.host:
2382 if self.host:
2383 if not (self.host.startswith('[') and self.host.endswith(']')):
2383 if not (self.host.startswith('[') and self.host.endswith(']')):
2384 s += urllib.quote(self.host)
2384 s += urllib.quote(self.host)
2385 else:
2385 else:
2386 s += self.host
2386 s += self.host
2387 if self.port:
2387 if self.port:
2388 s += ':' + urllib.quote(self.port)
2388 s += ':' + urllib.quote(self.port)
2389 if self.host:
2389 if self.host:
2390 s += '/'
2390 s += '/'
2391 if self.path:
2391 if self.path:
2392 # TODO: similar to the query string, we should not unescape the
2392 # TODO: similar to the query string, we should not unescape the
2393 # path when we store it, the path might contain '%2f' = '/',
2393 # path when we store it, the path might contain '%2f' = '/',
2394 # which we should *not* escape.
2394 # which we should *not* escape.
2395 s += urllib.quote(self.path, safe=self._safepchars)
2395 s += urllib.quote(self.path, safe=self._safepchars)
2396 if self.query:
2396 if self.query:
2397 # we store the query in escaped form.
2397 # we store the query in escaped form.
2398 s += '?' + self.query
2398 s += '?' + self.query
2399 if self.fragment is not None:
2399 if self.fragment is not None:
2400 s += '#' + urllib.quote(self.fragment, safe=self._safepchars)
2400 s += '#' + urllib.quote(self.fragment, safe=self._safepchars)
2401 return s
2401 return s
2402
2402
2403 def authinfo(self):
2403 def authinfo(self):
2404 user, passwd = self.user, self.passwd
2404 user, passwd = self.user, self.passwd
2405 try:
2405 try:
2406 self.user, self.passwd = None, None
2406 self.user, self.passwd = None, None
2407 s = str(self)
2407 s = str(self)
2408 finally:
2408 finally:
2409 self.user, self.passwd = user, passwd
2409 self.user, self.passwd = user, passwd
2410 if not self.user:
2410 if not self.user:
2411 return (s, None)
2411 return (s, None)
2412 # authinfo[1] is passed to urllib2 password manager, and its
2412 # authinfo[1] is passed to urllib2 password manager, and its
2413 # URIs must not contain credentials. The host is passed in the
2413 # URIs must not contain credentials. The host is passed in the
2414 # URIs list because Python < 2.4.3 uses only that to search for
2414 # URIs list because Python < 2.4.3 uses only that to search for
2415 # a password.
2415 # a password.
2416 return (s, (None, (s, self.host),
2416 return (s, (None, (s, self.host),
2417 self.user, self.passwd or ''))
2417 self.user, self.passwd or ''))
2418
2418
2419 def isabs(self):
2419 def isabs(self):
2420 if self.scheme and self.scheme != 'file':
2420 if self.scheme and self.scheme != 'file':
2421 return True # remote URL
2421 return True # remote URL
2422 if hasdriveletter(self.path):
2422 if hasdriveletter(self.path):
2423 return True # absolute for our purposes - can't be joined()
2423 return True # absolute for our purposes - can't be joined()
2424 if self.path.startswith(r'\\'):
2424 if self.path.startswith(r'\\'):
2425 return True # Windows UNC path
2425 return True # Windows UNC path
2426 if self.path.startswith('/'):
2426 if self.path.startswith('/'):
2427 return True # POSIX-style
2427 return True # POSIX-style
2428 return False
2428 return False
2429
2429
2430 def localpath(self):
2430 def localpath(self):
2431 if self.scheme == 'file' or self.scheme == 'bundle':
2431 if self.scheme == 'file' or self.scheme == 'bundle':
2432 path = self.path or '/'
2432 path = self.path or '/'
2433 # For Windows, we need to promote hosts containing drive
2433 # For Windows, we need to promote hosts containing drive
2434 # letters to paths with drive letters.
2434 # letters to paths with drive letters.
2435 if hasdriveletter(self._hostport):
2435 if hasdriveletter(self._hostport):
2436 path = self._hostport + '/' + self.path
2436 path = self._hostport + '/' + self.path
2437 elif (self.host is not None and self.path
2437 elif (self.host is not None and self.path
2438 and not hasdriveletter(path)):
2438 and not hasdriveletter(path)):
2439 path = '/' + path
2439 path = '/' + path
2440 return path
2440 return path
2441 return self._origpath
2441 return self._origpath
2442
2442
2443 def islocal(self):
2443 def islocal(self):
2444 '''whether localpath will return something that posixfile can open'''
2444 '''whether localpath will return something that posixfile can open'''
2445 return (not self.scheme or self.scheme == 'file'
2445 return (not self.scheme or self.scheme == 'file'
2446 or self.scheme == 'bundle')
2446 or self.scheme == 'bundle')
2447
2447
2448 def hasscheme(path):
2448 def hasscheme(path):
2449 return bool(url(path).scheme)
2449 return bool(url(path).scheme)
2450
2450
2451 def hasdriveletter(path):
2451 def hasdriveletter(path):
2452 return path and path[1:2] == ':' and path[0:1].isalpha()
2452 return path and path[1:2] == ':' and path[0:1].isalpha()
2453
2453
2454 def urllocalpath(path):
2454 def urllocalpath(path):
2455 return url(path, parsequery=False, parsefragment=False).localpath()
2455 return url(path, parsequery=False, parsefragment=False).localpath()
2456
2456
2457 def hidepassword(u):
2457 def hidepassword(u):
2458 '''hide user credential in a url string'''
2458 '''hide user credential in a url string'''
2459 u = url(u)
2459 u = url(u)
2460 if u.passwd:
2460 if u.passwd:
2461 u.passwd = '***'
2461 u.passwd = '***'
2462 return str(u)
2462 return str(u)
2463
2463
2464 def removeauth(u):
2464 def removeauth(u):
2465 '''remove all authentication information from a url string'''
2465 '''remove all authentication information from a url string'''
2466 u = url(u)
2466 u = url(u)
2467 u.user = u.passwd = None
2467 u.user = u.passwd = None
2468 return str(u)
2468 return str(u)
2469
2469
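A short sketch of the two helpers above (editorial, not part of the original source), using a made-up URL:

    >>> hidepassword('http://alice:secret@example.com/repo')
    'http://alice:***@example.com/repo'
    >>> removeauth('http://alice:secret@example.com/repo')
    'http://example.com/repo'
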
2470 def isatty(fp):
2470 def isatty(fp):
2471 try:
2471 try:
2472 return fp.isatty()
2472 return fp.isatty()
2473 except AttributeError:
2473 except AttributeError:
2474 return False
2474 return False
2475
2475
2476 timecount = unitcountfn(
2476 timecount = unitcountfn(
2477 (1, 1e3, _('%.0f s')),
2477 (1, 1e3, _('%.0f s')),
2478 (100, 1, _('%.1f s')),
2478 (100, 1, _('%.1f s')),
2479 (10, 1, _('%.2f s')),
2479 (10, 1, _('%.2f s')),
2480 (1, 1, _('%.3f s')),
2480 (1, 1, _('%.3f s')),
2481 (100, 0.001, _('%.1f ms')),
2481 (100, 0.001, _('%.1f ms')),
2482 (10, 0.001, _('%.2f ms')),
2482 (10, 0.001, _('%.2f ms')),
2483 (1, 0.001, _('%.3f ms')),
2483 (1, 0.001, _('%.3f ms')),
2484 (100, 0.000001, _('%.1f us')),
2484 (100, 0.000001, _('%.1f us')),
2485 (10, 0.000001, _('%.2f us')),
2485 (10, 0.000001, _('%.2f us')),
2486 (1, 0.000001, _('%.3f us')),
2486 (1, 0.000001, _('%.3f us')),
2487 (100, 0.000000001, _('%.1f ns')),
2487 (100, 0.000000001, _('%.1f ns')),
2488 (10, 0.000000001, _('%.2f ns')),
2488 (10, 0.000000001, _('%.2f ns')),
2489 (1, 0.000000001, _('%.3f ns')),
2489 (1, 0.000000001, _('%.3f ns')),
2490 )
2490 )
2491
2491
2492 _timenesting = [0]
2492 _timenesting = [0]
2493
2493
2494 def timed(func):
2494 def timed(func):
2495 '''Report the execution time of a function call to stderr.
2495 '''Report the execution time of a function call to stderr.
2496
2496
2497 During development, use as a decorator when you need to measure
2497 During development, use as a decorator when you need to measure
2498 the cost of a function, e.g. as follows:
2498 the cost of a function, e.g. as follows:
2499
2499
2500 @util.timed
2500 @util.timed
2501 def foo(a, b, c):
2501 def foo(a, b, c):
2502 pass
2502 pass
2503 '''
2503 '''
2504
2504
2505 def wrapper(*args, **kwargs):
2505 def wrapper(*args, **kwargs):
2506 start = time.time()
2506 start = time.time()
2507 indent = 2
2507 indent = 2
2508 _timenesting[0] += indent
2508 _timenesting[0] += indent
2509 try:
2509 try:
2510 return func(*args, **kwargs)
2510 return func(*args, **kwargs)
2511 finally:
2511 finally:
2512 elapsed = time.time() - start
2512 elapsed = time.time() - start
2513 _timenesting[0] -= indent
2513 _timenesting[0] -= indent
2514 sys.stderr.write('%s%s: %s\n' %
2514 sys.stderr.write('%s%s: %s\n' %
2515 (' ' * _timenesting[0], func.__name__,
2515 (' ' * _timenesting[0], func.__name__,
2516 timecount(elapsed)))
2516 timecount(elapsed)))
2517 return wrapper
2517 return wrapper
2518
2518
2519 _sizeunits = (('m', 2**20), ('k', 2**10), ('g', 2**30),
2519 _sizeunits = (('m', 2**20), ('k', 2**10), ('g', 2**30),
2520 ('kb', 2**10), ('mb', 2**20), ('gb', 2**30), ('b', 1))
2520 ('kb', 2**10), ('mb', 2**20), ('gb', 2**30), ('b', 1))
2521
2521
2522 def sizetoint(s):
2522 def sizetoint(s):
2523 '''Convert a space specifier to a byte count.
2523 '''Convert a space specifier to a byte count.
2524
2524
2525 >>> sizetoint('30')
2525 >>> sizetoint('30')
2526 30
2526 30
2527 >>> sizetoint('2.2kb')
2527 >>> sizetoint('2.2kb')
2528 2252
2528 2252
2529 >>> sizetoint('6M')
2529 >>> sizetoint('6M')
2530 6291456
2530 6291456
2531 '''
2531 '''
2532 t = s.strip().lower()
2532 t = s.strip().lower()
2533 try:
2533 try:
2534 for k, u in _sizeunits:
2534 for k, u in _sizeunits:
2535 if t.endswith(k):
2535 if t.endswith(k):
2536 return int(float(t[:-len(k)]) * u)
2536 return int(float(t[:-len(k)]) * u)
2537 return int(t)
2537 return int(t)
2538 except ValueError:
2538 except ValueError:
2539 raise error.ParseError(_("couldn't parse size: %s") % s)
2539 raise error.ParseError(_("couldn't parse size: %s") % s)
2540
2540
2541 class hooks(object):
2541 class hooks(object):
2542 '''A collection of hook functions that can be used to extend a
2542 '''A collection of hook functions that can be used to extend a
2543 function's behavior. Hooks are called in lexicographic order,
2543 function's behavior. Hooks are called in lexicographic order,
2544 based on the names of their sources.'''
2544 based on the names of their sources.'''
2545
2545
2546 def __init__(self):
2546 def __init__(self):
2547 self._hooks = []
2547 self._hooks = []
2548
2548
2549 def add(self, source, hook):
2549 def add(self, source, hook):
2550 self._hooks.append((source, hook))
2550 self._hooks.append((source, hook))
2551
2551
2552 def __call__(self, *args):
2552 def __call__(self, *args):
2553 self._hooks.sort(key=lambda x: x[0])
2553 self._hooks.sort(key=lambda x: x[0])
2554 results = []
2554 results = []
2555 for source, hook in self._hooks:
2555 for source, hook in self._hooks:
2556 results.append(hook(*args))
2556 results.append(hook(*args))
2557 return results
2557 return results
2558
2558
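A minimal sketch of the hooks class (editorial, not part of the original source); note that hooks run ordered by their source name, not by the order in which they were added:

    >>> h = hooks()
    >>> h.add('b-source', lambda x: x + 1)
    >>> h.add('a-source', lambda x: x * 2)
    >>> h(3)
    [6, 4]
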
2559 def getstackframes(skip=0, line=' %-*s in %s\n', fileline='%s:%s'):
2559 def getstackframes(skip=0, line=' %-*s in %s\n', fileline='%s:%s'):
2560 '''Yields lines for a nicely formatted stacktrace.
2560 '''Yields lines for a nicely formatted stacktrace.
2561 Skips the last 'skip' entries.
2561 Skips the last 'skip' entries.
2562 Each file+linenumber is formatted according to fileline.
2562 Each file+linenumber is formatted according to fileline.
2563 Each line is formatted according to line.
2563 Each line is formatted according to line.
2564 If line is None, it yields:
2564 If line is None, it yields:
2565 length of longest filepath+line number,
2565 length of longest filepath+line number,
2566 filepath+linenumber,
2566 filepath+linenumber,
2567 function
2567 function
2568
2568
2569 Not to be used in production code, but very convenient while developing.
2569 Not to be used in production code, but very convenient while developing.
2570 '''
2570 '''
2571 entries = [(fileline % (fn, ln), func)
2571 entries = [(fileline % (fn, ln), func)
2572 for fn, ln, func, _text in traceback.extract_stack()[:-skip - 1]]
2572 for fn, ln, func, _text in traceback.extract_stack()[:-skip - 1]]
2573 if entries:
2573 if entries:
2574 fnmax = max(len(entry[0]) for entry in entries)
2574 fnmax = max(len(entry[0]) for entry in entries)
2575 for fnln, func in entries:
2575 for fnln, func in entries:
2576 if line is None:
2576 if line is None:
2577 yield (fnmax, fnln, func)
2577 yield (fnmax, fnln, func)
2578 else:
2578 else:
2579 yield line % (fnmax, fnln, func)
2579 yield line % (fnmax, fnln, func)
2580
2580
2581 def debugstacktrace(msg='stacktrace', skip=0, f=sys.stderr, otherf=sys.stdout):
2581 def debugstacktrace(msg='stacktrace', skip=0, f=sys.stderr, otherf=sys.stdout):
2582 '''Writes a message to f (stderr) with a nicely formatted stacktrace.
2582 '''Writes a message to f (stderr) with a nicely formatted stacktrace.
2583 Skips the last 'skip' entries. By default it will flush stdout first.
2583 Skips the last 'skip' entries. By default it will flush stdout first.
2584 It can be used everywhere and intentionally does not require an ui object.
2584 It can be used everywhere and intentionally does not require an ui object.
2585 Not to be used in production code, but very convenient while developing.
2585 Not to be used in production code, but very convenient while developing.
2586 '''
2586 '''
2587 if otherf:
2587 if otherf:
2588 otherf.flush()
2588 otherf.flush()
2589 f.write('%s at:\n' % msg)
2589 f.write('%s at:\n' % msg)
2590 for line in getstackframes(skip + 1):
2590 for line in getstackframes(skip + 1):
2591 f.write(line)
2591 f.write(line)
2592 f.flush()
2592 f.flush()
2593
2593
2594 class dirs(object):
2594 class dirs(object):
2595 '''a multiset of directory names from a dirstate or manifest'''
2595 '''a multiset of directory names from a dirstate or manifest'''
2596
2596
2597 def __init__(self, map, skip=None):
2597 def __init__(self, map, skip=None):
2598 self._dirs = {}
2598 self._dirs = {}
2599 addpath = self.addpath
2599 addpath = self.addpath
2600 if safehasattr(map, 'iteritems') and skip is not None:
2600 if safehasattr(map, 'iteritems') and skip is not None:
2601 for f, s in map.iteritems():
2601 for f, s in map.iteritems():
2602 if s[0] != skip:
2602 if s[0] != skip:
2603 addpath(f)
2603 addpath(f)
2604 else:
2604 else:
2605 for f in map:
2605 for f in map:
2606 addpath(f)
2606 addpath(f)
2607
2607
2608 def addpath(self, path):
2608 def addpath(self, path):
2609 dirs = self._dirs
2609 dirs = self._dirs
2610 for base in finddirs(path):
2610 for base in finddirs(path):
2611 if base in dirs:
2611 if base in dirs:
2612 dirs[base] += 1
2612 dirs[base] += 1
2613 return
2613 return
2614 dirs[base] = 1
2614 dirs[base] = 1
2615
2615
2616 def delpath(self, path):
2616 def delpath(self, path):
2617 dirs = self._dirs
2617 dirs = self._dirs
2618 for base in finddirs(path):
2618 for base in finddirs(path):
2619 if dirs[base] > 1:
2619 if dirs[base] > 1:
2620 dirs[base] -= 1
2620 dirs[base] -= 1
2621 return
2621 return
2622 del dirs[base]
2622 del dirs[base]
2623
2623
2624 def __iter__(self):
2624 def __iter__(self):
2625 return self._dirs.iterkeys()
2625 return self._dirs.iterkeys()
2626
2626
2627 def __contains__(self, d):
2627 def __contains__(self, d):
2628 return d in self._dirs
2628 return d in self._dirs
2629
2629
2630 if safehasattr(parsers, 'dirs'):
2630 if safehasattr(parsers, 'dirs'):
2631 dirs = parsers.dirs
2631 dirs = parsers.dirs
2632
2632
2633 def finddirs(path):
2633 def finddirs(path):
2634 pos = path.rfind('/')
2634 pos = path.rfind('/')
2635 while pos != -1:
2635 while pos != -1:
2636 yield path[:pos]
2636 yield path[:pos]
2637 pos = path.rfind('/', 0, pos)
2637 pos = path.rfind('/', 0, pos)
2638
2638
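For illustration (editorial, not part of the original source), finddirs() yields every ancestor directory of a path from deepest to shallowest, and the dirs class above counts those ancestors as a multiset:

    >>> list(finddirs('a/b/c'))
    ['a/b', 'a']
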
2639 # compression utility
2639 # compression utility
2640
2640
2641 class nocompress(object):
2641 class nocompress(object):
2642 def compress(self, x):
2642 def compress(self, x):
2643 return x
2643 return x
2644 def flush(self):
2644 def flush(self):
2645 return ""
2645 return ""
2646
2646
2647 compressors = {
2647 compressors = {
2648 None: nocompress,
2648 None: nocompress,
2649 # lambda to prevent early import
2649 # lambda to prevent early import
2650 'BZ': lambda: bz2.BZ2Compressor(),
2650 'BZ': lambda: bz2.BZ2Compressor(),
2651 'GZ': lambda: zlib.compressobj(),
2651 'GZ': lambda: zlib.compressobj(),
2652 }
2652 }
2653 # also support the old form as a courtesy
2653 # also support the old form as a courtesy
2654 compressors['UN'] = compressors[None]
2654 compressors['UN'] = compressors[None]
2655
2655
2656 def _makedecompressor(decompcls):
2656 def _makedecompressor(decompcls):
2657 def generator(f):
2657 def generator(f):
2658 d = decompcls()
2658 d = decompcls()
2659 for chunk in filechunkiter(f):
2659 for chunk in filechunkiter(f):
2660 yield d.decompress(chunk)
2660 yield d.decompress(chunk)
2661 def func(fh):
2661 def func(fh):
2662 return chunkbuffer(generator(fh))
2662 return chunkbuffer(generator(fh))
2663 return func
2663 return func
2664
2664
2665 class ctxmanager(object):
2665 class ctxmanager(object):
2666 '''A context manager for use in 'with' blocks to allow multiple
2666 '''A context manager for use in 'with' blocks to allow multiple
2667 contexts to be entered at once. This is both safer and more
2667 contexts to be entered at once. This is both safer and more
2668 flexible than contextlib.nested.
2668 flexible than contextlib.nested.
2669
2669
2670 Once Mercurial supports Python 2.7+, this will become mostly
2670 Once Mercurial supports Python 2.7+, this will become mostly
2671 unnecessary.
2671 unnecessary.
2672 '''
2672 '''
2673
2673
2674 def __init__(self, *args):
2674 def __init__(self, *args):
2675 '''Accepts a list of no-argument functions that return context
2675 '''Accepts a list of no-argument functions that return context
2676 managers. These will be invoked at __call__ time.'''
2676 managers. These will be invoked at __call__ time.'''
2677 self._pending = args
2677 self._pending = args
2678 self._atexit = []
2678 self._atexit = []
2679
2679
2680 def __enter__(self):
2680 def __enter__(self):
2681 return self
2681 return self
2682
2682
2683 def enter(self):
2683 def enter(self):
2684 '''Create and enter context managers in the order in which they were
2684 '''Create and enter context managers in the order in which they were
2685 passed to the constructor.'''
2685 passed to the constructor.'''
2686 values = []
2686 values = []
2687 for func in self._pending:
2687 for func in self._pending:
2688 obj = func()
2688 obj = func()
2689 values.append(obj.__enter__())
2689 values.append(obj.__enter__())
2690 self._atexit.append(obj.__exit__)
2690 self._atexit.append(obj.__exit__)
2691 del self._pending
2691 del self._pending
2692 return values
2692 return values
2693
2693
2694 def atexit(self, func, *args, **kwargs):
2694 def atexit(self, func, *args, **kwargs):
2695 '''Add a function to call when this context manager exits. The
2695 '''Add a function to call when this context manager exits. The
2696 ordering of multiple atexit calls is unspecified, save that
2696 ordering of multiple atexit calls is unspecified, save that
2697 they will happen before any __exit__ functions.'''
2697 they will happen before any __exit__ functions.'''
2698 def wrapper(exc_type, exc_val, exc_tb):
2698 def wrapper(exc_type, exc_val, exc_tb):
2699 func(*args, **kwargs)
2699 func(*args, **kwargs)
2700 self._atexit.append(wrapper)
2700 self._atexit.append(wrapper)
2701 return func
2701 return func
2702
2702
2703 def __exit__(self, exc_type, exc_val, exc_tb):
2703 def __exit__(self, exc_type, exc_val, exc_tb):
2704 '''Context managers are exited in the reverse order from which
2704 '''Context managers are exited in the reverse order from which
2705 they were created.'''
2705 they were created.'''
2706 received = exc_type is not None
2706 received = exc_type is not None
2707 suppressed = False
2707 suppressed = False
2708 pending = None
2708 pending = None
2709 self._atexit.reverse()
2709 self._atexit.reverse()
2710 for exitfunc in self._atexit:
2710 for exitfunc in self._atexit:
2711 try:
2711 try:
2712 if exitfunc(exc_type, exc_val, exc_tb):
2712 if exitfunc(exc_type, exc_val, exc_tb):
2713 suppressed = True
2713 suppressed = True
2714 exc_type = None
2714 exc_type = None
2715 exc_val = None
2715 exc_val = None
2716 exc_tb = None
2716 exc_tb = None
2717 except BaseException:
2717 except BaseException:
2718 pending = sys.exc_info()
2718 pending = sys.exc_info()
2719 exc_type, exc_val, exc_tb = pending = sys.exc_info()
2719 exc_type, exc_val, exc_tb = pending = sys.exc_info()
2720 del self._atexit
2720 del self._atexit
2721 if pending:
2721 if pending:
2722 raise exc_val
2722 raise exc_val
2723 return received and suppressed
2723 return received and suppressed
2724
2724
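A usage sketch for ctxmanager (editorial, not part of the original source); cm1, cm2 and cleanup are hypothetical callables, not names used elsewhere in this file:

    with ctxmanager(lambda: cm1(), lambda: cm2()) as c:
        res1, res2 = c.enter()   # managers entered in constructor order
        c.atexit(cleanup)        # runs before any __exit__ on the way out
        # ... work with res1 and res2 ...
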
2725 def _bz2():
2725 def _bz2():
2726 d = bz2.BZ2Decompressor()
2726 d = bz2.BZ2Decompressor()
2727 # Bzip2 streams start with BZ, but we stripped it.
2727 # Bzip2 streams start with BZ, but we stripped it.
2728 # We put it back for good measure.
2728 # We put it back for good measure.
2729 d.decompress('BZ')
2729 d.decompress('BZ')
2730 return d
2730 return d
2731
2731
2732 decompressors = {None: lambda fh: fh,
2732 decompressors = {None: lambda fh: fh,
2733 '_truncatedBZ': _makedecompressor(_bz2),
2733 '_truncatedBZ': _makedecompressor(_bz2),
2734 'BZ': _makedecompressor(lambda: bz2.BZ2Decompressor()),
2734 'BZ': _makedecompressor(lambda: bz2.BZ2Decompressor()),
2735 'GZ': _makedecompressor(lambda: zlib.decompressobj()),
2735 'GZ': _makedecompressor(lambda: zlib.decompressobj()),
2736 }
2736 }
2737 # also support the old form as a courtesy
2737 # also support the old form as a courtesy
2738 decompressors['UN'] = decompressors[None]
2738 decompressors['UN'] = decompressors[None]
2739
2739
2740 # convenient shortcut
2740 # convenient shortcut
2741 dst = debugstacktrace
2741 dst = debugstacktrace
@@ -1,692 +1,692
1 commit date test
1 commit date test
2
2
3 $ hg init test
3 $ hg init test
4 $ cd test
4 $ cd test
5 $ echo foo > foo
5 $ echo foo > foo
6 $ hg add foo
6 $ hg add foo
7 $ cat > $TESTTMP/checkeditform.sh <<EOF
7 $ cat > $TESTTMP/checkeditform.sh <<EOF
8 > env | grep HGEDITFORM
8 > env | grep HGEDITFORM
9 > true
9 > true
10 > EOF
10 > EOF
11 $ HGEDITOR="sh $TESTTMP/checkeditform.sh" hg commit -m ""
11 $ HGEDITOR="sh $TESTTMP/checkeditform.sh" hg commit -m ""
12 HGEDITFORM=commit.normal.normal
12 HGEDITFORM=commit.normal.normal
13 abort: empty commit message
13 abort: empty commit message
14 [255]
14 [255]
15 $ hg commit -d '0 0' -m commit-1
15 $ hg commit -d '0 0' -m commit-1
16 $ echo foo >> foo
16 $ echo foo >> foo
17 $ hg commit -d '1 4444444' -m commit-3
17 $ hg commit -d '1 4444444' -m commit-3
18 abort: impossible time zone offset: 4444444
18 abort: impossible time zone offset: 4444444
19 [255]
19 [255]
20 $ hg commit -d '1 15.1' -m commit-4
20 $ hg commit -d '1 15.1' -m commit-4
21 abort: invalid date: '1\t15.1'
21 abort: invalid date: '1\t15.1'
22 [255]
22 [255]
23 $ hg commit -d 'foo bar' -m commit-5
23 $ hg commit -d 'foo bar' -m commit-5
24 abort: invalid date: 'foo bar'
24 abort: invalid date: 'foo bar'
25 [255]
25 [255]
26 $ hg commit -d ' 1 4444' -m commit-6
26 $ hg commit -d ' 1 4444' -m commit-6
27 $ hg commit -d '111111111111 0' -m commit-7
27 $ hg commit -d '111111111111 0' -m commit-7
28 abort: date exceeds 32 bits: 111111111111
28 abort: date exceeds 32 bits: 111111111111
29 [255]
29 [255]
30 $ hg commit -d '-111111111111 0' -m commit-7
30 $ hg commit -d '-111111111111 0' -m commit-7
31 abort: date exceeds 32 bits: -111111111111
31 abort: date exceeds 32 bits: -111111111111
32 [255]
32 [255]
33 $ echo foo >> foo
33 $ echo foo >> foo
34 $ hg commit -d '1901-12-13 20:45:53 +0000' -m commit-7-2
34 $ hg commit -d '1901-12-13 20:45:52 +0000' -m commit-7-2
35 $ echo foo >> foo
35 $ echo foo >> foo
36 $ hg commit -d '-2147483647 0' -m commit-7-3
36 $ hg commit -d '-2147483648 0' -m commit-7-3
37 $ hg log -T '{rev} {date|isodatesec}\n' -l2
37 $ hg log -T '{rev} {date|isodatesec}\n' -l2
38 3 1901-12-13 20:45:53 +0000
38 3 1901-12-13 20:45:52 +0000
39 2 1901-12-13 20:45:53 +0000
39 2 1901-12-13 20:45:52 +0000
40 $ hg commit -d '1901-12-13 20:45:52 +0000' -m commit-7
40 $ hg commit -d '1901-12-13 20:45:51 +0000' -m commit-7
41 abort: date exceeds 32 bits: -2147483648
41 abort: date exceeds 32 bits: -2147483649
42 [255]
42 [255]
43 $ hg commit -d '-2147483648 0' -m commit-7
43 $ hg commit -d '-2147483649 0' -m commit-7
44 abort: date exceeds 32 bits: -2147483648
44 abort: date exceeds 32 bits: -2147483649
45 [255]
45 [255]
46
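(Editorial note, not part of the original test: the boundary exercised above is the smallest signed 32-bit value, -2147483648 seconds before the epoch, which corresponds to 1901-12-13 20:45:52 UTC, as a quick check outside the suite confirms.)

  $ python -c 'import datetime; print(datetime.datetime(1970, 1, 1) + datetime.timedelta(seconds=-2**31))'
  1901-12-13 20:45:52
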
46
47 commit added file that has been deleted
47 commit added file that has been deleted
48
48
49 $ echo bar > bar
49 $ echo bar > bar
50 $ hg add bar
50 $ hg add bar
51 $ rm bar
51 $ rm bar
52 $ hg commit -m commit-8
52 $ hg commit -m commit-8
53 nothing changed (1 missing files, see 'hg status')
53 nothing changed (1 missing files, see 'hg status')
54 [1]
54 [1]
55 $ hg commit -m commit-8-2 bar
55 $ hg commit -m commit-8-2 bar
56 abort: bar: file not found!
56 abort: bar: file not found!
57 [255]
57 [255]
58
58
59 $ hg -q revert -a --no-backup
59 $ hg -q revert -a --no-backup
60
60
61 $ mkdir dir
61 $ mkdir dir
62 $ echo boo > dir/file
62 $ echo boo > dir/file
63 $ hg add
63 $ hg add
64 adding dir/file (glob)
64 adding dir/file (glob)
65 $ hg -v commit -m commit-9 dir
65 $ hg -v commit -m commit-9 dir
66 committing files:
66 committing files:
67 dir/file
67 dir/file
68 committing manifest
68 committing manifest
69 committing changelog
69 committing changelog
70 committed changeset 4:76aab26859d7
70 committed changeset 4:1957363f1ced
71
71
72 $ echo > dir.file
72 $ echo > dir.file
73 $ hg add
73 $ hg add
74 adding dir.file
74 adding dir.file
75 $ hg commit -m commit-10 dir dir.file
75 $ hg commit -m commit-10 dir dir.file
76 abort: dir: no match under directory!
76 abort: dir: no match under directory!
77 [255]
77 [255]
78
78
79 $ echo >> dir/file
79 $ echo >> dir/file
80 $ mkdir bleh
80 $ mkdir bleh
81 $ mkdir dir2
81 $ mkdir dir2
82 $ cd bleh
82 $ cd bleh
83 $ hg commit -m commit-11 .
83 $ hg commit -m commit-11 .
84 abort: bleh: no match under directory!
84 abort: bleh: no match under directory!
85 [255]
85 [255]
86 $ hg commit -m commit-12 ../dir ../dir2
86 $ hg commit -m commit-12 ../dir ../dir2
87 abort: dir2: no match under directory!
87 abort: dir2: no match under directory!
88 [255]
88 [255]
89 $ hg -v commit -m commit-13 ../dir
89 $ hg -v commit -m commit-13 ../dir
90 committing files:
90 committing files:
91 dir/file
91 dir/file
92 committing manifest
92 committing manifest
93 committing changelog
93 committing changelog
94 committed changeset 5:9a50557f1baf
94 committed changeset 5:a31d8f87544a
95 $ cd ..
95 $ cd ..
96
96
97 $ hg commit -m commit-14 does-not-exist
97 $ hg commit -m commit-14 does-not-exist
98 abort: does-not-exist: * (glob)
98 abort: does-not-exist: * (glob)
99 [255]
99 [255]
100
100
101 #if symlink
101 #if symlink
102 $ ln -s foo baz
102 $ ln -s foo baz
103 $ hg commit -m commit-15 baz
103 $ hg commit -m commit-15 baz
104 abort: baz: file not tracked!
104 abort: baz: file not tracked!
105 [255]
105 [255]
106 #endif
106 #endif
107
107
108 $ touch quux
108 $ touch quux
109 $ hg commit -m commit-16 quux
109 $ hg commit -m commit-16 quux
110 abort: quux: file not tracked!
110 abort: quux: file not tracked!
111 [255]
111 [255]
112 $ echo >> dir/file
112 $ echo >> dir/file
113 $ hg -v commit -m commit-17 dir/file
113 $ hg -v commit -m commit-17 dir/file
114 committing files:
114 committing files:
115 dir/file
115 dir/file
116 committing manifest
116 committing manifest
117 committing changelog
117 committing changelog
118 committed changeset 6:4b4c75bf422d
118 committed changeset 6:32d054c9d085
119
119
120 An empty date was interpreted as epoch origin
120 An empty date was interpreted as epoch origin
121
121
122 $ echo foo >> foo
122 $ echo foo >> foo
123 $ hg commit -d '' -m commit-no-date
123 $ hg commit -d '' -m commit-no-date
124 $ hg tip --template '{date|isodate}\n' | grep '1970'
124 $ hg tip --template '{date|isodate}\n' | grep '1970'
125 [1]
125 [1]
126
126
127 Make sure we do not obscure unknown requires file entries (issue2649)
127 Make sure we do not obscure unknown requires file entries (issue2649)
128
128
129 $ echo foo >> foo
129 $ echo foo >> foo
130 $ echo fake >> .hg/requires
130 $ echo fake >> .hg/requires
131 $ hg commit -m bla
131 $ hg commit -m bla
132 abort: repository requires features unknown to this Mercurial: fake!
132 abort: repository requires features unknown to this Mercurial: fake!
133 (see https://mercurial-scm.org/wiki/MissingRequirement for more information)
133 (see https://mercurial-scm.org/wiki/MissingRequirement for more information)
134 [255]
134 [255]
135
135
136 $ cd ..
136 $ cd ..
137
137
138
138
139 partial subdir commit test
139 partial subdir commit test
140
140
141 $ hg init test2
141 $ hg init test2
142 $ cd test2
142 $ cd test2
143 $ mkdir foo
143 $ mkdir foo
144 $ echo foo > foo/foo
144 $ echo foo > foo/foo
145 $ mkdir bar
145 $ mkdir bar
146 $ echo bar > bar/bar
146 $ echo bar > bar/bar
147 $ hg add
147 $ hg add
148 adding bar/bar (glob)
148 adding bar/bar (glob)
149 adding foo/foo (glob)
149 adding foo/foo (glob)
150 $ HGEDITOR=cat hg ci -e -m commit-subdir-1 foo
150 $ HGEDITOR=cat hg ci -e -m commit-subdir-1 foo
151 commit-subdir-1
151 commit-subdir-1
152
152
153
153
154 HG: Enter commit message. Lines beginning with 'HG:' are removed.
154 HG: Enter commit message. Lines beginning with 'HG:' are removed.
155 HG: Leave message empty to abort commit.
155 HG: Leave message empty to abort commit.
156 HG: --
156 HG: --
157 HG: user: test
157 HG: user: test
158 HG: branch 'default'
158 HG: branch 'default'
159 HG: added foo/foo
159 HG: added foo/foo
160
160
161
161
162 $ hg ci -m commit-subdir-2 bar
162 $ hg ci -m commit-subdir-2 bar
163
163
164 subdir log 1
164 subdir log 1
165
165
166 $ hg log -v foo
166 $ hg log -v foo
167 changeset: 0:f97e73a25882
167 changeset: 0:f97e73a25882
168 user: test
168 user: test
169 date: Thu Jan 01 00:00:00 1970 +0000
169 date: Thu Jan 01 00:00:00 1970 +0000
170 files: foo/foo
170 files: foo/foo
171 description:
171 description:
172 commit-subdir-1
172 commit-subdir-1
173
173
174
174
175
175
176 subdir log 2
176 subdir log 2
177
177
178 $ hg log -v bar
178 $ hg log -v bar
179 changeset: 1:aa809156d50d
179 changeset: 1:aa809156d50d
180 tag: tip
180 tag: tip
181 user: test
181 user: test
182 date: Thu Jan 01 00:00:00 1970 +0000
182 date: Thu Jan 01 00:00:00 1970 +0000
183 files: bar/bar
183 files: bar/bar
184 description:
184 description:
185 commit-subdir-2
185 commit-subdir-2
186
186
187
187
188
188
189 full log
189 full log
190
190
191 $ hg log -v
191 $ hg log -v
192 changeset: 1:aa809156d50d
192 changeset: 1:aa809156d50d
193 tag: tip
193 tag: tip
194 user: test
194 user: test
195 date: Thu Jan 01 00:00:00 1970 +0000
195 date: Thu Jan 01 00:00:00 1970 +0000
196 files: bar/bar
196 files: bar/bar
197 description:
197 description:
198 commit-subdir-2
198 commit-subdir-2
199
199
200
200
201 changeset: 0:f97e73a25882
201 changeset: 0:f97e73a25882
202 user: test
202 user: test
203 date: Thu Jan 01 00:00:00 1970 +0000
203 date: Thu Jan 01 00:00:00 1970 +0000
204 files: foo/foo
204 files: foo/foo
205 description:
205 description:
206 commit-subdir-1
206 commit-subdir-1
207
207
208
208
209 $ cd ..
209 $ cd ..
210
210
211
211
212 dot and subdir commit test
212 dot and subdir commit test
213
213
214 $ hg init test3
214 $ hg init test3
215 $ echo commit-foo-subdir > commit-log-test
215 $ echo commit-foo-subdir > commit-log-test
216 $ cd test3
216 $ cd test3
217 $ mkdir foo
217 $ mkdir foo
218 $ echo foo content > foo/plain-file
218 $ echo foo content > foo/plain-file
219 $ hg add foo/plain-file
219 $ hg add foo/plain-file
220 $ HGEDITOR=cat hg ci --edit -l ../commit-log-test foo
220 $ HGEDITOR=cat hg ci --edit -l ../commit-log-test foo
221 commit-foo-subdir
221 commit-foo-subdir
222
222
223
223
224 HG: Enter commit message. Lines beginning with 'HG:' are removed.
224 HG: Enter commit message. Lines beginning with 'HG:' are removed.
225 HG: Leave message empty to abort commit.
225 HG: Leave message empty to abort commit.
226 HG: --
226 HG: --
227 HG: user: test
227 HG: user: test
228 HG: branch 'default'
228 HG: branch 'default'
229 HG: added foo/plain-file
229 HG: added foo/plain-file
230
230
231
231
232 $ echo modified foo content > foo/plain-file
232 $ echo modified foo content > foo/plain-file
233 $ hg ci -m commit-foo-dot .
233 $ hg ci -m commit-foo-dot .
234
234
235 full log
235 full log
236
236
237 $ hg log -v
237 $ hg log -v
238 changeset: 1:95b38e3a5b2e
238 changeset: 1:95b38e3a5b2e
239 tag: tip
239 tag: tip
240 user: test
240 user: test
241 date: Thu Jan 01 00:00:00 1970 +0000
241 date: Thu Jan 01 00:00:00 1970 +0000
242 files: foo/plain-file
242 files: foo/plain-file
243 description:
243 description:
244 commit-foo-dot
244 commit-foo-dot
245
245
246
246
247 changeset: 0:65d4e9386227
247 changeset: 0:65d4e9386227
248 user: test
248 user: test
249 date: Thu Jan 01 00:00:00 1970 +0000
249 date: Thu Jan 01 00:00:00 1970 +0000
250 files: foo/plain-file
250 files: foo/plain-file
251 description:
251 description:
252 commit-foo-subdir
252 commit-foo-subdir
253
253
254
254
255
255
256 subdir log
256 subdir log
257
257
258 $ cd foo
258 $ cd foo
259 $ hg log .
259 $ hg log .
260 changeset: 1:95b38e3a5b2e
260 changeset: 1:95b38e3a5b2e
261 tag: tip
261 tag: tip
262 user: test
262 user: test
263 date: Thu Jan 01 00:00:00 1970 +0000
263 date: Thu Jan 01 00:00:00 1970 +0000
264 summary: commit-foo-dot
264 summary: commit-foo-dot
265
265
266 changeset: 0:65d4e9386227
266 changeset: 0:65d4e9386227
267 user: test
267 user: test
268 date: Thu Jan 01 00:00:00 1970 +0000
268 date: Thu Jan 01 00:00:00 1970 +0000
269 summary: commit-foo-subdir
269 summary: commit-foo-subdir
270
270
271 $ cd ..
271 $ cd ..
272 $ cd ..
272 $ cd ..
273
273
274 Issue1049: Hg permits partial commit of merge without warning
274 Issue1049: Hg permits partial commit of merge without warning
275
275
276 $ hg init issue1049
276 $ hg init issue1049
277 $ cd issue1049
277 $ cd issue1049
278 $ echo a > a
278 $ echo a > a
279 $ hg ci -Ama
279 $ hg ci -Ama
280 adding a
280 adding a
281 $ echo a >> a
281 $ echo a >> a
282 $ hg ci -mb
282 $ hg ci -mb
283 $ hg up 0
283 $ hg up 0
284 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
284 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
285 $ echo b >> a
285 $ echo b >> a
286 $ hg ci -mc
286 $ hg ci -mc
287 created new head
287 created new head
288 $ HGMERGE=true hg merge
288 $ HGMERGE=true hg merge
289 merging a
289 merging a
290 0 files updated, 1 files merged, 0 files removed, 0 files unresolved
290 0 files updated, 1 files merged, 0 files removed, 0 files unresolved
291 (branch merge, don't forget to commit)
291 (branch merge, don't forget to commit)
292
292
293 should fail because we are specifying a file name
293 should fail because we are specifying a file name
294
294
295 $ hg ci -mmerge a
295 $ hg ci -mmerge a
296 abort: cannot partially commit a merge (do not specify files or patterns)
296 abort: cannot partially commit a merge (do not specify files or patterns)
297 [255]
297 [255]
298
298
299 should fail because we are specifying a pattern
299 should fail because we are specifying a pattern
300
300
301 $ hg ci -mmerge -I a
301 $ hg ci -mmerge -I a
302 abort: cannot partially commit a merge (do not specify files or patterns)
302 abort: cannot partially commit a merge (do not specify files or patterns)
303 [255]
303 [255]
304
304
305 should succeed
305 should succeed
306
306
307 $ HGEDITOR="sh $TESTTMP/checkeditform.sh" hg ci -mmerge --edit
307 $ HGEDITOR="sh $TESTTMP/checkeditform.sh" hg ci -mmerge --edit
308 HGEDITFORM=commit.normal.merge
308 HGEDITFORM=commit.normal.merge
309 $ cd ..
309 $ cd ..
310
310
311
311
312 test commit message content
312 test commit message content
313
313
314 $ hg init commitmsg
314 $ hg init commitmsg
315 $ cd commitmsg
315 $ cd commitmsg
316 $ echo changed > changed
316 $ echo changed > changed
317 $ echo removed > removed
317 $ echo removed > removed
318 $ hg book activebookmark
318 $ hg book activebookmark
319 $ hg ci -qAm init
319 $ hg ci -qAm init
320
320
321 $ hg rm removed
321 $ hg rm removed
322 $ echo changed >> changed
322 $ echo changed >> changed
323 $ echo added > added
323 $ echo added > added
324 $ hg add added
324 $ hg add added
325 $ HGEDITOR=cat hg ci -A
325 $ HGEDITOR=cat hg ci -A
326
326
327
327
328 HG: Enter commit message. Lines beginning with 'HG:' are removed.
328 HG: Enter commit message. Lines beginning with 'HG:' are removed.
329 HG: Leave message empty to abort commit.
329 HG: Leave message empty to abort commit.
330 HG: --
330 HG: --
331 HG: user: test
331 HG: user: test
332 HG: branch 'default'
332 HG: branch 'default'
333 HG: bookmark 'activebookmark'
333 HG: bookmark 'activebookmark'
334 HG: added added
334 HG: added added
335 HG: changed changed
335 HG: changed changed
336 HG: removed removed
336 HG: removed removed
337 abort: empty commit message
337 abort: empty commit message
338 [255]
338 [255]
339
339
340 test saving last-message.txt
340 test saving last-message.txt
341
341
342 $ hg init sub
342 $ hg init sub
343 $ echo a > sub/a
343 $ echo a > sub/a
344 $ hg -R sub add sub/a
344 $ hg -R sub add sub/a
345 $ cat > sub/.hg/hgrc <<EOF
345 $ cat > sub/.hg/hgrc <<EOF
346 > [hooks]
346 > [hooks]
347 > precommit.test-saving-last-message = false
347 > precommit.test-saving-last-message = false
348 > EOF
348 > EOF
349
349
350 $ echo 'sub = sub' > .hgsub
350 $ echo 'sub = sub' > .hgsub
351 $ hg add .hgsub
351 $ hg add .hgsub
352
352
353 $ cat > $TESTTMP/editor.sh <<EOF
353 $ cat > $TESTTMP/editor.sh <<EOF
354 > echo "==== before editing:"
354 > echo "==== before editing:"
355 > cat \$1
355 > cat \$1
356 > echo "===="
356 > echo "===="
357 > echo "test saving last-message.txt" >> \$1
357 > echo "test saving last-message.txt" >> \$1
358 > EOF
358 > EOF
359
359
360 $ rm -f .hg/last-message.txt
360 $ rm -f .hg/last-message.txt
361 $ HGEDITOR="sh $TESTTMP/editor.sh" hg commit -S -q
361 $ HGEDITOR="sh $TESTTMP/editor.sh" hg commit -S -q
362 ==== before editing:
362 ==== before editing:
363
363
364
364
365 HG: Enter commit message. Lines beginning with 'HG:' are removed.
365 HG: Enter commit message. Lines beginning with 'HG:' are removed.
366 HG: Leave message empty to abort commit.
366 HG: Leave message empty to abort commit.
367 HG: --
367 HG: --
368 HG: user: test
368 HG: user: test
369 HG: branch 'default'
369 HG: branch 'default'
370 HG: bookmark 'activebookmark'
370 HG: bookmark 'activebookmark'
371 HG: subrepo sub
371 HG: subrepo sub
372 HG: added .hgsub
372 HG: added .hgsub
373 HG: added added
373 HG: added added
374 HG: changed .hgsubstate
374 HG: changed .hgsubstate
375 HG: changed changed
375 HG: changed changed
376 HG: removed removed
376 HG: removed removed
377 ====
377 ====
378 abort: precommit.test-saving-last-message hook exited with status 1 (in subrepo sub)
378 abort: precommit.test-saving-last-message hook exited with status 1 (in subrepo sub)
379 [255]
379 [255]
380 $ cat .hg/last-message.txt
380 $ cat .hg/last-message.txt
381
381
382
382
383 test saving last-message.txt
383 test saving last-message.txt
384
384
385 test that '[committemplate] changeset' definition and commit log
385 test that '[committemplate] changeset' definition and commit log
386 specific template keywords work well
386 specific template keywords work well
387
387
388 $ cat >> .hg/hgrc <<EOF
388 $ cat >> .hg/hgrc <<EOF
389 > [committemplate]
389 > [committemplate]
390 > changeset.commit.normal = HG: this is "commit.normal" template
390 > changeset.commit.normal = HG: this is "commit.normal" template
391 > HG: {extramsg}
391 > HG: {extramsg}
392 > {if(activebookmark,
392 > {if(activebookmark,
393 > "HG: bookmark '{activebookmark}' is activated\n",
393 > "HG: bookmark '{activebookmark}' is activated\n",
394 > "HG: no bookmark is activated\n")}{subrepos %
394 > "HG: no bookmark is activated\n")}{subrepos %
395 > "HG: subrepo '{subrepo}' is changed\n"}
395 > "HG: subrepo '{subrepo}' is changed\n"}
396 >
396 >
397 > changeset.commit = HG: this is "commit" template
397 > changeset.commit = HG: this is "commit" template
398 > HG: {extramsg}
398 > HG: {extramsg}
399 > {if(activebookmark,
399 > {if(activebookmark,
400 > "HG: bookmark '{activebookmark}' is activated\n",
400 > "HG: bookmark '{activebookmark}' is activated\n",
401 > "HG: no bookmark is activated\n")}{subrepos %
401 > "HG: no bookmark is activated\n")}{subrepos %
402 > "HG: subrepo '{subrepo}' is changed\n"}
402 > "HG: subrepo '{subrepo}' is changed\n"}
403 >
403 >
404 > changeset = HG: this is customized commit template
404 > changeset = HG: this is customized commit template
405 > HG: {extramsg}
405 > HG: {extramsg}
406 > {if(activebookmark,
406 > {if(activebookmark,
407 > "HG: bookmark '{activebookmark}' is activated\n",
407 > "HG: bookmark '{activebookmark}' is activated\n",
408 > "HG: no bookmark is activated\n")}{subrepos %
408 > "HG: no bookmark is activated\n")}{subrepos %
409 > "HG: subrepo '{subrepo}' is changed\n"}
409 > "HG: subrepo '{subrepo}' is changed\n"}
410 > EOF
410 > EOF
411
411
412 $ hg init sub2
412 $ hg init sub2
413 $ echo a > sub2/a
413 $ echo a > sub2/a
414 $ hg -R sub2 add sub2/a
414 $ hg -R sub2 add sub2/a
415 $ echo 'sub2 = sub2' >> .hgsub
415 $ echo 'sub2 = sub2' >> .hgsub
416
416
417 $ HGEDITOR=cat hg commit -S -q
417 $ HGEDITOR=cat hg commit -S -q
418 HG: this is "commit.normal" template
418 HG: this is "commit.normal" template
419 HG: Leave message empty to abort commit.
419 HG: Leave message empty to abort commit.
420 HG: bookmark 'activebookmark' is activated
420 HG: bookmark 'activebookmark' is activated
421 HG: subrepo 'sub' is changed
421 HG: subrepo 'sub' is changed
422 HG: subrepo 'sub2' is changed
422 HG: subrepo 'sub2' is changed
423 abort: empty commit message
423 abort: empty commit message
424 [255]
424 [255]
425
425
426 $ cat >> .hg/hgrc <<EOF
426 $ cat >> .hg/hgrc <<EOF
427 > [committemplate]
427 > [committemplate]
428 > changeset.commit.normal =
428 > changeset.commit.normal =
429 > # now, "changeset.commit" should be chosen for "hg commit"
429 > # now, "changeset.commit" should be chosen for "hg commit"
430 > EOF
430 > EOF
431
431
432 $ hg bookmark --inactive activebookmark
432 $ hg bookmark --inactive activebookmark
433 $ hg forget .hgsub
433 $ hg forget .hgsub
434 $ HGEDITOR=cat hg commit -q
434 $ HGEDITOR=cat hg commit -q
435 HG: this is "commit" template
435 HG: this is "commit" template
436 HG: Leave message empty to abort commit.
436 HG: Leave message empty to abort commit.
437 HG: no bookmark is activated
437 HG: no bookmark is activated
438 abort: empty commit message
438 abort: empty commit message
439 [255]
439 [255]
440
440
441 $ cat >> .hg/hgrc <<EOF
441 $ cat >> .hg/hgrc <<EOF
442 > [committemplate]
442 > [committemplate]
443 > changeset.commit =
443 > changeset.commit =
444 > # now, "changeset" should be chosen for "hg commit"
444 > # now, "changeset" should be chosen for "hg commit"
445 > EOF
445 > EOF
446
446
447 $ HGEDITOR=cat hg commit -q
447 $ HGEDITOR=cat hg commit -q
448 HG: this is customized commit template
448 HG: this is customized commit template
449 HG: Leave message empty to abort commit.
449 HG: Leave message empty to abort commit.
450 HG: no bookmark is activated
450 HG: no bookmark is activated
451 abort: empty commit message
451 abort: empty commit message
452 [255]
452 [255]
453
453
454 $ cat >> .hg/hgrc <<EOF
454 $ cat >> .hg/hgrc <<EOF
455 > [committemplate]
455 > [committemplate]
456 > changeset = {desc}
456 > changeset = {desc}
457 > HG: mods={file_mods}
457 > HG: mods={file_mods}
458 > HG: adds={file_adds}
458 > HG: adds={file_adds}
459 > HG: dels={file_dels}
459 > HG: dels={file_dels}
460 > HG: files={files}
460 > HG: files={files}
461 > HG:
461 > HG:
462 > {splitlines(diff()) % 'HG: {line}\n'
462 > {splitlines(diff()) % 'HG: {line}\n'
463 > }HG:
463 > }HG:
464 > HG: mods={file_mods}
464 > HG: mods={file_mods}
465 > HG: adds={file_adds}
465 > HG: adds={file_adds}
466 > HG: dels={file_dels}
466 > HG: dels={file_dels}
467 > HG: files={files}\n
467 > HG: files={files}\n
468 > EOF
468 > EOF
469 $ hg status -amr
469 $ hg status -amr
470 M changed
470 M changed
471 A added
471 A added
472 R removed
472 R removed
473 $ HGEDITOR=cat hg commit -q -e -m "foo bar" changed
473 $ HGEDITOR=cat hg commit -q -e -m "foo bar" changed
474 foo bar
474 foo bar
475 HG: mods=changed
475 HG: mods=changed
476 HG: adds=
476 HG: adds=
477 HG: dels=
477 HG: dels=
478 HG: files=changed
478 HG: files=changed
479 HG:
479 HG:
480 HG: --- a/changed Thu Jan 01 00:00:00 1970 +0000
480 HG: --- a/changed Thu Jan 01 00:00:00 1970 +0000
481 HG: +++ b/changed Thu Jan 01 00:00:00 1970 +0000
481 HG: +++ b/changed Thu Jan 01 00:00:00 1970 +0000
482 HG: @@ -1,1 +1,2 @@
482 HG: @@ -1,1 +1,2 @@
483 HG: changed
483 HG: changed
484 HG: +changed
484 HG: +changed
485 HG:
485 HG:
486 HG: mods=changed
486 HG: mods=changed
487 HG: adds=
487 HG: adds=
488 HG: dels=
488 HG: dels=
489 HG: files=changed
489 HG: files=changed
490 $ hg status -amr
490 $ hg status -amr
491 A added
491 A added
492 R removed
492 R removed
493 $ hg parents --template "M {file_mods}\nA {file_adds}\nR {file_dels}\n"
493 $ hg parents --template "M {file_mods}\nA {file_adds}\nR {file_dels}\n"
494 M changed
494 M changed
495 A
495 A
496 R
496 R
497 $ hg rollback -q
497 $ hg rollback -q
498
498
499 $ cat >> .hg/hgrc <<EOF
499 $ cat >> .hg/hgrc <<EOF
500 > [committemplate]
500 > [committemplate]
501 > changeset = {desc}
501 > changeset = {desc}
502 > HG: mods={file_mods}
502 > HG: mods={file_mods}
503 > HG: adds={file_adds}
503 > HG: adds={file_adds}
504 > HG: dels={file_dels}
504 > HG: dels={file_dels}
505 > HG: files={files}
505 > HG: files={files}
506 > HG:
506 > HG:
507 > {splitlines(diff("changed")) % 'HG: {line}\n'
507 > {splitlines(diff("changed")) % 'HG: {line}\n'
508 > }HG:
508 > }HG:
509 > HG: mods={file_mods}
509 > HG: mods={file_mods}
510 > HG: adds={file_adds}
510 > HG: adds={file_adds}
511 > HG: dels={file_dels}
511 > HG: dels={file_dels}
512 > HG: files={files}
512 > HG: files={files}
513 > HG:
513 > HG:
514 > {splitlines(diff("added")) % 'HG: {line}\n'
514 > {splitlines(diff("added")) % 'HG: {line}\n'
515 > }HG:
515 > }HG:
516 > HG: mods={file_mods}
516 > HG: mods={file_mods}
517 > HG: adds={file_adds}
517 > HG: adds={file_adds}
518 > HG: dels={file_dels}
518 > HG: dels={file_dels}
519 > HG: files={files}
519 > HG: files={files}
520 > HG:
520 > HG:
521 > {splitlines(diff("removed")) % 'HG: {line}\n'
521 > {splitlines(diff("removed")) % 'HG: {line}\n'
522 > }HG:
522 > }HG:
523 > HG: mods={file_mods}
523 > HG: mods={file_mods}
524 > HG: adds={file_adds}
524 > HG: adds={file_adds}
525 > HG: dels={file_dels}
525 > HG: dels={file_dels}
526 > HG: files={files}\n
526 > HG: files={files}\n
527 > EOF
527 > EOF
528 $ HGEDITOR=cat hg commit -q -e -m "foo bar" added removed
528 $ HGEDITOR=cat hg commit -q -e -m "foo bar" added removed
529 foo bar
529 foo bar
530 HG: mods=
530 HG: mods=
531 HG: adds=added
531 HG: adds=added
532 HG: dels=removed
532 HG: dels=removed
533 HG: files=added removed
533 HG: files=added removed
534 HG:
534 HG:
535 HG:
535 HG:
536 HG: mods=
536 HG: mods=
537 HG: adds=added
537 HG: adds=added
538 HG: dels=removed
538 HG: dels=removed
539 HG: files=added removed
539 HG: files=added removed
540 HG:
540 HG:
541 HG: --- /dev/null Thu Jan 01 00:00:00 1970 +0000
541 HG: --- /dev/null Thu Jan 01 00:00:00 1970 +0000
542 HG: +++ b/added Thu Jan 01 00:00:00 1970 +0000
542 HG: +++ b/added Thu Jan 01 00:00:00 1970 +0000
543 HG: @@ -0,0 +1,1 @@
543 HG: @@ -0,0 +1,1 @@
544 HG: +added
544 HG: +added
545 HG:
545 HG:
546 HG: mods=
546 HG: mods=
547 HG: adds=added
547 HG: adds=added
548 HG: dels=removed
548 HG: dels=removed
549 HG: files=added removed
549 HG: files=added removed
550 HG:
550 HG:
551 HG: --- a/removed Thu Jan 01 00:00:00 1970 +0000
551 HG: --- a/removed Thu Jan 01 00:00:00 1970 +0000
552 HG: +++ /dev/null Thu Jan 01 00:00:00 1970 +0000
552 HG: +++ /dev/null Thu Jan 01 00:00:00 1970 +0000
553 HG: @@ -1,1 +0,0 @@
553 HG: @@ -1,1 +0,0 @@
554 HG: -removed
554 HG: -removed
555 HG:
555 HG:
556 HG: mods=
556 HG: mods=
557 HG: adds=added
557 HG: adds=added
558 HG: dels=removed
558 HG: dels=removed
559 HG: files=added removed
559 HG: files=added removed
560 $ hg status -amr
560 $ hg status -amr
561 M changed
561 M changed
562 $ hg parents --template "M {file_mods}\nA {file_adds}\nR {file_dels}\n"
562 $ hg parents --template "M {file_mods}\nA {file_adds}\nR {file_dels}\n"
563 M
563 M
564 A added
564 A added
565 R removed
565 R removed
566 $ hg rollback -q
566 $ hg rollback -q
567
567
568 $ cat >> .hg/hgrc <<EOF
568 $ cat >> .hg/hgrc <<EOF
569 > # disable customizing for subsequent tests
569 > # disable customizing for subsequent tests
570 > [committemplate]
570 > [committemplate]
571 > changeset =
571 > changeset =
572 > EOF
572 > EOF
573
573
574 $ cd ..
574 $ cd ..
575
575
576
576
577 commit copy
577 commit copy
578
578
579 $ hg init dir2
579 $ hg init dir2
580 $ cd dir2
580 $ cd dir2
581 $ echo bleh > bar
581 $ echo bleh > bar
582 $ hg add bar
582 $ hg add bar
583 $ hg ci -m 'add bar'
583 $ hg ci -m 'add bar'
584
584
585 $ hg cp bar foo
585 $ hg cp bar foo
586 $ echo >> bar
586 $ echo >> bar
587 $ hg ci -m 'cp bar foo; change bar'
587 $ hg ci -m 'cp bar foo; change bar'
588
588
589 $ hg debugrename foo
589 $ hg debugrename foo
590 foo renamed from bar:26d3ca0dfd18e44d796b564e38dd173c9668d3a9
590 foo renamed from bar:26d3ca0dfd18e44d796b564e38dd173c9668d3a9
591 $ hg debugindex bar
591 $ hg debugindex bar
592 rev offset length ..... linkrev nodeid p1 p2 (re)
592 rev offset length ..... linkrev nodeid p1 p2 (re)
593 0 0 6 ..... 0 26d3ca0dfd18 000000000000 000000000000 (re)
593 0 0 6 ..... 0 26d3ca0dfd18 000000000000 000000000000 (re)
594 1 6 7 ..... 1 d267bddd54f7 26d3ca0dfd18 000000000000 (re)
594 1 6 7 ..... 1 d267bddd54f7 26d3ca0dfd18 000000000000 (re)
595
595
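The debugrename output above reads the copy metadata that 'hg cp' recorded at commit time; the same information is reachable from the Python API used elsewhere in this test. A hedged sketch, assuming it runs from inside the dir2 repository and that filectx.renamed() returns the recorded copy source (or a false value when there is none), as it did in this era:

    from mercurial import ui, hg
    u = ui.ui()
    r = hg.repository(u, '.')
    fctx = r['tip']['foo']       # filectx for 'foo' at the tip revision
    print(fctx.renamed())        # copy source recorded by 'hg cp bar foo'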
596 Test making empty commits
596 Test making empty commits
597 $ hg commit --config ui.allowemptycommit=True -m "empty commit"
597 $ hg commit --config ui.allowemptycommit=True -m "empty commit"
598 $ hg log -r . -v --stat
598 $ hg log -r . -v --stat
599 changeset: 2:d809f3644287
599 changeset: 2:d809f3644287
600 tag: tip
600 tag: tip
601 user: test
601 user: test
602 date: Thu Jan 01 00:00:00 1970 +0000
602 date: Thu Jan 01 00:00:00 1970 +0000
603 description:
603 description:
604 empty commit
604 empty commit
605
605
606
606
607
607
608 verify pathauditor blocks evil filepaths
608 verify pathauditor blocks evil filepaths
609 $ cat > evil-commit.py <<EOF
609 $ cat > evil-commit.py <<EOF
610 > from mercurial import ui, hg, context, node
610 > from mercurial import ui, hg, context, node
611 > notrc = u".h\u200cg".encode('utf-8') + '/hgrc'
611 > notrc = u".h\u200cg".encode('utf-8') + '/hgrc'
612 > u = ui.ui()
612 > u = ui.ui()
613 > r = hg.repository(u, '.')
613 > r = hg.repository(u, '.')
614 > def filectxfn(repo, memctx, path):
614 > def filectxfn(repo, memctx, path):
615 > return context.memfilectx(repo, path, '[hooks]\nupdate = echo owned')
615 > return context.memfilectx(repo, path, '[hooks]\nupdate = echo owned')
616 > c = context.memctx(r, [r['tip'].node(), node.nullid],
616 > c = context.memctx(r, [r['tip'].node(), node.nullid],
617 > 'evil', [notrc], filectxfn, 0)
617 > 'evil', [notrc], filectxfn, 0)
618 > r.commitctx(c)
618 > r.commitctx(c)
619 > EOF
619 > EOF
620 $ $PYTHON evil-commit.py
620 $ $PYTHON evil-commit.py
621 #if windows
621 #if windows
622 $ hg co --clean tip
622 $ hg co --clean tip
623 abort: path contains illegal component: .h\xe2\x80\x8cg\\hgrc (esc)
623 abort: path contains illegal component: .h\xe2\x80\x8cg\\hgrc (esc)
624 [255]
624 [255]
625 #else
625 #else
626 $ hg co --clean tip
626 $ hg co --clean tip
627 abort: path contains illegal component: .h\xe2\x80\x8cg/hgrc (esc)
627 abort: path contains illegal component: .h\xe2\x80\x8cg/hgrc (esc)
628 [255]
628 [255]
629 #endif
629 #endif
630
630
631 $ hg rollback -f
631 $ hg rollback -f
632 repository tip rolled back to revision 2 (undo commit)
632 repository tip rolled back to revision 2 (undo commit)
633 $ cat > evil-commit.py <<EOF
633 $ cat > evil-commit.py <<EOF
634 > from mercurial import ui, hg, context, node
634 > from mercurial import ui, hg, context, node
635 > notrc = "HG~1/hgrc"
635 > notrc = "HG~1/hgrc"
636 > u = ui.ui()
636 > u = ui.ui()
637 > r = hg.repository(u, '.')
637 > r = hg.repository(u, '.')
638 > def filectxfn(repo, memctx, path):
638 > def filectxfn(repo, memctx, path):
639 > return context.memfilectx(repo, path, '[hooks]\nupdate = echo owned')
639 > return context.memfilectx(repo, path, '[hooks]\nupdate = echo owned')
640 > c = context.memctx(r, [r['tip'].node(), node.nullid],
640 > c = context.memctx(r, [r['tip'].node(), node.nullid],
641 > 'evil', [notrc], filectxfn, 0)
641 > 'evil', [notrc], filectxfn, 0)
642 > r.commitctx(c)
642 > r.commitctx(c)
643 > EOF
643 > EOF
644 $ $PYTHON evil-commit.py
644 $ $PYTHON evil-commit.py
645 $ hg co --clean tip
645 $ hg co --clean tip
646 abort: path contains illegal component: HG~1/hgrc (glob)
646 abort: path contains illegal component: HG~1/hgrc (glob)
647 [255]
647 [255]
648
648
649 $ hg rollback -f
649 $ hg rollback -f
650 repository tip rolled back to revision 2 (undo commit)
650 repository tip rolled back to revision 2 (undo commit)
651 $ cat > evil-commit.py <<EOF
651 $ cat > evil-commit.py <<EOF
652 > from mercurial import ui, hg, context, node
652 > from mercurial import ui, hg, context, node
653 > notrc = "HG8B6C~2/hgrc"
653 > notrc = "HG8B6C~2/hgrc"
654 > u = ui.ui()
654 > u = ui.ui()
655 > r = hg.repository(u, '.')
655 > r = hg.repository(u, '.')
656 > def filectxfn(repo, memctx, path):
656 > def filectxfn(repo, memctx, path):
657 > return context.memfilectx(repo, path, '[hooks]\nupdate = echo owned')
657 > return context.memfilectx(repo, path, '[hooks]\nupdate = echo owned')
658 > c = context.memctx(r, [r['tip'].node(), node.nullid],
658 > c = context.memctx(r, [r['tip'].node(), node.nullid],
659 > 'evil', [notrc], filectxfn, 0)
659 > 'evil', [notrc], filectxfn, 0)
660 > r.commitctx(c)
660 > r.commitctx(c)
661 > EOF
661 > EOF
662 $ $PYTHON evil-commit.py
662 $ $PYTHON evil-commit.py
663 $ hg co --clean tip
663 $ hg co --clean tip
664 abort: path contains illegal component: HG8B6C~2/hgrc (glob)
664 abort: path contains illegal component: HG8B6C~2/hgrc (glob)
665 [255]
665 [255]
666
666
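The last two scripts use Windows 8.3 short-name style components (HG~1, HG8B6C~2) that could resolve to the .hg directory on some filesystems, and the first hides a zero-width character inside ".hg", which certain filesystems ignore; this is why the path auditor refuses all three. A much-simplified sketch of the short-name part of that check, purely illustrative — the helper name and the exact rules are assumptions, and the real pathauditor does considerably more:

    def _badcomponent(comp):
        # reject the repository metadata directory itself and Windows
        # 8.3 short-name aliases that could resolve to it (HG~1, HG8B6C~2)
        lower = comp.lower()
        return lower == '.hg' or (lower.startswith('hg') and '~' in lower)

A path is refused when any of its components triggers this kind of check, which matches the aborts shown above.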
667 # test that an unmodified commit template message aborts
667 # test that an unmodified commit template message aborts
668
668
669 $ hg init unmodified_commit_template
669 $ hg init unmodified_commit_template
670 $ cd unmodified_commit_template
670 $ cd unmodified_commit_template
671 $ echo foo > foo
671 $ echo foo > foo
672 $ hg add foo
672 $ hg add foo
673 $ hg commit -m "foo"
673 $ hg commit -m "foo"
674 $ cat >> .hg/hgrc <<EOF
674 $ cat >> .hg/hgrc <<EOF
675 > [committemplate]
675 > [committemplate]
676 > changeset.commit = HI THIS IS NOT STRIPPED
676 > changeset.commit = HI THIS IS NOT STRIPPED
677 > HG: this is customized commit template
677 > HG: this is customized commit template
678 > HG: {extramsg}
678 > HG: {extramsg}
679 > {if(activebookmark,
679 > {if(activebookmark,
680 > "HG: bookmark '{activebookmark}' is activated\n",
680 > "HG: bookmark '{activebookmark}' is activated\n",
681 > "HG: no bookmark is activated\n")}{subrepos %
681 > "HG: no bookmark is activated\n")}{subrepos %
682 > "HG: subrepo '{subrepo}' is changed\n"}
682 > "HG: subrepo '{subrepo}' is changed\n"}
683 > EOF
683 > EOF
684 $ cat > $TESTTMP/notouching.sh <<EOF
684 $ cat > $TESTTMP/notouching.sh <<EOF
685 > true
685 > true
686 > EOF
686 > EOF
687 $ echo foo2 > foo2
687 $ echo foo2 > foo2
688 $ hg add foo2
688 $ hg add foo2
689 $ HGEDITOR="sh $TESTTMP/notouching.sh" hg commit
689 $ HGEDITOR="sh $TESTTMP/notouching.sh" hg commit
690 abort: commit message unchanged
690 abort: commit message unchanged
691 [255]
691 [255]
692 $ cd ..
692 $ cd ..
General Comments 0
You need to be logged in to leave comments. Login now