##// END OF EJS Templates
date: reallow negative timestamp, fix for Windows buggy gmtime() (issue2513)...
Florent Gallaire -
r28825:87c6ad22 default
parent child Browse files
Show More
@@ -1,2742 +1,2741
1 # util.py - Mercurial utility functions and platform specific implementations
1 # util.py - Mercurial utility functions and platform specific implementations
2 #
2 #
3 # Copyright 2005 K. Thananchayan <thananck@yahoo.com>
3 # Copyright 2005 K. Thananchayan <thananck@yahoo.com>
4 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
5 # Copyright 2006 Vadim Gelfer <vadim.gelfer@gmail.com>
5 # Copyright 2006 Vadim Gelfer <vadim.gelfer@gmail.com>
6 #
6 #
7 # This software may be used and distributed according to the terms of the
7 # This software may be used and distributed according to the terms of the
8 # GNU General Public License version 2 or any later version.
8 # GNU General Public License version 2 or any later version.
9
9
10 """Mercurial utility functions and platform specific implementations.
10 """Mercurial utility functions and platform specific implementations.
11
11
12 This contains helper routines that are independent of the SCM core and
12 This contains helper routines that are independent of the SCM core and
13 hide platform-specific details from the core.
13 hide platform-specific details from the core.
14 """
14 """
15
15
16 from __future__ import absolute_import
16 from __future__ import absolute_import
17
17
18 import bz2
18 import bz2
19 import calendar
19 import calendar
20 import collections
20 import collections
21 import datetime
21 import datetime
22 import errno
22 import errno
23 import gc
23 import gc
24 import hashlib
24 import hashlib
25 import imp
25 import imp
26 import os
26 import os
27 import re as remod
27 import re as remod
28 import shutil
28 import shutil
29 import signal
29 import signal
30 import socket
30 import socket
31 import subprocess
31 import subprocess
32 import sys
32 import sys
33 import tempfile
33 import tempfile
34 import textwrap
34 import textwrap
35 import time
35 import time
36 import traceback
36 import traceback
37 import urllib
37 import urllib
38 import zlib
38 import zlib
39
39
40 from . import (
40 from . import (
41 encoding,
41 encoding,
42 error,
42 error,
43 i18n,
43 i18n,
44 osutil,
44 osutil,
45 parsers,
45 parsers,
46 pycompat,
46 pycompat,
47 )
47 )
48
48
# Re-export names whose location differs between Python 2 and 3; the
# pycompat shim module supplies the right object for this interpreter.
for attr in ('empty', 'queue'):
    globals()[attr] = getattr(pycompat, attr)
54
54
55 if os.name == 'nt':
55 if os.name == 'nt':
56 from . import windows as platform
56 from . import windows as platform
57 else:
57 else:
58 from . import posix as platform
58 from . import posix as platform
59
59
# Short aliases for the hash constructors used throughout Mercurial.
md5 = hashlib.md5
sha1 = hashlib.sha1
sha512 = hashlib.sha512
_ = i18n._

# Re-export the platform-specific implementations under stable names so
# the rest of the codebase never needs to know which backend module
# (windows or posix) was selected above.
cachestat = platform.cachestat
checkexec = platform.checkexec
checklink = platform.checklink
copymode = platform.copymode
executablepath = platform.executablepath
expandglobs = platform.expandglobs
explainexit = platform.explainexit
findexe = platform.findexe
gethgcmd = platform.gethgcmd
getuser = platform.getuser
getpid = os.getpid
groupmembers = platform.groupmembers
groupname = platform.groupname
hidewindow = platform.hidewindow
isexec = platform.isexec
isowner = platform.isowner
localpath = platform.localpath
lookupreg = platform.lookupreg
makedir = platform.makedir
nlinks = platform.nlinks
normpath = platform.normpath
normcase = platform.normcase
normcasespec = platform.normcasespec
normcasefallback = platform.normcasefallback
openhardlinks = platform.openhardlinks
oslink = platform.oslink
parsepatchoutput = platform.parsepatchoutput
pconvert = platform.pconvert
poll = platform.poll
popen = platform.popen
posixfile = platform.posixfile
quotecommand = platform.quotecommand
readpipe = platform.readpipe
rename = platform.rename
removedirs = platform.removedirs
samedevice = platform.samedevice
samefile = platform.samefile
samestat = platform.samestat
setbinary = platform.setbinary
setflags = platform.setflags
setsignalhandler = platform.setsignalhandler
shellquote = platform.shellquote
spawndetached = platform.spawndetached
split = platform.split
sshargs = platform.sshargs
# prefer the C implementation from osutil when it was compiled in
statfiles = getattr(osutil, 'statfiles', platform.statfiles)
statisexec = platform.statisexec
statislink = platform.statislink
termwidth = platform.termwidth
testpid = platform.testpid
umask = platform.umask
unlink = platform.unlink
unlinkpath = platform.unlinkpath
username = platform.username

# Python compatibility

# Unique sentinel distinguishing "argument absent" from an explicit None.
_notset = object()

# disable Python's problematic floating point timestamps (issue4836)
# (Python hypocritically says you shouldn't change this behavior in
# libraries, and sure enough Mercurial is not a library.)
os.stat_float_times(False)
128
128
def safehasattr(thing, attr):
    """Return True if `thing` has an attribute named `attr`.

    Works like hasattr() but relies on a private sentinel default, so only
    a genuinely missing attribute is reported as absent.
    """
    sentinel = _notset
    return getattr(thing, attr, sentinel) is not sentinel
131
131
# Digest algorithms supported by digester, mapped to their constructors.
DIGESTS = {
    'md5': hashlib.md5,
    'sha1': hashlib.sha1,
    'sha512': hashlib.sha512,
}
# List of digest types from strongest to weakest
DIGESTS_BY_STRENGTH = ['sha512', 'sha1', 'md5']

# Sanity check: every algorithm in the strength ranking must be supported.
for k in DIGESTS_BY_STRENGTH:
    assert k in DIGESTS
142
142
class digester(object):
    """helper to compute digests.

    This helper can be used to compute one or more digests given their name.

    >>> d = digester(['md5', 'sha1'])
    >>> d.update('foo')
    >>> [k for k in sorted(d)]
    ['md5', 'sha1']
    >>> d['md5']
    'acbd18db4cc2f85cedef654fccc4a4d8'
    >>> d['sha1']
    '0beec7b5ea3f0fdbc95d0dd47f3c5bc275da8a33'
    >>> digester.preferred(['md5', 'sha1'])
    'sha1'
    """

    def __init__(self, digests, s=''):
        # one live hash object per requested algorithm name
        self._hashes = {}
        for k in digests:
            if k not in DIGESTS:
                raise Abort(_('unknown digest type: %s') % k)
            self._hashes[k] = DIGESTS[k]()
        if s:
            self.update(s)

    def update(self, data):
        # feed the same data to every underlying hash object
        for h in self._hashes.values():
            h.update(data)

    def __getitem__(self, key):
        if key not in DIGESTS:
            # fix: this previously interpolated the undefined name 'k',
            # raising NameError instead of the intended Abort
            raise Abort(_('unknown digest type: %s') % key)
        return self._hashes[key].hexdigest()

    def __iter__(self):
        return iter(self._hashes)

    @staticmethod
    def preferred(supported):
        """returns the strongest digest type in both supported and DIGESTS."""

        for k in DIGESTS_BY_STRENGTH:
            if k in supported:
                return k
        return None
189
189
class digestchecker(object):
    """file handle wrapper that additionally checks content against a given
    size and digests.

    d = digestchecker(fh, size, {'md5': '...'})

    When multiple digests are given, all of them are validated.
    """

    def __init__(self, fh, size, digests):
        self._fh = fh
        self._size = size
        self._got = 0               # bytes read so far
        self._digests = dict(digests)
        self._digester = digester(self._digests.keys())

    def read(self, length=-1):
        # Read from the wrapped handle while feeding the digesters and
        # keeping the running byte count up to date.
        content = self._fh.read(length)
        self._digester.update(content)
        self._got += len(content)
        return content

    def validate(self):
        """Abort unless exactly `size` bytes were read and all digests match."""
        if self._got != self._size:
            raise Abort(_('size mismatch: expected %d, got %d') %
                        (self._size, self._got))
        for k, v in self._digests.items():
            if self._digester[k] != v:
                # i18n: first parameter is a digest name
                raise Abort(_('%s mismatch: expected %s, got %s') %
                            (k, v, self._digester[k]))
221
221
# Ensure a 'buffer' callable exists on every supported interpreter: the
# builtin on Python 2, a memoryview-based replacement on Python 3 where
# the builtin was removed.
try:
    buffer = buffer
except NameError:
    if sys.version_info[0] >= 3:
        def buffer(sliceable, offset=0):
            return memoryview(sliceable)[offset:]
    else:
        def buffer(sliceable, offset=0):
            return sliceable[offset:]

# Closing inherited descriptors in children is only the norm on POSIX.
closefds = os.name == 'posix'

# Read granularity used by bufferedinputpipe._fillbuffer.
_chunksize = 4096
235
235
class bufferedinputpipe(object):
    """a manually buffered input pipe

    Python will not let us use buffered IO and lazy reading with 'polling' at
    the same time. We cannot probe the buffer state and select will not detect
    that data are ready to read if they are already buffered.

    This class let us work around that by implementing its own buffering
    (allowing efficient readline) while offering a way to know if the buffer is
    empty from the output (allowing collaboration of the buffer with polling).

    This class lives in the 'util' module because it makes use of the 'os'
    module from the python stdlib.
    """

    def __init__(self, input):
        self._input = input
        self._buffer = []    # raw chunks, newest appended last
        self._eof = False    # set once os.read() returns nothing
        self._lenbuf = 0     # total buffered length across chunks

    @property
    def hasbuffer(self):
        """True is any data is currently buffered

        This will be used externally a pre-step for polling IO. If there is
        already data then no polling should be set in place."""
        return bool(self._buffer)

    @property
    def closed(self):
        return self._input.closed

    def fileno(self):
        return self._input.fileno()

    def close(self):
        return self._input.close()

    def read(self, size):
        # Pull chunks until enough data is available or the pipe hits EOF.
        while self._lenbuf < size and not self._eof:
            self._fillbuffer()
        return self._frombuffer(size)

    def readline(self, *args, **kwargs):
        if len(self._buffer) > 1:
            # this should not happen because both read and readline end with a
            # _frombuffer call that collapse it.
            self._buffer = [''.join(self._buffer)]
            self._lenbuf = len(self._buffer[0])
        lfi = -1
        if self._buffer:
            lfi = self._buffer[-1].find('\n')
        # keep filling until a newline shows up in the newest chunk or EOF
        while lfi < 0 and not self._eof:
            self._fillbuffer()
            if self._buffer:
                lfi = self._buffer[-1].find('\n')
        size = lfi + 1
        if lfi < 0: # end of file
            size = self._lenbuf
        elif len(self._buffer) > 1:
            # we need to take previous chunks into account
            size += self._lenbuf - len(self._buffer[-1])
        return self._frombuffer(size)

    def _frombuffer(self, size):
        """return at most 'size' data from the buffer

        The data are removed from the buffer."""
        if size == 0 or not self._buffer:
            return ''
        if len(self._buffer) == 1:
            buf = self._buffer[0]
        else:
            buf = ''.join(self._buffer)

        data = buf[:size]
        rest = buf[len(data):]
        if rest:
            self._buffer = [rest]
            self._lenbuf = len(rest)
        else:
            self._buffer = []
            self._lenbuf = 0
        return data

    def _fillbuffer(self):
        """read one chunk of data into the buffer, flagging EOF on empty read"""
        data = os.read(self._input.fileno(), _chunksize)
        if data:
            self._lenbuf += len(data)
            self._buffer.append(data)
        else:
            self._eof = True
329
329
def popen2(cmd, env=None, newlines=False):
    """Run `cmd` through the shell; return its (stdin, stdout) pipes.

    Setting bufsize to -1 lets the system decide the buffer size.
    The default for bufsize is 0, meaning unbuffered. This leads to
    poor performance on Mac OS X: http://bugs.python.org/issue4194
    """
    proc = subprocess.Popen(cmd,
                            shell=True,
                            bufsize=-1,
                            close_fds=closefds,
                            stdin=subprocess.PIPE,
                            stdout=subprocess.PIPE,
                            universal_newlines=newlines,
                            env=env)
    return proc.stdin, proc.stdout
340
340
def popen3(cmd, env=None, newlines=False):
    """Like popen4(), but without exposing the Popen object itself."""
    stdin, stdout, stderr, _proc = popen4(cmd, env, newlines)
    return stdin, stdout, stderr
344
344
def popen4(cmd, env=None, newlines=False, bufsize=-1):
    """Spawn `cmd` via the shell with stdin/stdout/stderr all piped.

    Returns a 4-tuple (stdin, stdout, stderr, Popen object).
    """
    proc = subprocess.Popen(cmd,
                            shell=True,
                            bufsize=bufsize,
                            close_fds=closefds,
                            stdin=subprocess.PIPE,
                            stdout=subprocess.PIPE,
                            stderr=subprocess.PIPE,
                            universal_newlines=newlines,
                            env=env)
    return proc.stdin, proc.stdout, proc.stderr, proc
353
353
def version():
    """Return version information if available."""
    try:
        from . import __version__
    except ImportError:
        # running from a tree without a generated __version__ module
        return 'unknown'
    return __version__.version
361
361
def versiontuple(v=None, n=4):
    """Parses a Mercurial version string into an N-tuple.

    The version string to be parsed is specified with the ``v`` argument.
    If it isn't defined, the current Mercurial version string will be parsed.

    ``n`` can be 2, 3, or 4. Here is how some version strings map to
    returned values:

    >>> v = '3.6.1+190-df9b73d2d444'
    >>> versiontuple(v, 2)
    (3, 6)
    >>> versiontuple(v, 3)
    (3, 6, 1)
    >>> versiontuple(v, 4)
    (3, 6, 1, '190-df9b73d2d444')

    >>> versiontuple('3.6.1+190-df9b73d2d444+20151118')
    (3, 6, 1, '190-df9b73d2d444+20151118')

    >>> v = '3.6'
    >>> versiontuple(v, 2)
    (3, 6)
    >>> versiontuple(v, 3)
    (3, 6, None)
    >>> versiontuple(v, 4)
    (3, 6, None, None)
    """
    if not v:
        v = version()
    # split off the '+local' build suffix, if present
    if '+' in v:
        vparts, extra = v.split('+', 1)
    else:
        vparts, extra = v, None

    # collect leading integer components; stop at the first non-integer
    vints = []
    for part in vparts.split('.'):
        try:
            vints.append(int(part))
        except ValueError:
            break
    # (3, 6) -> (3, 6, None)
    while len(vints) < 3:
        vints.append(None)

    if n == 2:
        return (vints[0], vints[1])
    if n == 3:
        return (vints[0], vints[1], vints[2])
    if n == 4:
        return (vints[0], vints[1], vints[2], extra)
414
414
# used by parsedate: every format string strptime() is tried against,
# ordered roughly from most to least specific.
defaultdateformats = (
    '%Y-%m-%d %H:%M:%S',
    '%Y-%m-%d %I:%M:%S%p',
    '%Y-%m-%d %H:%M',
    '%Y-%m-%d %I:%M%p',
    '%Y-%m-%d',
    '%m-%d',
    '%m/%d',
    '%m/%d/%y',
    '%m/%d/%Y',
    '%a %b %d %H:%M:%S %Y',
    '%a %b %d %I:%M:%S%p %Y',
    '%a, %d %b %Y %H:%M:%S', #  GNU coreutils "/bin/date --rfc-2822"
    '%b %d %H:%M:%S %Y',
    '%b %d %I:%M:%S%p %Y',
    '%b %d %H:%M:%S',
    '%b %d %I:%M:%S%p',
    '%b %d %H:%M',
    '%b %d %I:%M%p',
    '%b %d %Y',
    '%b %d',
    '%H:%M:%S',
    '%I:%M:%S%p',
    '%H:%M',
    '%I:%M%p',
)

# additional, more ambiguous formats accepted when the caller opts in
extendeddateformats = defaultdateformats + (
    "%Y",
    "%Y-%m",
    "%b",
    "%b %Y",
)
449
449
def cachefunc(func):
    '''cache the result of function calls'''
    # XXX doesn't handle keywords args
    # use __code__ (an alias of func_code since Python 2.6) instead of the
    # Python 2-only func_code spelling, so this also works on Python 3
    if func.__code__.co_argcount == 0:
        # nullary function: a one-slot list is the cheapest memo
        cache = []
        def f():
            if len(cache) == 0:
                cache.append(func())
            return cache[0]
        return f
    cache = {}
    if func.__code__.co_argcount == 1:
        # we gain a small amount of time because
        # we don't need to pack/unpack the list
        def f(arg):
            if arg not in cache:
                cache[arg] = func(arg)
            return cache[arg]
    else:
        def f(*args):
            if args not in cache:
                cache[args] = func(*args)
            return cache[args]

    return f
475
475
class sortdict(dict):
    '''a simple sorted dictionary

    Preserves insertion order; re-assigning an existing key moves it to
    the end of the iteration order.
    '''
    def __init__(self, data=None):
        # keys in insertion order; the dict itself holds the values
        self._list = []
        if data:
            self.update(data)
    def copy(self):
        return sortdict(self)
    def __setitem__(self, key, val):
        if key in self:
            self._list.remove(key)
        self._list.append(key)
        dict.__setitem__(self, key, val)
    def __iter__(self):
        return self._list.__iter__()
    def update(self, src):
        if isinstance(src, dict):
            # items() works on both Python 2 and 3; iteritems() is 2-only
            src = src.items()
        for k, v in src:
            self[k] = v
    def clear(self):
        dict.clear(self)
        self._list = []
    def items(self):
        return [(k, self[k]) for k in self._list]
    def __delitem__(self, key):
        dict.__delitem__(self, key)
        self._list.remove(key)
    def pop(self, key, *args, **kwargs):
        # fix: return the popped value (it was previously discarded,
        # making pop() always return None, unlike dict.pop())
        value = dict.pop(self, key, *args, **kwargs)
        try:
            self._list.remove(key)
        except ValueError:
            # key was absent and a default was supplied
            pass
        return value
    def keys(self):
        return self._list
    def iterkeys(self):
        return self._list.__iter__()
    def iteritems(self):
        for k in self._list:
            yield k, self[k]
    def insert(self, index, key, val):
        self._list.insert(index, key)
        dict.__setitem__(self, key, val)
520
520
class _lrucachenode(object):
    """A node in a doubly linked list.

    Holds a reference to nodes on either side as well as a key-value
    pair for the dictionary entry.
    """
    # slots keep per-node memory down; lrucachedict creates one per entry
    __slots__ = ('next', 'prev', 'key', 'value')

    def __init__(self):
        # link pointers are wired up by the owning lrucachedict
        self.prev = None
        self.next = None

        self.value = None
        self.key = _notset

    def markempty(self):
        """Mark the node as emptied."""
        self.key = _notset
539
539
540 class lrucachedict(object):
540 class lrucachedict(object):
541 """Dict that caches most recent accesses and sets.
541 """Dict that caches most recent accesses and sets.
542
542
543 The dict consists of an actual backing dict - indexed by original
543 The dict consists of an actual backing dict - indexed by original
544 key - and a doubly linked circular list defining the order of entries in
544 key - and a doubly linked circular list defining the order of entries in
545 the cache.
545 the cache.
546
546
547 The head node is the newest entry in the cache. If the cache is full,
547 The head node is the newest entry in the cache. If the cache is full,
548 we recycle head.prev and make it the new head. Cache accesses result in
548 we recycle head.prev and make it the new head. Cache accesses result in
549 the node being moved to before the existing head and being marked as the
549 the node being moved to before the existing head and being marked as the
550 new head node.
550 new head node.
551 """
551 """
552 def __init__(self, max):
552 def __init__(self, max):
553 self._cache = {}
553 self._cache = {}
554
554
555 self._head = head = _lrucachenode()
555 self._head = head = _lrucachenode()
556 head.prev = head
556 head.prev = head
557 head.next = head
557 head.next = head
558 self._size = 1
558 self._size = 1
559 self._capacity = max
559 self._capacity = max
560
560
561 def __len__(self):
561 def __len__(self):
562 return len(self._cache)
562 return len(self._cache)
563
563
564 def __contains__(self, k):
564 def __contains__(self, k):
565 return k in self._cache
565 return k in self._cache
566
566
567 def __iter__(self):
567 def __iter__(self):
568 # We don't have to iterate in cache order, but why not.
568 # We don't have to iterate in cache order, but why not.
569 n = self._head
569 n = self._head
570 for i in range(len(self._cache)):
570 for i in range(len(self._cache)):
571 yield n.key
571 yield n.key
572 n = n.next
572 n = n.next
573
573
574 def __getitem__(self, k):
574 def __getitem__(self, k):
575 node = self._cache[k]
575 node = self._cache[k]
576 self._movetohead(node)
576 self._movetohead(node)
577 return node.value
577 return node.value
578
578
    def __setitem__(self, k, v):
        """Insert or update ``k``, making it the most-recently-used entry."""
        node = self._cache.get(k)
        # Replace existing value and mark as newest.
        if node is not None:
            node.value = v
            self._movetohead(node)
            return

        if self._size < self._capacity:
            # Still growing: allocate a fresh empty node before the head.
            node = self._addcapacity()
        else:
            # Grab the last/oldest item.
            node = self._head.prev

        # At capacity. Kill the old entry.
        if node.key is not _notset:
            del self._cache[node.key]

        node.key = k
        node.value = v
        self._cache[k] = node
        # And mark it as newest entry. No need to adjust order since it
        # is already self._head.prev.
        self._head = node
603
603
    def __delitem__(self, k):
        """Remove ``k`` (KeyError if absent); the freed node is recycled
        into the oldest slot of the list."""
        node = self._cache.pop(k)
        node.markempty()

        # Temporarily mark as newest item before re-adjusting head to make
        # this node the oldest item.
        self._movetohead(node)
        self._head = node.next
612
612
613 # Additional dict methods.
613 # Additional dict methods.
614
614
615 def get(self, k, default=None):
615 def get(self, k, default=None):
616 try:
616 try:
617 return self._cache[k]
617 return self._cache[k]
618 except KeyError:
618 except KeyError:
619 return default
619 return default
620
620
    def clear(self):
        """Drop all entries, keeping the allocated nodes for reuse."""
        n = self._head
        # Empty nodes until hitting one that is already empty (any nodes
        # past it were never populated).
        while n.key is not _notset:
            n.markempty()
            n = n.next

        self._cache.clear()
628
628
629 def copy(self):
629 def copy(self):
630 result = lrucachedict(self._capacity)
630 result = lrucachedict(self._capacity)
631 n = self._head.prev
631 n = self._head.prev
632 # Iterate in oldest-to-newest order, so the copy has the right ordering
632 # Iterate in oldest-to-newest order, so the copy has the right ordering
633 for i in range(len(self._cache)):
633 for i in range(len(self._cache)):
634 result[n.key] = n.value
634 result[n.key] = n.value
635 n = n.prev
635 n = n.prev
636 return result
636 return result
637
637
    def _movetohead(self, node):
        """Mark a node as the newest, making it the new head.

        When a node is accessed, it becomes the freshest entry in the LRU
        list, which is denoted by self._head.

        Visually, let's make ``N`` the new head node (* denotes head):

        previous/oldest <-> head <-> next/next newest

        ----<->--- A* ---<->-----
        |                       |
        E <-> D <-> N <-> C <-> B

        To:

        ----<->--- N* ---<->-----
        |                       |
        E <-> D <-> C <-> B <-> A

        This requires the following moves:

           C.next = D  (node.prev.next = node.next)
           D.prev = C  (node.next.prev = node.prev)
           E.next = N  (head.prev.next = node)
           N.prev = E  (node.prev = head.prev)
           N.next = A  (node.next = head)
           A.prev = N  (head.prev = node)

        Runs in constant time; correct even when ``node`` is already
        adjacent to the head (see the note at ``N.next`` below).
        """
        head = self._head
        # C.next = D
        node.prev.next = node.next
        # D.prev = C
        node.next.prev = node.prev
        # N.prev = E
        node.prev = head.prev
        # N.next = A
        # It is tempting to do just "head" here, however if node is
        # adjacent to head, this will do bad things.
        node.next = head.prev.next
        # E.next = N
        node.next.prev = node
        # A.prev = N
        node.prev.next = node

        self._head = node
684
684
    def _addcapacity(self):
        """Add a node to the circular linked list.

        The new node is inserted before the head node.
        """
        head = self._head
        node = _lrucachenode()
        # Splice the empty node in between head.prev (oldest) and head.
        head.prev.next = node
        node.prev = head.prev
        node.next = head
        head.prev = node
        self._size += 1
        return node
698
698
def lrucachefunc(func):
    '''cache most recent results of function calls

    Memoizes up to ~20 distinct argument tuples, evicting the least
    recently used entry once the bound is exceeded.
    '''
    cache = {}
    order = collections.deque()
    # func.__code__ is available on Python 2.6+ and Python 3; the old
    # func.func_code spelling only exists on Python 2.
    if func.__code__.co_argcount == 1:
        # Fast path: single-argument functions key the cache on the bare
        # argument instead of an args tuple.
        def f(arg):
            if arg not in cache:
                if len(cache) > 20:
                    del cache[order.popleft()]
                cache[arg] = func(arg)
            else:
                # Cache hit: refresh recency.
                order.remove(arg)
            order.append(arg)
            return cache[arg]
    else:
        def f(*args):
            if args not in cache:
                if len(cache) > 20:
                    del cache[order.popleft()]
                cache[args] = func(*args)
            else:
                order.remove(args)
            order.append(args)
            return cache[args]

    return f
725
725
class propertycache(object):
    """Descriptor turning a method into a lazily computed attribute.

    The wrapped function runs once on first access; its result is stored
    in the instance ``__dict__`` under the same name, so every later
    lookup bypasses the descriptor entirely.
    """
    def __init__(self, func):
        self.func = func
        self.name = func.__name__

    def __get__(self, obj, type=None):
        computed = self.func(obj)
        self.cachevalue(obj, computed)
        return computed

    def cachevalue(self, obj, value):
        # __dict__ assignment required to bypass __setattr__ (eg: repoview)
        obj.__dict__[self.name] = value
738
738
def pipefilter(s, cmd):
    '''filter string S through command CMD, returning its output'''
    # Feed s on the child's stdin and collect its stdout; stderr is
    # inherited untouched.
    proc = subprocess.Popen(cmd, shell=True, close_fds=closefds,
                            stdin=subprocess.PIPE, stdout=subprocess.PIPE)
    stdout, _stderr = proc.communicate(s)
    return stdout
745
745
def tempfilter(s, cmd):
    '''filter string S through a pair of temporary files with CMD.
    CMD is used as a template to create the real command to be run,
    with the strings INFILE and OUTFILE replaced by the real names of
    the temporary files generated.'''
    infile = outfile = None
    try:
        # Write the input into a scratch file for the command to read.
        fd, infile = tempfile.mkstemp(prefix='hg-filter-in-')
        fp = os.fdopen(fd, 'wb')
        fp.write(s)
        fp.close()
        # Reserve an output path; the command itself writes into it.
        fd, outfile = tempfile.mkstemp(prefix='hg-filter-out-')
        os.close(fd)
        cmd = cmd.replace('INFILE', infile)
        cmd = cmd.replace('OUTFILE', outfile)
        code = os.system(cmd)
        if sys.platform == 'OpenVMS' and code & 1:
            # OpenVMS reports success with the low bit set.
            code = 0
        if code:
            raise Abort(_("command '%s' failed: %s") %
                        (cmd, explainexit(code)))
        return readfile(outfile)
    finally:
        # Best-effort cleanup of both scratch files.
        for scratch in (infile, outfile):
            try:
                if scratch:
                    os.unlink(scratch)
            except OSError:
                pass
779
779
# Dispatch table mapping a filter-spec prefix to the function that
# implements it; consulted by filter() below. Specs with no known
# prefix fall back to pipefilter.
filtertable = {
    'tempfile:': tempfilter,
    'pipe:': pipefilter,
    }
784
784
def filter(s, cmd):
    "filter a string through a command that transforms its input to its output"
    # Dispatch on a known scheme prefix; default to a plain shell pipe.
    for prefix, fn in filtertable.iteritems():
        if cmd.startswith(prefix):
            return fn(s, cmd[len(prefix):].lstrip())
    return pipefilter(s, cmd)
791
791
def binary(s):
    """return true if a string is binary data"""
    # Empty input is never binary; otherwise the presence of a NUL byte
    # is the heuristic.
    if not s:
        return False
    return '\0' in s
795
795
def increasingchunks(source, min=1024, max=65536):
    '''return no less than min bytes per chunk while data remains,
    doubling min after each chunk until it reaches max'''
    def floorlog2(x):
        # Position of the highest set bit; 0 maps to 0 by convention.
        if not x:
            return 0
        bits = 0
        while x:
            x >>= 1
            bits += 1
        return bits - 1

    pending = []
    size = 0
    for piece in source:
        pending.append(piece)
        size += len(piece)
        if size >= min:
            if min < max:
                # Grow the threshold: at least double it, or jump to the
                # largest power of two not exceeding what we just
                # accumulated, whichever is bigger.
                min = min << 1
                power = 1 << floorlog2(size)
                if power > min:
                    min = power
            # Never demand more than max per chunk.
            if min > max:
                min = max
            yield ''.join(pending)
            size = 0
            pending = []
    if pending:
        # Flush whatever is left, even if below the threshold.
        yield ''.join(pending)
826
826
# Re-exported so callers can raise util.Abort without importing error.
Abort = error.Abort
828
828
def always(fn):
    # Predicate that is true for any input; counterpart of never().
    return True
831
831
def never(fn):
    # Predicate that is false for any input; counterpart of always().
    return False
834
834
def nogc(func):
    """Decorator that runs ``func`` with the garbage collector disabled.

    CPython triggers a collection whenever enough container objects have
    been allocated (see gc.get_threshold()), even for objects marked as
    untracked -- tracking only controls what the collector inspects, not
    when it runs. Building huge containers is therefore faster with the
    collector off; the previous collector state is restored afterwards.
    (The underlying issue is fixed in Python 2.7.)
    """
    def wrapped(*args, **kwargs):
        wasenabled = gc.isenabled()
        gc.disable()
        try:
            return func(*args, **kwargs)
        finally:
            # Only re-enable collection if the caller had it on.
            if wasenabled:
                gc.enable()
    return wrapped
856
856
def pathto(root, n1, n2):
    '''return the relative path from one place to another.
    root should use os.sep to separate directories
    n1 should use os.sep to separate directories
    n2 should use "/" to separate directories
    returns an os.sep-separated path.

    If n1 is a relative path, it's assumed it's
    relative to root.
    n2 should always be relative to root.
    '''
    if not n1:
        # Nothing to be relative to: n2 (from root) is already the answer.
        return localpath(n2)
    if os.path.isabs(n1):
        if os.path.splitdrive(root)[0] != os.path.splitdrive(n1)[0]:
            # Different drives (Windows): no relative path exists, so
            # fall back to an absolute one.
            return os.path.join(root, localpath(n2))
        # Make n2 absolute too so both sides compare component-wise.
        n2 = '/'.join((pconvert(root), n2))
    a, b = splitpath(n1), n2.split('/')
    a.reverse()
    b.reverse()
    # Strip the common leading components of both paths.
    while a and b and a[-1] == b[-1]:
        a.pop()
        b.pop()
    b.reverse()
    # Climb out of what remains of n1, then descend into n2.
    return os.sep.join((['..'] * len(a)) + b) or '.'
882
882
def mainfrozen():
    """return True if we are a frozen executable.

    The code supports py2exe (most common, Windows only) and tools/freeze
    (portable, not much used).
    """
    if safehasattr(sys, "frozen"): # new py2exe
        return True
    if safehasattr(sys, "importers"): # old py2exe
        return True
    return imp.is_frozen("__main__") # tools/freeze
892
892
# the location of data files matching the source code
if mainfrozen() and getattr(sys, 'frozen', None) != 'macosx_app':
    # executable version (py2exe) doesn't support __file__
    datapath = os.path.dirname(sys.executable)
else:
    # Normal installs: data lives next to this module.
    datapath = os.path.dirname(__file__)

# Point the i18n machinery at the same data directory.
i18n.setdatapath(datapath)
901
901
# Cached path of the 'hg' executable; filled in lazily by hgexecutable().
_hgexecutable = None
903
903
def hgexecutable():
    """return location of the 'hg' executable.

    Defaults to $HG or 'hg' in the search path.

    The result is computed once and cached in _hgexecutable.
    """
    if _hgexecutable is None:
        hg = os.environ.get('HG')
        mainmod = sys.modules['__main__']
        if hg:
            # Explicit $HG override wins.
            _sethgexecutable(hg)
        elif mainfrozen():
            if getattr(sys, 'frozen', None) == 'macosx_app':
                # Env variable set by py2app
                _sethgexecutable(os.environ['EXECUTABLEPATH'])
            else:
                # Frozen (py2exe etc.): the interpreter *is* hg.
                _sethgexecutable(sys.executable)
        elif os.path.basename(getattr(mainmod, '__file__', '')) == 'hg':
            # Running directly from the 'hg' script.
            _sethgexecutable(mainmod.__file__)
        else:
            # Last resort: search PATH, else whatever argv[0] says.
            exe = findexe('hg') or os.path.basename(sys.argv[0])
            _sethgexecutable(exe)
    return _hgexecutable
926
926
def _sethgexecutable(path):
    """set location of the 'hg' executable"""
    # Stored in the module-level cache read back by hgexecutable().
    global _hgexecutable
    _hgexecutable = path
931
931
932 def _isstdout(f):
932 def _isstdout(f):
933 fileno = getattr(f, 'fileno', None)
933 fileno = getattr(f, 'fileno', None)
934 return fileno and fileno() == sys.__stdout__.fileno()
934 return fileno and fileno() == sys.__stdout__.fileno()
935
935
def system(cmd, environ=None, cwd=None, onerr=None, errprefix=None, out=None):
    '''enhanced shell command execution.
    run with environment maybe modified, maybe in different dir.

    if command fails and onerr is None, return status, else raise onerr
    object as exception.

    if out is specified, it is assumed to be a file-like object that has a
    write() method. stdout and stderr will be redirected to out.'''
    if environ is None:
        environ = {}
    try:
        # Flush our buffered stdout before the child writes to the same
        # stream; best effort only.
        sys.stdout.flush()
    except Exception:
        pass
    def py2shell(val):
        'convert python object into string that is useful to shell'
        if val is None or val is False:
            return '0'
        if val is True:
            return '1'
        return str(val)
    # Keep the unquoted command for error reporting below.
    origcmd = cmd
    cmd = quotecommand(cmd)
    if sys.platform == 'plan9' and (sys.version_info[0] == 2
                                    and sys.version_info[1] < 7):
        # subprocess kludge to work around issues in half-baked Python
        # ports, notably bichued/python:
        if not cwd is None:
            os.chdir(cwd)
        rc = os.system(cmd)
    else:
        # Child environment: ours, plus the requested overrides encoded
        # into shell-friendly strings, plus $HG.
        env = dict(os.environ)
        env.update((k, py2shell(v)) for k, v in environ.iteritems())
        env['HG'] = hgexecutable()
        if out is None or _isstdout(out):
            rc = subprocess.call(cmd, shell=True, close_fds=closefds,
                                 env=env, cwd=cwd)
        else:
            # Capture combined stdout/stderr and forward it line by line
            # to the supplied file-like object.
            proc = subprocess.Popen(cmd, shell=True, close_fds=closefds,
                                    env=env, cwd=cwd, stdout=subprocess.PIPE,
                                    stderr=subprocess.STDOUT)
            while True:
                line = proc.stdout.readline()
                if not line:
                    break
                out.write(line)
            proc.wait()
            rc = proc.returncode
        if sys.platform == 'OpenVMS' and rc & 1:
            # OpenVMS encodes success in the low bit.
            rc = 0
    if rc and onerr:
        errmsg = '%s %s' % (os.path.basename(origcmd.split(None, 1)[0]),
                            explainexit(rc)[0])
        if errprefix:
            errmsg = '%s: %s' % (errprefix, errmsg)
        raise onerr(errmsg)
    return rc
994
994
def checksignature(func):
    '''wrap a function with code to check for calling errors'''
    def check(*args, **kwargs):
        try:
            return func(*args, **kwargs)
        except TypeError:
            # A traceback of depth 1 means the TypeError came from the
            # call itself (bad argument count/names), not from inside
            # func's body: report it as a signature problem.
            if len(traceback.extract_tb(sys.exc_info()[2])) == 1:
                raise error.SignatureError
            raise

    return check
1006
1006
def copyfile(src, dest, hardlink=False, copystat=False):
    '''copy a file, preserving mode and optionally other stat info like
    atime/mtime'''
    if os.path.lexists(dest):
        # Clobber an existing destination (including dangling symlinks).
        unlink(dest)
    # hardlinks are problematic on CIFS, quietly ignore this flag
    # until we find a way to work around it cleanly (issue4546)
    if False and hardlink:
        try:
            oslink(src, dest)
            return
        except (IOError, OSError):
            pass # fall back to normal copy
    if os.path.islink(src):
        # Recreate the symlink rather than copying its target.
        os.symlink(os.readlink(src), dest)
        # copytime is ignored for symlinks, but in general copytime isn't needed
        # for them anyway
    else:
        try:
            shutil.copyfile(src, dest)
            if copystat:
                # copystat also copies mode
                shutil.copystat(src, dest)
            else:
                shutil.copymode(src, dest)
        except shutil.Error as inst:
            # Surface copy failures as a user-level abort.
            raise Abort(str(inst))
1034
1034
def copyfiles(src, dst, hardlink=None, progress=lambda t, pos: None):
    """Copy a directory tree using hardlinks if possible.

    Returns a (hardlink, count) pair: whether hardlinking is still in
    effect and how many files were processed.
    """
    num = 0

    if hardlink is None:
        # Hardlinks only work within one device; probe src and dst.
        hardlink = (os.stat(src).st_dev ==
                    os.stat(os.path.dirname(dst)).st_dev)
    if hardlink:
        topic = _('linking')
    else:
        topic = _('copying')

    if os.path.isdir(src):
        os.mkdir(dst)
        for name, kind in osutil.listdir(src):
            srcname = os.path.join(src, name)
            dstname = os.path.join(dst, name)
            def nprog(t, pos):
                # Offset child progress by the files already processed.
                if pos is not None:
                    return progress(t, pos + num)
            hardlink, n = copyfiles(srcname, dstname, hardlink, progress=nprog)
            num += n
    else:
        if hardlink:
            try:
                oslink(src, dst)
            except (IOError, OSError):
                # Linking failed: stop trying for the rest of the tree.
                hardlink = False
                shutil.copy(src, dst)
        else:
            shutil.copy(src, dst)
        num += 1
        progress(topic, num)
    progress(topic, None)

    return hardlink, num
1071
1071
# Base filenames Windows reserves for devices, regardless of extension.
_winreservednames = '''con prn aux nul
com1 com2 com3 com4 com5 com6 com7 com8 com9
lpt1 lpt2 lpt3 lpt4 lpt5 lpt6 lpt7 lpt8 lpt9'''.split()
# Characters that may not appear anywhere in a Windows filename.
_winreservedchars = ':*?"<>|'
def checkwinfilename(path):
    r'''Check that the base-relative path is a valid filename on Windows.
    Returns None if the path is ok, or a UI string describing the problem.

    >>> checkwinfilename("just/a/normal/path")
    >>> checkwinfilename("foo/bar/con.xml")
    "filename contains 'con', which is reserved on Windows"
    >>> checkwinfilename("foo/con.xml/bar")
    "filename contains 'con', which is reserved on Windows"
    >>> checkwinfilename("foo/bar/xml.con")
    >>> checkwinfilename("foo/bar/AUX/bla.txt")
    "filename contains 'AUX', which is reserved on Windows"
    >>> checkwinfilename("foo/bar/bla:.txt")
    "filename contains ':', which is reserved on Windows"
    >>> checkwinfilename("foo/bar/b\07la.txt")
    "filename contains '\\x07', which is invalid on Windows"
    >>> checkwinfilename("foo/bar/bla ")
    "filename ends with ' ', which is not allowed on Windows"
    >>> checkwinfilename("../bar")
    >>> checkwinfilename("foo\\")
    "filename ends with '\\', which is invalid on Windows"
    >>> checkwinfilename("foo\\/bar")
    "directory name ends with '\\', which is invalid on Windows"
    '''
    if path.endswith('\\'):
        return _("filename ends with '\\', which is invalid on Windows")
    if '\\/' in path:
        return _("directory name ends with '\\', which is invalid on Windows")
    # Validate each path component separately.
    for n in path.replace('\\', '/').split('/'):
        if not n:
            continue
        for c in n:
            if c in _winreservedchars:
                return _("filename contains '%s', which is reserved "
                         "on Windows") % c
            if ord(c) <= 31:
                # Control characters are rejected outright.
                return _("filename contains %r, which is invalid "
                         "on Windows") % c
        # Reserved device names apply to the part before the first dot.
        base = n.split('.')[0]
        if base and base.lower() in _winreservednames:
            return _("filename contains '%s', which is reserved "
                     "on Windows") % base
        t = n[-1]
        # A trailing dot or space is dropped by Windows; '.' and '..'
        # components are allowed through.
        if t in '. ' and n not in '..':
            return _("filename ends with '%s', which is not allowed "
                     "on Windows") % t
# Pick the filename validator for the host OS: on Windows the checks
# above are enforced by the OS itself; elsewhere defer to the platform
# module's implementation.
if os.name == 'nt':
    checkosfilename = checkwinfilename
else:
    checkosfilename = platform.checkosfilename
1127
1127
def makelock(info, pathname):
    """Create a lock at pathname whose payload is the string info.

    A symlink is tried first; when symlinks are unavailable (no os.symlink)
    or the filesystem rejects them, fall back to an exclusively created
    regular file. An existing lock (EEXIST) always propagates.
    """
    try:
        return os.symlink(info, pathname)
    except OSError as err:
        if err.errno == errno.EEXIST:
            # Lock already held: let the caller deal with it.
            raise
        # Any other symlink failure: fall through to the file fallback.
    except AttributeError: # no symlink in os
        pass

    fd = os.open(pathname, os.O_CREAT | os.O_WRONLY | os.O_EXCL)
    os.write(fd, info)
    os.close(fd)
1140
1140
def readlock(pathname):
    """Return the payload stored in the lock at pathname.

    Symlink locks yield their link target; regular-file locks (used where
    symlinks are unavailable) yield the file contents.
    """
    try:
        return os.readlink(pathname)
    except OSError as err:
        # EINVAL: not a symlink; ENOSYS: symlinks unsupported here.
        # Anything else is a genuine error.
        if err.errno not in (errno.EINVAL, errno.ENOSYS):
            raise
    except AttributeError: # no symlink in os
        pass
    fp = posixfile(pathname)
    data = fp.read()
    fp.close()
    return data
1153
1153
def fstat(fp):
    '''stat file object that may not have fileno method.'''
    fileno = getattr(fp, 'fileno', None)
    if fileno is None:
        # no file descriptor available; stat by name instead
        return os.stat(fp.name)
    return os.fstat(fileno())
1160
1160
1161 # File system features
1161 # File system features
1162
1162
def checkcase(path):
    """
    Return true if the given path is on a case-sensitive filesystem

    Requires a path (like /foo/.hg) ending with a foldable final
    directory component.
    """
    s1 = os.lstat(path)
    d, b = os.path.split(path)
    folded = b.upper()
    if folded == b:
        folded = b.lower()
        if folded == b:
            # name has no case to fold; assume case-sensitive
            return True # no evidence against case sensitivity
    try:
        s2 = os.lstat(os.path.join(d, folded))
    except OSError:
        # folded twin does not exist: the fs distinguishes case
        return True
    # same inode for both spellings means case-insensitive
    return s2 != s1
1185
1185
1186 try:
1186 try:
1187 import re2
1187 import re2
1188 _re2 = None
1188 _re2 = None
1189 except ImportError:
1189 except ImportError:
1190 _re2 = False
1190 _re2 = False
1191
1191
class _re(object):
    """Regex helper that transparently prefers re2 over the stdlib re.

    The verdict about whether a working re2 is available is cached in
    the module-level _re2 (None = unknown, otherwise a bool).
    """

    def _checkre2(self):
        # Probe re2 once and remember the result module-wide.
        global _re2
        try:
            # check if match works, see issue3964
            _re2 = bool(re2.match(r'\[([^\[]+)\]', '[ui]'))
        except ImportError:
            _re2 = False

    def compile(self, pat, flags=0):
        '''Compile a regular expression, using re2 if possible

        For best performance, use only re2-compatible regexp features. The
        only flags from the re module that are re2-compatible are
        IGNORECASE and MULTILINE.'''
        if _re2 is None:
            self._checkre2()
        re2flags = remod.IGNORECASE | remod.MULTILINE
        if _re2 and not (flags & ~re2flags):
            # re2 takes flags inline rather than as an argument
            if flags & remod.IGNORECASE:
                pat = '(?i)' + pat
            if flags & remod.MULTILINE:
                pat = '(?m)' + pat
            try:
                return re2.compile(pat)
            except re2.error:
                # pattern uses a feature re2 lacks; fall back to re
                pass
        return remod.compile(pat, flags)

    @propertycache
    def escape(self):
        '''Return the version of escape corresponding to self.compile.

        This is imperfect because whether re2 or re is used for a particular
        function depends on the flags, etc, but it's the best we can do.
        '''
        global _re2
        if _re2 is None:
            self._checkre2()
        return re2.escape if _re2 else remod.escape

re = _re()
1236
1236
_fspathcache = {}
def fspath(name, root):
    '''Get name in the case stored in the filesystem

    The name should be relative to root, and be normcase-ed for efficiency.

    Note that this function is unnecessary, and should not be
    called, for case-sensitive filesystems (simply because it's expensive).

    The root should be normcase-ed, too.
    '''
    def _direntries(directory):
        # map normcased entry name -> on-disk entry name
        return dict((normcase(entry), entry)
                    for entry in os.listdir(directory))

    seps = os.sep
    if os.altsep:
        seps = seps + os.altsep
    # Protect backslashes. This gets silly very quickly.
    # NOTE(review): the replace() result is discarded, so this line is a
    # no-op as written -- kept verbatim to preserve existing behavior.
    seps.replace('\\','\\\\')
    pattern = remod.compile(r'([^%s]+)|([%s]+)' % (seps, seps))
    dir = os.path.normpath(root)
    result = []
    for part, sep in pattern.findall(name):
        if sep:
            # separator runs are passed through unchanged
            result.append(sep)
            continue

        if dir not in _fspathcache:
            _fspathcache[dir] = _direntries(dir)
        contents = _fspathcache[dir]

        found = contents.get(part)
        if not found:
            # retry "once per directory" per "dirstate.walk" which
            # may take place for each patches of "hg qpush", for example
            _fspathcache[dir] = contents = _direntries(dir)
            found = contents.get(part)

        result.append(found or part)
        dir = os.path.join(dir, part)

    return ''.join(result)
1279
1279
def checknlink(testfile):
    '''check whether hardlink count reporting works properly'''

    # testfile may be open, so we need a separate file for checking to
    # work around issue2543 (or testfile may get lost on Samba shares)
    src = testfile + ".hgtmp1"
    if os.path.lexists(src):
        return False
    try:
        posixfile(src, 'w').close()
    except IOError:
        # cannot even create the probe file
        return False

    dst = testfile + ".hgtmp2"
    fp = None
    try:
        oslink(src, dst)
        # nlinks() may behave differently for files on Windows shares if
        # the file is open.
        fp = posixfile(dst)
        return nlinks(dst) > 1
    except OSError:
        return False
    finally:
        if fp is not None:
            fp.close()
        # best-effort cleanup of both probe files
        for probe in (src, dst):
            try:
                os.unlink(probe)
            except OSError:
                pass
1311
1311
def endswithsep(path):
    '''Check path ends with os.sep or os.altsep.'''
    if path.endswith(os.sep):
        return True
    # on platforms without an altsep this yields the falsy None,
    # matching the original short-circuit expression
    return os.altsep and path.endswith(os.altsep)
1315
1315
def splitpath(path):
    """Split path on os.sep (and only os.sep).

    os.altsep is deliberately ignored: this is just a named form of
    path.split(os.sep). Run os.path.normpath() first if the input may
    contain redundant or alternate separators.
    """
    return path.split(os.sep)
1323
1323
def gui():
    '''Are we running in a GUI?'''
    if sys.platform != 'darwin':
        return os.name == "nt" or os.environ.get("DISPLAY")
    if 'SSH_CONNECTION' in os.environ:
        # handle SSH access to a box where the user is logged in
        return False
    if getattr(osutil, 'isgui', None):
        # check if a CoreGraphics session is available
        return osutil.isgui()
    # pure build; use a safe default
    return True
1338
1338
def mktempcopy(name, emptyok=False, createmode=None):
    """Create a temporary file with the same contents from name

    The permission bits are copied from the original file.

    If the temporary file is going to be truncated immediately, you
    can use emptyok=True as an optimization.

    Returns the name of the temporary file.
    """
    dirname, filename = os.path.split(name)
    fd, temp = tempfile.mkstemp(prefix='.%s-' % filename, dir=dirname)
    os.close(fd)
    # Temporary files are created with mode 0600, which is usually not
    # what we want. If the original file already exists, just copy
    # its mode. Otherwise, manually obey umask.
    copymode(name, temp, createmode)
    if emptyok:
        return temp
    try:
        try:
            ifp = posixfile(name, "rb")
        except IOError as inst:
            if inst.errno == errno.ENOENT:
                # nothing to copy; the fresh empty temp file will do
                return temp
            if not getattr(inst, 'filename', None):
                inst.filename = name
            raise
        ofp = posixfile(temp, "wb")
        for chunk in filechunkiter(ifp):
            ofp.write(chunk)
        ifp.close()
        ofp.close()
    except: # re-raises
        # never leave a half-written temp file behind
        try:
            os.unlink(temp)
        except OSError:
            pass
        raise
    return temp
1377
1377
class atomictempfile(object):
    '''writable file object that atomically updates a file

    All writes will go to a temporary copy of the original file. Call
    close() when you are done writing, and atomictempfile will rename
    the temporary copy to the original name, making the changes
    visible. If the object is destroyed without being closed, all your
    writes are discarded.
    '''
    def __init__(self, name, mode='w+b', createmode=None):
        self.__name = name # permanent name
        self._tempname = mktempcopy(name, emptyok=('w' in mode),
                                    createmode=createmode)
        self._fp = posixfile(self._tempname, mode)

        # delegated methods
        self.write = self._fp.write
        self.seek = self._fp.seek
        self.tell = self._fp.tell
        self.fileno = self._fp.fileno

    def close(self):
        """Flush the writes and atomically rename over the target file."""
        if not self._fp.closed:
            self._fp.close()
            rename(self._tempname, localpath(self.__name))

    def discard(self):
        """Drop all buffered writes and remove the temporary file."""
        if not self._fp.closed:
            try:
                os.unlink(self._tempname)
            except OSError:
                pass
            self._fp.close()

    def __del__(self):
        if safehasattr(self, '_fp'): # constructor actually did something
            self.discard()
1415
1415
def makedirs(name, mode=None, notindexed=False):
    """recursive directory creation with parent mode inheritance"""
    try:
        makedir(name, notindexed)
    except OSError as err:
        if err.errno == errno.EEXIST:
            # already there; nothing to do (mode is not re-applied)
            return
        if err.errno != errno.ENOENT or not name:
            raise
        parent = os.path.dirname(os.path.abspath(name))
        if parent == name:
            # reached the filesystem root without success
            raise
        # build missing ancestors first, then retry this directory
        makedirs(parent, mode, notindexed)
        makedir(name, notindexed)
    if mode is not None:
        os.chmod(name, mode)
1432
1432
def ensuredirs(name, mode=None, notindexed=False):
    """race-safe recursive directory creation

    Newly created directories are marked as "not to be indexed by
    the content indexing service", if ``notindexed`` is specified
    for "write" mode access.
    """
    if os.path.isdir(name):
        return
    parent = os.path.dirname(os.path.abspath(name))
    if parent != name:
        # create ancestors first; recursion bottoms out at the root
        ensuredirs(parent, mode, notindexed)
    try:
        makedir(name, notindexed)
    except OSError as err:
        if not (err.errno == errno.EEXIST and os.path.isdir(name)):
            raise
        # someone else seems to have won a directory creation race
        return
    if mode is not None:
        os.chmod(name, mode)
1454
1454
def readfile(path):
    """Return the entire binary contents of the file at path."""
    fp = open(path, 'rb')
    try:
        return fp.read()
    finally:
        fp.close()
1458
1458
def writefile(path, text):
    """Replace the file at path with the given binary content."""
    fp = open(path, 'wb')
    try:
        fp.write(text)
    finally:
        fp.close()
1462
1462
def appendfile(path, text):
    """Append the given binary content to the file at path."""
    fp = open(path, 'ab')
    try:
        fp.write(text)
    finally:
        fp.close()
1466
1466
class chunkbuffer(object):
    """Allow arbitrary sized chunks of data to be efficiently read from an
    iterator over chunks of arbitrary size."""

    def __init__(self, in_iter):
        """in_iter is the iterator that's iterating over the input chunks.
        targetsize is how big a buffer to try to maintain."""
        def _rechunk(chunks):
            # break any chunk over 1MB into 256KB pieces so one huge
            # chunk cannot dominate memory usage
            for chunk in chunks:
                if len(chunk) <= 2**20:
                    yield chunk
                    continue
                pos = 0
                while pos < len(chunk):
                    end = pos + 2 ** 18
                    yield chunk[pos:end]
                    pos = end
        self.iter = _rechunk(in_iter)
        self._queue = collections.deque()
        self._chunkoffset = 0

    def read(self, l=None):
        """Read L bytes of data from the iterator of chunks of data.
        Returns less than L bytes if the iterator runs dry.

        If size parameter is omitted, read everything"""
        if l is None:
            return ''.join(self.iter)

        remaining = l
        parts = []
        queue = self._queue
        while remaining > 0:
            # refill the queue
            if not queue:
                target = 2**18
                for chunk in self.iter:
                    queue.append(chunk)
                    target -= len(chunk)
                    if target <= 0:
                        break
                if not queue:
                    break

            # The easy way to do this would be to queue.popleft(), modify the
            # chunk (if necessary), then queue.appendleft(). However, for cases
            # where we read partial chunk content, this incurs 2 dequeue
            # mutations and creates a new str for the remaining chunk in the
            # queue. Our code below avoids this overhead.

            chunk = queue[0]
            chunklen = len(chunk)
            offset = self._chunkoffset

            # Use full chunk.
            if offset == 0 and remaining >= chunklen:
                remaining -= chunklen
                queue.popleft()
                parts.append(chunk)
                # self._chunkoffset remains at 0.
                continue

            unread = chunklen - offset

            # Use all of unconsumed part of chunk.
            if remaining >= unread:
                remaining -= unread
                queue.popleft()
                # offset == 0 is handled by the branch above, so this won't
                # merely copy via ``chunk[0:]``.
                parts.append(chunk[offset:])
                self._chunkoffset = 0

            # Partial chunk needed.
            else:
                parts.append(chunk[offset:offset + remaining])
                self._chunkoffset += remaining
                remaining -= unread

        return ''.join(parts)
1547
1547
def filechunkiter(f, size=65536, limit=None):
    """Create a generator that produces the data in the file size
    (default 65536) bytes at a time, up to optional limit (default is
    to read all data). Chunks may be less than size bytes if the
    chunk is the last chunk in the file, or the file is a socket or
    some other type of file that sometimes reads less data than is
    requested."""
    assert size >= 0
    assert limit is None or limit >= 0
    while True:
        nbytes = size if limit is None else min(limit, size)
        # when nbytes is 0 (limit exhausted) skip the read entirely
        s = nbytes and f.read(nbytes)
        if not s:
            break
        if limit:
            limit -= len(s)
        yield s
1568
1568
def makedate(timestamp=None):
    '''Return a unix timestamp (or the current time) as a (unixtime,
    offset) tuple based off the local timezone.'''
    if timestamp is None:
        timestamp = time.time()
    if timestamp < 0:
        hint = _("check your clock")
        raise Abort(_("negative timestamp: %d") % timestamp, hint=hint)
    # the local offset is the gap between naive-UTC and naive-local
    # renderings of the same instant (their microseconds cancel out)
    delta = (datetime.datetime.utcfromtimestamp(timestamp) -
             datetime.datetime.fromtimestamp(timestamp))
    tz = int(delta.total_seconds())
    return timestamp, tz
1581
1581
def datestr(date=None, format='%a %b %d %H:%M:%S %Y %1%2'):
    """represent a (unixtime, offset) tuple as a localized time.
    unixtime is seconds since the epoch, and offset is the time zone's
    number of seconds away from UTC. if timezone is false, do not
    append time zone to string."""
    t, tz = date or makedate()
    if "%1" in format or "%2" in format or "%z" in format:
        # render the numeric timezone (offset is seconds west of UTC,
        # hence "-" for positive offsets)
        sign = (tz > 0) and "-" or "+"
        minutes = abs(tz) // 60
        q, r = divmod(minutes, 60)
        format = format.replace("%z", "%1%2")
        format = format.replace("%1", "%c%02d" % (sign, q))
        format = format.replace("%2", "%02d" % r)
    d = t - tz
    # clamp to the 32-bit range strftime-able everywhere
    if d > 0x7fffffff:
        d = 0x7fffffff
    elif d < -0x7fffffff:
        d = -0x7fffffff
    # Never use time.gmtime() and datetime.datetime.fromtimestamp()
    # because they use the gmtime() system call which is buggy on Windows
    # for negative values.
    t = datetime.datetime(1970, 1, 1) + datetime.timedelta(seconds=d)
    s = t.strftime(format)
    return s
1605
1606
def shortdate(date=None):
    """Render a (timestamp, tzoff) tuple as an ISO 8601 date (YYYY-MM-DD)."""
    return datestr(date, format='%Y-%m-%d')
1609
1610
def parsetimezone(tz):
    """parse a timezone string and return an offset integer"""
    # numeric form: +HHMM / -HHMM
    if tz[0] in "+-" and len(tz) == 5 and tz[1:].isdigit():
        sign = 1 if tz[0] == "+" else -1
        hours = int(tz[1:3])
        minutes = int(tz[3:5])
        # offsets are stored as seconds *west* of UTC, hence the negation
        return -sign * (hours * 60 + minutes) * 60
    # symbolic UTC names
    if tz in ("GMT", "UTC"):
        return 0
    # not a recognizable timezone
    return None
1620
1621
def strdate(string, format, defaults=[]):
    """parse a localized time string and return a (unixtime, offset) tuple.
    if the string cannot be parsed, ValueError is raised."""
    # NOTE: unixtime = localunixtime + offset
    words = string.split()
    offset = parsetimezone(words[-1])
    if offset is None:
        date = string
    else:
        # strip the recognized timezone token off the end
        date = " ".join(words[:-1])

    # add missing elements from defaults
    usenow = False # default to using biased defaults
    for part in ("S", "M", "HI", "d", "mb", "yY"): # decreasing specificity
        found = [True for p in part if ("%"+p) in format]
        if not found:
            date += "@" + defaults[part][usenow]
            format += "@%" + part[0]
        else:
            # We've found a specific time element, less specific time
            # elements are relative to today
            usenow = True

    parsed = time.strptime(date, format)
    localunixtime = int(calendar.timegm(parsed))
    if offset is None:
        # local timezone
        unixtime = int(time.mktime(parsed))
        offset = unixtime - localunixtime
    else:
        unixtime = localunixtime + offset
    return unixtime, offset
1650
1651
def parsedate(date, formats=None, bias=None):
    """parse a localized date/time and return a (unixtime, offset) tuple.

    The date may be a "unixtime offset" string or in one of the specified
    formats. If the date already is a (unixtime, offset) tuple, it is returned.

    >>> parsedate(' today ') == parsedate(\
                                  datetime.date.today().strftime('%b %d'))
    True
    >>> parsedate( 'yesterday ') == parsedate((datetime.date.today() -\
                                               datetime.timedelta(days=1)\
                                              ).strftime('%b %d'))
    True
    >>> now, tz = makedate()
    >>> strnow, strtz = parsedate('now')
    >>> (strnow - now) < 1
    True
    >>> tz == strtz
    True
    """
    if bias is None:
        bias = {}
    if not date:
        return 0, 0
    if isinstance(date, tuple) and len(date) == 2:
        # already parsed
        return date
    if not formats:
        formats = defaultdateformats
    date = date.strip()

    if date == 'now' or date == _('now'):
        return makedate()
    if date == 'today' or date == _('today'):
        date = datetime.date.today().strftime('%b %d')
    elif date == 'yesterday' or date == _('yesterday'):
        date = (datetime.date.today() -
                datetime.timedelta(days=1)).strftime('%b %d')

    try:
        # fast path: a raw "unixtime offset" pair
        when, offset = map(int, date.split(' '))
    except ValueError:
        # build per-field defaults used to complete a partial date
        now = makedate()
        defaults = {}
        for part in ("d", "mb", "yY", "HI", "M", "S"):
            # rounding value for the specific end of unknowns
            b = bias.get(part)
            if b is None:
                b = "00" if part[0] in "HMS" else "0"
            # today's value for the generic end of unknowns
            defaults[part] = (b, datestr(now, "%" + part[0]))

        for format in formats:
            try:
                when, offset = strdate(date, format, defaults)
            except (ValueError, OverflowError):
                pass
            else:
                break
        else:
            raise Abort(_('invalid date: %r') % date)
    # validate explicit (probably user-specified) date and
    # time zone offset. values must fit in signed 32 bits for
    # current 32-bit linux runtimes. timezones go from UTC-12
    # to UTC+14
    if abs(when) > 0x7fffffff:
        raise Abort(_('date exceeds 32 bits: %d') % when)
    if offset < -50400 or offset > 43200:
        raise Abort(_('impossible time zone offset: %d') % offset)
    return when, offset
1729
1728
def matchdate(date):
    """Return a function that matches a given date match specifier

    Formats include:

    '{date}' match a given date to the accuracy provided

    '<{date}' on or before a given date

    '>{date}' on or after a given date

    >>> p1 = parsedate("10:29:59")
    >>> p2 = parsedate("10:30:00")
    >>> p3 = parsedate("10:30:59")
    >>> p4 = parsedate("10:31:00")
    >>> p5 = parsedate("Sep 15 10:30:00 1999")
    >>> f = matchdate("10:30")
    >>> f(p1[0])
    False
    >>> f(p2[0])
    True
    >>> f(p3[0])
    True
    >>> f(p4[0])
    False
    >>> f(p5[0])
    False
    """

    def lower(date):
        # round unspecified fields down (first month/day)
        d = {'mb': "1", 'd': "1"}
        return parsedate(date, extendeddateformats, d)[0]

    def upper(date):
        # round unspecified fields up (last month/day, end of day);
        # month lengths differ, so probe the longest valid day count
        d = {'mb': "12", 'HI': "23", 'M': "59", 'S': "59"}
        for days in ("31", "30", "29"):
            try:
                d["d"] = days
                return parsedate(date, extendeddateformats, d)[0]
            except Abort:
                pass
        d["d"] = "28"
        return parsedate(date, extendeddateformats, d)[0]

    date = date.strip()

    if not date:
        raise Abort(_("dates cannot consist entirely of whitespace"))

    lead = date[0]
    if lead == "<":
        if not date[1:]:
            raise Abort(_("invalid day spec, use '<DATE'"))
        when = upper(date[1:])
        return lambda x: x <= when
    if lead == ">":
        if not date[1:]:
            raise Abort(_("invalid day spec, use '>DATE'"))
        when = lower(date[1:])
        return lambda x: x >= when
    if lead == "-":
        # "-N": everything within the last N days
        try:
            days = int(date[1:])
        except ValueError:
            raise Abort(_("invalid day spec: %s") % date[1:])
        if days < 0:
            raise Abort(_('%s must be nonnegative (see "hg help dates")')
                        % date[1:])
        when = makedate()[0] - days * 3600 * 24
        return lambda x: x >= when
    if " to " in date:
        a, b = date.split(" to ")
        start, stop = lower(a), upper(b)
        return lambda x: x >= start and x <= stop
    # plain date: match anywhere within its span of accuracy
    start, stop = lower(date), upper(date)
    return lambda x: x >= start and x <= stop
1805
1804
def stringmatcher(pattern):
    """
    accepts a string, possibly starting with 're:' or 'literal:' prefix.
    returns the matcher name, pattern, and matcher function.
    missing or unknown prefixes are treated as literal matches.

    helper for tests:
    >>> def test(pattern, *tests):
    ...     kind, pattern, matcher = stringmatcher(pattern)
    ...     return (kind, pattern, [bool(matcher(t)) for t in tests])

    exact matching (no prefix):
    >>> test('abcdefg', 'abc', 'def', 'abcdefg')
    ('literal', 'abcdefg', [False, False, True])

    regex matching ('re:' prefix)
    >>> test('re:a.+b', 'nomatch', 'fooadef', 'fooadefbar')
    ('re', 'a.+b', [False, False, True])

    force exact matches ('literal:' prefix)
    >>> test('literal:re:foobar', 'foobar', 're:foobar')
    ('literal', 're:foobar', [False, True])

    unknown prefixes are ignored and treated as literals
    >>> test('foo:bar', 'foo', 'bar', 'foo:bar')
    ('literal', 'foo:bar', [False, False, True])
    """
    if pattern.startswith('re:'):
        pattern = pattern[3:]
        try:
            regex = remod.compile(pattern)
        except remod.error as e:
            raise error.ParseError(_('invalid regular expression: %s')
                                   % e)
        return 're', pattern, regex.search
    if pattern.startswith('literal:'):
        # explicit literal: just strip the prefix
        pattern = pattern[8:]
    # both stripped 'literal:' and bare patterns compare by equality
    return 'literal', pattern, pattern.__eq__
1844
1843
def shortuser(user):
    """Return a short representation of a user name or email address."""
    # apply the same cuts as the original: drop everything from '@',
    # keep what follows '<', then truncate at the first space and '.'
    for sep, keeptail in (('@', False), ('<', True), (' ', False),
                          ('.', False)):
        idx = user.find(sep)
        if idx >= 0:
            user = user[idx + 1:] if keeptail else user[:idx]
    return user
1860
1859
def emailuser(user):
    """Return the user portion of an email address."""
    at = user.find('@')
    if at >= 0:
        # drop the domain part
        user = user[:at]
    lt = user.find('<')
    if lt >= 0:
        # drop a leading real-name prefix like 'Foo Bar <'
        user = user[lt + 1:]
    return user
1870
1869
def email(author):
    '''get email of author.'''
    # slice between '<' and '>'; with neither present this returns
    # the whole string (find('<') + 1 == 0, end == None)
    end = author.find('>')
    if end == -1:
        end = None
    return author[author.find('<') + 1:end]
1877
1876
def ellipsis(text, maxlength=400):
    """Trim string to at most maxlength (default: 400) columns in display."""
    # width-aware trimming lives in encoding.trim
    return encoding.trim(text, maxlength, ellipsis='...')
1881
1880
def unitcountfn(*unittable):
    '''return a function that renders a readable count of some quantity'''

    def render(count):
        # use the first unit whose threshold this count reaches
        for multiplier, divisor, fmt in unittable:
            if count >= divisor * multiplier:
                return fmt % (count / float(divisor))
        # smaller than every threshold: fall back to the last entry
        return unittable[-1][2] % count

    return render
1892
1891
# human-readable byte counts: first (multiplier, divisor, format) entry
# whose threshold the value reaches wins (see unitcountfn)
bytecount = unitcountfn(
    (100, 1 << 30, _('%.0f GB')),
    (10, 1 << 30, _('%.1f GB')),
    (1, 1 << 30, _('%.2f GB')),
    (100, 1 << 20, _('%.0f MB')),
    (10, 1 << 20, _('%.1f MB')),
    (1, 1 << 20, _('%.2f MB')),
    (100, 1 << 10, _('%.0f KB')),
    (10, 1 << 10, _('%.1f KB')),
    (1, 1 << 10, _('%.2f KB')),
    (1, 1, _('%.0f bytes')),
    )
1905
1904
def uirepr(s):
    # repr() doubles every backslash, which garbles Windows paths; undo it
    return repr(s).replace('\\\\', '\\')
1909
1908
# delay import of textwrap
def MBTextWrapper(**kwargs):
    class tw(textwrap.TextWrapper):
        """
        Extend TextWrapper for width-awareness.

        Neither number of 'bytes' in any encoding nor 'characters' is
        appropriate to calculate terminal columns for specified string.

        Original TextWrapper implementation uses built-in 'len()' directly,
        so overriding is needed to use width information of each characters.

        In addition, characters classified into 'ambiguous' width are
        treated as wide in East Asian area, but as narrow in other.

        This requires use decision to determine width of such characters.
        """
        def _cutdown(self, ucstr, space_left):
            # split ucstr before the first character that overflows
            # space_left display columns
            consumed = 0
            colwidth = encoding.ucolwidth
            for i in xrange(len(ucstr)):
                consumed += colwidth(ucstr[i])
                if space_left < consumed:
                    return (ucstr[:i], ucstr[i:])
            return ucstr, ''

        # overriding of base class
        def _handle_long_word(self, reversed_chunks, cur_line, cur_len, width):
            space_left = max(width - cur_len, 1)

            if self.break_long_words:
                cut, res = self._cutdown(reversed_chunks[-1], space_left)
                cur_line.append(cut)
                reversed_chunks[-1] = res
            elif not cur_line:
                cur_line.append(reversed_chunks.pop())

        # this overriding code is imported from TextWrapper of Python 2.6
        # to calculate columns of string by 'encoding.ucolwidth()'
        def _wrap_chunks(self, chunks):
            colwidth = encoding.ucolwidth

            lines = []
            if self.width <= 0:
                raise ValueError("invalid width %r (must be > 0)" % self.width)

            # Arrange in reverse order so items can be efficiently popped
            # from a stack of chucks.
            chunks.reverse()

            while chunks:
                # chunks of the line being built and their total display width
                cur_line = []
                cur_len = 0

                # static string prefixing this line
                indent = self.subsequent_indent if lines else self.initial_indent

                # Maximum width for this line.
                width = self.width - len(indent)

                # First chunk on line is whitespace -- drop it, unless this
                # is the very beginning of the text (i.e. no lines started yet).
                if self.drop_whitespace and chunks[-1].strip() == '' and lines:
                    del chunks[-1]

                while chunks:
                    l = colwidth(chunks[-1])
                    if cur_len + l <= width:
                        # chunk still fits on the current line
                        cur_line.append(chunks.pop())
                        cur_len += l
                    else:
                        # line is full
                        break

                # The current line is full, and the next chunk is too big to
                # fit on *any* line (not just this one).
                if chunks and colwidth(chunks[-1]) > width:
                    self._handle_long_word(chunks, cur_line, cur_len, width)

                # If the last chunk on this line is all whitespace, drop it.
                if (self.drop_whitespace and
                    cur_line and cur_line[-1].strip() == ''):
                    del cur_line[-1]

                # Convert current line back to a string and store it in list
                # of all lines (return value).
                if cur_line:
                    lines.append(indent + ''.join(cur_line))

            return lines

    # cache the class so later calls skip the class definition entirely
    global MBTextWrapper
    MBTextWrapper = tw
    return tw(**kwargs)
2013
2012
def wrap(line, width, initindent='', hangindent=''):
    """Wrap line to width display columns, honoring the two indents."""
    maxindent = max(len(hangindent), len(initindent))
    if width <= maxindent:
        # adjust for weird terminal size
        width = max(78, maxindent + 1)
    # work in unicode so column widths are computed per character
    line = line.decode(encoding.encoding, encoding.encodingmode)
    initindent = initindent.decode(encoding.encoding, encoding.encodingmode)
    hangindent = hangindent.decode(encoding.encoding, encoding.encodingmode)
    wrapper = MBTextWrapper(width=width,
                            initial_indent=initindent,
                            subsequent_indent=hangindent)
    return wrapper.fill(line).encode(encoding.encoding)
2026
2025
def iterlines(iterator):
    """Yield the individual lines of every chunk produced by iterator."""
    for chunk in iterator:
        for line in chunk.splitlines():
            yield line
2031
2030
def expandpath(path):
    """Expand environment variables, then ~user constructs, in path."""
    return os.path.expanduser(os.path.expandvars(path))
2034
2033
def hgcmd():
    """Return the command used to execute current hg

    This is different from hgexecutable() because on Windows we want
    to avoid things opening new shell windows like batch files, so we
    get either the python call or current executable.
    """
    if not mainfrozen():
        return gethgcmd()
    if getattr(sys, 'frozen', None) == 'macosx_app':
        # Env variable set by py2app
        return [os.environ['EXECUTABLEPATH']]
    return [sys.executable]
2049
2048
def rundetached(args, condfn):
    """Execute the argument list in a detached process.

    condfn is a callable which is called repeatedly and should return
    True once the child process is known to have started successfully.
    At this point, the child process PID is returned. If the child
    process fails to start or finishes before condfn() evaluates to
    True, return -1.
    """
    # Windows case is easier because the child process is either
    # successfully starting and validating the condition or exiting
    # on failure. We just poll on its PID. On Unix, if the child
    # process fails to start, it will be left in a zombie state until
    # the parent wait on it, which we cannot do since we expect a long
    # running process on success. Instead we listen for SIGCHLD telling
    # us our child process terminated.
    terminated = set()
    def handler(signum, frame):
        terminated.add(os.wait())

    prevhandler = None
    SIGCHLD = getattr(signal, 'SIGCHLD', None)
    if SIGCHLD is not None:
        prevhandler = signal.signal(SIGCHLD, handler)
    try:
        pid = spawndetached(args)
        while not condfn():
            if ((pid in terminated or not testpid(pid))
                and not condfn()):
                return -1
            time.sleep(0.1)
        return pid
    finally:
        # restore the previous SIGCHLD disposition, if we changed it
        if prevhandler is not None:
            signal.signal(signal.SIGCHLD, prevhandler)
2084
2083
def interpolate(prefix, mapping, s, fn=None, escape_prefix=False):
    """Return the result of interpolating items in the mapping into string s.

    prefix is a single character string, or a two character string with
    a backslash as the first character if the prefix needs to be escaped in
    a regular expression.

    fn is an optional function that will be applied to the replacement text
    just before replacement.

    escape_prefix is an optional flag that allows using doubled prefix for
    its escaping.
    """
    fn = fn or (lambda s: s)
    patterns = '|'.join(mapping.keys())
    if escape_prefix:
        # a doubled prefix maps to a single literal prefix character
        patterns += '|' + prefix
        prefix_char = prefix[1:] if len(prefix) > 1 else prefix
        mapping[prefix_char] = prefix_char
    regex = remod.compile(r'%s(%s)' % (prefix, patterns))
    return regex.sub(lambda x: fn(mapping[x.group()[1:]]), s)
2109
2108
def getport(port):
    """Return the port for a given network service.

    If port is an integer, it's returned as is. If it's a string, it's
    looked up using socket.getservbyname(). If there's no matching
    service, error.Abort is raised.
    """
    try:
        return int(port)
    except ValueError:
        # not numeric: resolve as a service name below
        pass

    try:
        return socket.getservbyname(port)
    except socket.error:
        raise Abort(_("no port number associated with service '%s'") % port)
2126
2125
2127 _booleans = {'1': True, 'yes': True, 'true': True, 'on': True, 'always': True,
2126 _booleans = {'1': True, 'yes': True, 'true': True, 'on': True, 'always': True,
2128 '0': False, 'no': False, 'false': False, 'off': False,
2127 '0': False, 'no': False, 'false': False, 'off': False,
2129 'never': False}
2128 'never': False}
2130
2129
def parsebool(s):
    """Parse s into a boolean.

    If s is not a valid boolean, returns None.
    """
    # lookup is case-insensitive; unknown spellings yield None
    return _booleans.get(s.lower(), None)
2137
2136
2138 _hexdig = '0123456789ABCDEFabcdef'
2137 _hexdig = '0123456789ABCDEFabcdef'
2139 _hextochr = dict((a + b, chr(int(a + b, 16)))
2138 _hextochr = dict((a + b, chr(int(a + b, 16)))
2140 for a in _hexdig for b in _hexdig)
2139 for a in _hexdig for b in _hexdig)
2141
2140
def _urlunquote(s):
    """Decode HTTP/HTML % encoding.

    >>> _urlunquote('abc%20def')
    'abc def'
    """
    fragments = s.split('%')
    # fastpath: no '%' at all
    if len(fragments) == 1:
        return s
    s = fragments[0]
    for frag in fragments[1:]:
        try:
            s += _hextochr[frag[:2]] + frag[2:]
        except KeyError:
            # not a valid two-digit escape: keep the '%' literally
            s += '%' + frag
        except UnicodeDecodeError:
            s += unichr(int(frag[:2], 16)) + frag[2:]
    return s
2161
2160
2162 class url(object):
2161 class url(object):
2163 r"""Reliable URL parser.
2162 r"""Reliable URL parser.
2164
2163
2165 This parses URLs and provides attributes for the following
2164 This parses URLs and provides attributes for the following
2166 components:
2165 components:
2167
2166
2168 <scheme>://<user>:<passwd>@<host>:<port>/<path>?<query>#<fragment>
2167 <scheme>://<user>:<passwd>@<host>:<port>/<path>?<query>#<fragment>
2169
2168
2170 Missing components are set to None. The only exception is
2169 Missing components are set to None. The only exception is
2171 fragment, which is set to '' if present but empty.
2170 fragment, which is set to '' if present but empty.
2172
2171
2173 If parsefragment is False, fragment is included in query. If
2172 If parsefragment is False, fragment is included in query. If
2174 parsequery is False, query is included in path. If both are
2173 parsequery is False, query is included in path. If both are
2175 False, both fragment and query are included in path.
2174 False, both fragment and query are included in path.
2176
2175
2177 See http://www.ietf.org/rfc/rfc2396.txt for more information.
2176 See http://www.ietf.org/rfc/rfc2396.txt for more information.
2178
2177
2179 Note that for backward compatibility reasons, bundle URLs do not
2178 Note that for backward compatibility reasons, bundle URLs do not
2180 take host names. That means 'bundle://../' has a path of '../'.
2179 take host names. That means 'bundle://../' has a path of '../'.
2181
2180
2182 Examples:
2181 Examples:
2183
2182
2184 >>> url('http://www.ietf.org/rfc/rfc2396.txt')
2183 >>> url('http://www.ietf.org/rfc/rfc2396.txt')
2185 <url scheme: 'http', host: 'www.ietf.org', path: 'rfc/rfc2396.txt'>
2184 <url scheme: 'http', host: 'www.ietf.org', path: 'rfc/rfc2396.txt'>
2186 >>> url('ssh://[::1]:2200//home/joe/repo')
2185 >>> url('ssh://[::1]:2200//home/joe/repo')
2187 <url scheme: 'ssh', host: '[::1]', port: '2200', path: '/home/joe/repo'>
2186 <url scheme: 'ssh', host: '[::1]', port: '2200', path: '/home/joe/repo'>
2188 >>> url('file:///home/joe/repo')
2187 >>> url('file:///home/joe/repo')
2189 <url scheme: 'file', path: '/home/joe/repo'>
2188 <url scheme: 'file', path: '/home/joe/repo'>
2190 >>> url('file:///c:/temp/foo/')
2189 >>> url('file:///c:/temp/foo/')
2191 <url scheme: 'file', path: 'c:/temp/foo/'>
2190 <url scheme: 'file', path: 'c:/temp/foo/'>
2192 >>> url('bundle:foo')
2191 >>> url('bundle:foo')
2193 <url scheme: 'bundle', path: 'foo'>
2192 <url scheme: 'bundle', path: 'foo'>
2194 >>> url('bundle://../foo')
2193 >>> url('bundle://../foo')
2195 <url scheme: 'bundle', path: '../foo'>
2194 <url scheme: 'bundle', path: '../foo'>
2196 >>> url(r'c:\foo\bar')
2195 >>> url(r'c:\foo\bar')
2197 <url path: 'c:\\foo\\bar'>
2196 <url path: 'c:\\foo\\bar'>
2198 >>> url(r'\\blah\blah\blah')
2197 >>> url(r'\\blah\blah\blah')
2199 <url path: '\\\\blah\\blah\\blah'>
2198 <url path: '\\\\blah\\blah\\blah'>
2200 >>> url(r'\\blah\blah\blah#baz')
2199 >>> url(r'\\blah\blah\blah#baz')
2201 <url path: '\\\\blah\\blah\\blah', fragment: 'baz'>
2200 <url path: '\\\\blah\\blah\\blah', fragment: 'baz'>
2202 >>> url(r'file:///C:\users\me')
2201 >>> url(r'file:///C:\users\me')
2203 <url scheme: 'file', path: 'C:\\users\\me'>
2202 <url scheme: 'file', path: 'C:\\users\\me'>
2204
2203
2205 Authentication credentials:
2204 Authentication credentials:
2206
2205
2207 >>> url('ssh://joe:xyz@x/repo')
2206 >>> url('ssh://joe:xyz@x/repo')
2208 <url scheme: 'ssh', user: 'joe', passwd: 'xyz', host: 'x', path: 'repo'>
2207 <url scheme: 'ssh', user: 'joe', passwd: 'xyz', host: 'x', path: 'repo'>
2209 >>> url('ssh://joe@x/repo')
2208 >>> url('ssh://joe@x/repo')
2210 <url scheme: 'ssh', user: 'joe', host: 'x', path: 'repo'>
2209 <url scheme: 'ssh', user: 'joe', host: 'x', path: 'repo'>
2211
2210
2212 Query strings and fragments:
2211 Query strings and fragments:
2213
2212
2214 >>> url('http://host/a?b#c')
2213 >>> url('http://host/a?b#c')
2215 <url scheme: 'http', host: 'host', path: 'a', query: 'b', fragment: 'c'>
2214 <url scheme: 'http', host: 'host', path: 'a', query: 'b', fragment: 'c'>
2216 >>> url('http://host/a?b#c', parsequery=False, parsefragment=False)
2215 >>> url('http://host/a?b#c', parsequery=False, parsefragment=False)
2217 <url scheme: 'http', host: 'host', path: 'a?b#c'>
2216 <url scheme: 'http', host: 'host', path: 'a?b#c'>
2218 """
2217 """
2219
2218
2220 _safechars = "!~*'()+"
2219 _safechars = "!~*'()+"
2221 _safepchars = "/!~*'()+:\\"
2220 _safepchars = "/!~*'()+:\\"
2222 _matchscheme = remod.compile(r'^[a-zA-Z0-9+.\-]+:').match
2221 _matchscheme = remod.compile(r'^[a-zA-Z0-9+.\-]+:').match
2223
2222
2224 def __init__(self, path, parsequery=True, parsefragment=True):
2223 def __init__(self, path, parsequery=True, parsefragment=True):
2225 # We slowly chomp away at path until we have only the path left
2224 # We slowly chomp away at path until we have only the path left
2226 self.scheme = self.user = self.passwd = self.host = None
2225 self.scheme = self.user = self.passwd = self.host = None
2227 self.port = self.path = self.query = self.fragment = None
2226 self.port = self.path = self.query = self.fragment = None
2228 self._localpath = True
2227 self._localpath = True
2229 self._hostport = ''
2228 self._hostport = ''
2230 self._origpath = path
2229 self._origpath = path
2231
2230
2232 if parsefragment and '#' in path:
2231 if parsefragment and '#' in path:
2233 path, self.fragment = path.split('#', 1)
2232 path, self.fragment = path.split('#', 1)
2234 if not path:
2233 if not path:
2235 path = None
2234 path = None
2236
2235
2237 # special case for Windows drive letters and UNC paths
2236 # special case for Windows drive letters and UNC paths
2238 if hasdriveletter(path) or path.startswith(r'\\'):
2237 if hasdriveletter(path) or path.startswith(r'\\'):
2239 self.path = path
2238 self.path = path
2240 return
2239 return
2241
2240
2242 # For compatibility reasons, we can't handle bundle paths as
2241 # For compatibility reasons, we can't handle bundle paths as
2243 # normal URLS
2242 # normal URLS
2244 if path.startswith('bundle:'):
2243 if path.startswith('bundle:'):
2245 self.scheme = 'bundle'
2244 self.scheme = 'bundle'
2246 path = path[7:]
2245 path = path[7:]
2247 if path.startswith('//'):
2246 if path.startswith('//'):
2248 path = path[2:]
2247 path = path[2:]
2249 self.path = path
2248 self.path = path
2250 return
2249 return
2251
2250
2252 if self._matchscheme(path):
2251 if self._matchscheme(path):
2253 parts = path.split(':', 1)
2252 parts = path.split(':', 1)
2254 if parts[0]:
2253 if parts[0]:
2255 self.scheme, path = parts
2254 self.scheme, path = parts
2256 self._localpath = False
2255 self._localpath = False
2257
2256
2258 if not path:
2257 if not path:
2259 path = None
2258 path = None
2260 if self._localpath:
2259 if self._localpath:
2261 self.path = ''
2260 self.path = ''
2262 return
2261 return
2263 else:
2262 else:
2264 if self._localpath:
2263 if self._localpath:
2265 self.path = path
2264 self.path = path
2266 return
2265 return
2267
2266
2268 if parsequery and '?' in path:
2267 if parsequery and '?' in path:
2269 path, self.query = path.split('?', 1)
2268 path, self.query = path.split('?', 1)
2270 if not path:
2269 if not path:
2271 path = None
2270 path = None
2272 if not self.query:
2271 if not self.query:
2273 self.query = None
2272 self.query = None
2274
2273
2275 # // is required to specify a host/authority
2274 # // is required to specify a host/authority
2276 if path and path.startswith('//'):
2275 if path and path.startswith('//'):
2277 parts = path[2:].split('/', 1)
2276 parts = path[2:].split('/', 1)
2278 if len(parts) > 1:
2277 if len(parts) > 1:
2279 self.host, path = parts
2278 self.host, path = parts
2280 else:
2279 else:
2281 self.host = parts[0]
2280 self.host = parts[0]
2282 path = None
2281 path = None
2283 if not self.host:
2282 if not self.host:
2284 self.host = None
2283 self.host = None
2285 # path of file:///d is /d
2284 # path of file:///d is /d
2286 # path of file:///d:/ is d:/, not /d:/
2285 # path of file:///d:/ is d:/, not /d:/
2287 if path and not hasdriveletter(path):
2286 if path and not hasdriveletter(path):
2288 path = '/' + path
2287 path = '/' + path
2289
2288
2290 if self.host and '@' in self.host:
2289 if self.host and '@' in self.host:
2291 self.user, self.host = self.host.rsplit('@', 1)
2290 self.user, self.host = self.host.rsplit('@', 1)
2292 if ':' in self.user:
2291 if ':' in self.user:
2293 self.user, self.passwd = self.user.split(':', 1)
2292 self.user, self.passwd = self.user.split(':', 1)
2294 if not self.host:
2293 if not self.host:
2295 self.host = None
2294 self.host = None
2296
2295
2297 # Don't split on colons in IPv6 addresses without ports
2296 # Don't split on colons in IPv6 addresses without ports
2298 if (self.host and ':' in self.host and
2297 if (self.host and ':' in self.host and
2299 not (self.host.startswith('[') and self.host.endswith(']'))):
2298 not (self.host.startswith('[') and self.host.endswith(']'))):
2300 self._hostport = self.host
2299 self._hostport = self.host
2301 self.host, self.port = self.host.rsplit(':', 1)
2300 self.host, self.port = self.host.rsplit(':', 1)
2302 if not self.host:
2301 if not self.host:
2303 self.host = None
2302 self.host = None
2304
2303
2305 if (self.host and self.scheme == 'file' and
2304 if (self.host and self.scheme == 'file' and
2306 self.host not in ('localhost', '127.0.0.1', '[::1]')):
2305 self.host not in ('localhost', '127.0.0.1', '[::1]')):
2307 raise Abort(_('file:// URLs can only refer to localhost'))
2306 raise Abort(_('file:// URLs can only refer to localhost'))
2308
2307
2309 self.path = path
2308 self.path = path
2310
2309
2311 # leave the query string escaped
2310 # leave the query string escaped
2312 for a in ('user', 'passwd', 'host', 'port',
2311 for a in ('user', 'passwd', 'host', 'port',
2313 'path', 'fragment'):
2312 'path', 'fragment'):
2314 v = getattr(self, a)
2313 v = getattr(self, a)
2315 if v is not None:
2314 if v is not None:
2316 setattr(self, a, _urlunquote(v))
2315 setattr(self, a, _urlunquote(v))
2317
2316
2318 def __repr__(self):
2317 def __repr__(self):
2319 attrs = []
2318 attrs = []
2320 for a in ('scheme', 'user', 'passwd', 'host', 'port', 'path',
2319 for a in ('scheme', 'user', 'passwd', 'host', 'port', 'path',
2321 'query', 'fragment'):
2320 'query', 'fragment'):
2322 v = getattr(self, a)
2321 v = getattr(self, a)
2323 if v is not None:
2322 if v is not None:
2324 attrs.append('%s: %r' % (a, v))
2323 attrs.append('%s: %r' % (a, v))
2325 return '<url %s>' % ', '.join(attrs)
2324 return '<url %s>' % ', '.join(attrs)
2326
2325
2327 def __str__(self):
2326 def __str__(self):
2328 r"""Join the URL's components back into a URL string.
2327 r"""Join the URL's components back into a URL string.
2329
2328
2330 Examples:
2329 Examples:
2331
2330
2332 >>> str(url('http://user:pw@host:80/c:/bob?fo:oo#ba:ar'))
2331 >>> str(url('http://user:pw@host:80/c:/bob?fo:oo#ba:ar'))
2333 'http://user:pw@host:80/c:/bob?fo:oo#ba:ar'
2332 'http://user:pw@host:80/c:/bob?fo:oo#ba:ar'
2334 >>> str(url('http://user:pw@host:80/?foo=bar&baz=42'))
2333 >>> str(url('http://user:pw@host:80/?foo=bar&baz=42'))
2335 'http://user:pw@host:80/?foo=bar&baz=42'
2334 'http://user:pw@host:80/?foo=bar&baz=42'
2336 >>> str(url('http://user:pw@host:80/?foo=bar%3dbaz'))
2335 >>> str(url('http://user:pw@host:80/?foo=bar%3dbaz'))
2337 'http://user:pw@host:80/?foo=bar%3dbaz'
2336 'http://user:pw@host:80/?foo=bar%3dbaz'
2338 >>> str(url('ssh://user:pw@[::1]:2200//home/joe#'))
2337 >>> str(url('ssh://user:pw@[::1]:2200//home/joe#'))
2339 'ssh://user:pw@[::1]:2200//home/joe#'
2338 'ssh://user:pw@[::1]:2200//home/joe#'
2340 >>> str(url('http://localhost:80//'))
2339 >>> str(url('http://localhost:80//'))
2341 'http://localhost:80//'
2340 'http://localhost:80//'
2342 >>> str(url('http://localhost:80/'))
2341 >>> str(url('http://localhost:80/'))
2343 'http://localhost:80/'
2342 'http://localhost:80/'
2344 >>> str(url('http://localhost:80'))
2343 >>> str(url('http://localhost:80'))
2345 'http://localhost:80/'
2344 'http://localhost:80/'
2346 >>> str(url('bundle:foo'))
2345 >>> str(url('bundle:foo'))
2347 'bundle:foo'
2346 'bundle:foo'
2348 >>> str(url('bundle://../foo'))
2347 >>> str(url('bundle://../foo'))
2349 'bundle:../foo'
2348 'bundle:../foo'
2350 >>> str(url('path'))
2349 >>> str(url('path'))
2351 'path'
2350 'path'
2352 >>> str(url('file:///tmp/foo/bar'))
2351 >>> str(url('file:///tmp/foo/bar'))
2353 'file:///tmp/foo/bar'
2352 'file:///tmp/foo/bar'
2354 >>> str(url('file:///c:/tmp/foo/bar'))
2353 >>> str(url('file:///c:/tmp/foo/bar'))
2355 'file:///c:/tmp/foo/bar'
2354 'file:///c:/tmp/foo/bar'
2356 >>> print url(r'bundle:foo\bar')
2355 >>> print url(r'bundle:foo\bar')
2357 bundle:foo\bar
2356 bundle:foo\bar
2358 >>> print url(r'file:///D:\data\hg')
2357 >>> print url(r'file:///D:\data\hg')
2359 file:///D:\data\hg
2358 file:///D:\data\hg
2360 """
2359 """
2361 if self._localpath:
2360 if self._localpath:
2362 s = self.path
2361 s = self.path
2363 if self.scheme == 'bundle':
2362 if self.scheme == 'bundle':
2364 s = 'bundle:' + s
2363 s = 'bundle:' + s
2365 if self.fragment:
2364 if self.fragment:
2366 s += '#' + self.fragment
2365 s += '#' + self.fragment
2367 return s
2366 return s
2368
2367
2369 s = self.scheme + ':'
2368 s = self.scheme + ':'
2370 if self.user or self.passwd or self.host:
2369 if self.user or self.passwd or self.host:
2371 s += '//'
2370 s += '//'
2372 elif self.scheme and (not self.path or self.path.startswith('/')
2371 elif self.scheme and (not self.path or self.path.startswith('/')
2373 or hasdriveletter(self.path)):
2372 or hasdriveletter(self.path)):
2374 s += '//'
2373 s += '//'
2375 if hasdriveletter(self.path):
2374 if hasdriveletter(self.path):
2376 s += '/'
2375 s += '/'
2377 if self.user:
2376 if self.user:
2378 s += urllib.quote(self.user, safe=self._safechars)
2377 s += urllib.quote(self.user, safe=self._safechars)
2379 if self.passwd:
2378 if self.passwd:
2380 s += ':' + urllib.quote(self.passwd, safe=self._safechars)
2379 s += ':' + urllib.quote(self.passwd, safe=self._safechars)
2381 if self.user or self.passwd:
2380 if self.user or self.passwd:
2382 s += '@'
2381 s += '@'
2383 if self.host:
2382 if self.host:
2384 if not (self.host.startswith('[') and self.host.endswith(']')):
2383 if not (self.host.startswith('[') and self.host.endswith(']')):
2385 s += urllib.quote(self.host)
2384 s += urllib.quote(self.host)
2386 else:
2385 else:
2387 s += self.host
2386 s += self.host
2388 if self.port:
2387 if self.port:
2389 s += ':' + urllib.quote(self.port)
2388 s += ':' + urllib.quote(self.port)
2390 if self.host:
2389 if self.host:
2391 s += '/'
2390 s += '/'
2392 if self.path:
2391 if self.path:
2393 # TODO: similar to the query string, we should not unescape the
2392 # TODO: similar to the query string, we should not unescape the
2394 # path when we store it, the path might contain '%2f' = '/',
2393 # path when we store it, the path might contain '%2f' = '/',
2395 # which we should *not* escape.
2394 # which we should *not* escape.
2396 s += urllib.quote(self.path, safe=self._safepchars)
2395 s += urllib.quote(self.path, safe=self._safepchars)
2397 if self.query:
2396 if self.query:
2398 # we store the query in escaped form.
2397 # we store the query in escaped form.
2399 s += '?' + self.query
2398 s += '?' + self.query
2400 if self.fragment is not None:
2399 if self.fragment is not None:
2401 s += '#' + urllib.quote(self.fragment, safe=self._safepchars)
2400 s += '#' + urllib.quote(self.fragment, safe=self._safepchars)
2402 return s
2401 return s
2403
2402
2404 def authinfo(self):
2403 def authinfo(self):
2405 user, passwd = self.user, self.passwd
2404 user, passwd = self.user, self.passwd
2406 try:
2405 try:
2407 self.user, self.passwd = None, None
2406 self.user, self.passwd = None, None
2408 s = str(self)
2407 s = str(self)
2409 finally:
2408 finally:
2410 self.user, self.passwd = user, passwd
2409 self.user, self.passwd = user, passwd
2411 if not self.user:
2410 if not self.user:
2412 return (s, None)
2411 return (s, None)
2413 # authinfo[1] is passed to urllib2 password manager, and its
2412 # authinfo[1] is passed to urllib2 password manager, and its
2414 # URIs must not contain credentials. The host is passed in the
2413 # URIs must not contain credentials. The host is passed in the
2415 # URIs list because Python < 2.4.3 uses only that to search for
2414 # URIs list because Python < 2.4.3 uses only that to search for
2416 # a password.
2415 # a password.
2417 return (s, (None, (s, self.host),
2416 return (s, (None, (s, self.host),
2418 self.user, self.passwd or ''))
2417 self.user, self.passwd or ''))
2419
2418
2420 def isabs(self):
2419 def isabs(self):
2421 if self.scheme and self.scheme != 'file':
2420 if self.scheme and self.scheme != 'file':
2422 return True # remote URL
2421 return True # remote URL
2423 if hasdriveletter(self.path):
2422 if hasdriveletter(self.path):
2424 return True # absolute for our purposes - can't be joined()
2423 return True # absolute for our purposes - can't be joined()
2425 if self.path.startswith(r'\\'):
2424 if self.path.startswith(r'\\'):
2426 return True # Windows UNC path
2425 return True # Windows UNC path
2427 if self.path.startswith('/'):
2426 if self.path.startswith('/'):
2428 return True # POSIX-style
2427 return True # POSIX-style
2429 return False
2428 return False
2430
2429
2431 def localpath(self):
2430 def localpath(self):
2432 if self.scheme == 'file' or self.scheme == 'bundle':
2431 if self.scheme == 'file' or self.scheme == 'bundle':
2433 path = self.path or '/'
2432 path = self.path or '/'
2434 # For Windows, we need to promote hosts containing drive
2433 # For Windows, we need to promote hosts containing drive
2435 # letters to paths with drive letters.
2434 # letters to paths with drive letters.
2436 if hasdriveletter(self._hostport):
2435 if hasdriveletter(self._hostport):
2437 path = self._hostport + '/' + self.path
2436 path = self._hostport + '/' + self.path
2438 elif (self.host is not None and self.path
2437 elif (self.host is not None and self.path
2439 and not hasdriveletter(path)):
2438 and not hasdriveletter(path)):
2440 path = '/' + path
2439 path = '/' + path
2441 return path
2440 return path
2442 return self._origpath
2441 return self._origpath
2443
2442
2444 def islocal(self):
2443 def islocal(self):
2445 '''whether localpath will return something that posixfile can open'''
2444 '''whether localpath will return something that posixfile can open'''
2446 return (not self.scheme or self.scheme == 'file'
2445 return (not self.scheme or self.scheme == 'file'
2447 or self.scheme == 'bundle')
2446 or self.scheme == 'bundle')
2448
2447
2449 def hasscheme(path):
2448 def hasscheme(path):
2450 return bool(url(path).scheme)
2449 return bool(url(path).scheme)
2451
2450
2452 def hasdriveletter(path):
2451 def hasdriveletter(path):
2453 return path and path[1:2] == ':' and path[0:1].isalpha()
2452 return path and path[1:2] == ':' and path[0:1].isalpha()
2454
2453
2455 def urllocalpath(path):
2454 def urllocalpath(path):
2456 return url(path, parsequery=False, parsefragment=False).localpath()
2455 return url(path, parsequery=False, parsefragment=False).localpath()
2457
2456
2458 def hidepassword(u):
2457 def hidepassword(u):
2459 '''hide user credential in a url string'''
2458 '''hide user credential in a url string'''
2460 u = url(u)
2459 u = url(u)
2461 if u.passwd:
2460 if u.passwd:
2462 u.passwd = '***'
2461 u.passwd = '***'
2463 return str(u)
2462 return str(u)
2464
2463
2465 def removeauth(u):
2464 def removeauth(u):
2466 '''remove all authentication information from a url string'''
2465 '''remove all authentication information from a url string'''
2467 u = url(u)
2466 u = url(u)
2468 u.user = u.passwd = None
2467 u.user = u.passwd = None
2469 return str(u)
2468 return str(u)
2470
2469
2471 def isatty(fp):
2470 def isatty(fp):
2472 try:
2471 try:
2473 return fp.isatty()
2472 return fp.isatty()
2474 except AttributeError:
2473 except AttributeError:
2475 return False
2474 return False
2476
2475
2477 timecount = unitcountfn(
2476 timecount = unitcountfn(
2478 (1, 1e3, _('%.0f s')),
2477 (1, 1e3, _('%.0f s')),
2479 (100, 1, _('%.1f s')),
2478 (100, 1, _('%.1f s')),
2480 (10, 1, _('%.2f s')),
2479 (10, 1, _('%.2f s')),
2481 (1, 1, _('%.3f s')),
2480 (1, 1, _('%.3f s')),
2482 (100, 0.001, _('%.1f ms')),
2481 (100, 0.001, _('%.1f ms')),
2483 (10, 0.001, _('%.2f ms')),
2482 (10, 0.001, _('%.2f ms')),
2484 (1, 0.001, _('%.3f ms')),
2483 (1, 0.001, _('%.3f ms')),
2485 (100, 0.000001, _('%.1f us')),
2484 (100, 0.000001, _('%.1f us')),
2486 (10, 0.000001, _('%.2f us')),
2485 (10, 0.000001, _('%.2f us')),
2487 (1, 0.000001, _('%.3f us')),
2486 (1, 0.000001, _('%.3f us')),
2488 (100, 0.000000001, _('%.1f ns')),
2487 (100, 0.000000001, _('%.1f ns')),
2489 (10, 0.000000001, _('%.2f ns')),
2488 (10, 0.000000001, _('%.2f ns')),
2490 (1, 0.000000001, _('%.3f ns')),
2489 (1, 0.000000001, _('%.3f ns')),
2491 )
2490 )
2492
2491
2493 _timenesting = [0]
2492 _timenesting = [0]
2494
2493
2495 def timed(func):
2494 def timed(func):
2496 '''Report the execution time of a function call to stderr.
2495 '''Report the execution time of a function call to stderr.
2497
2496
2498 During development, use as a decorator when you need to measure
2497 During development, use as a decorator when you need to measure
2499 the cost of a function, e.g. as follows:
2498 the cost of a function, e.g. as follows:
2500
2499
2501 @util.timed
2500 @util.timed
2502 def foo(a, b, c):
2501 def foo(a, b, c):
2503 pass
2502 pass
2504 '''
2503 '''
2505
2504
2506 def wrapper(*args, **kwargs):
2505 def wrapper(*args, **kwargs):
2507 start = time.time()
2506 start = time.time()
2508 indent = 2
2507 indent = 2
2509 _timenesting[0] += indent
2508 _timenesting[0] += indent
2510 try:
2509 try:
2511 return func(*args, **kwargs)
2510 return func(*args, **kwargs)
2512 finally:
2511 finally:
2513 elapsed = time.time() - start
2512 elapsed = time.time() - start
2514 _timenesting[0] -= indent
2513 _timenesting[0] -= indent
2515 sys.stderr.write('%s%s: %s\n' %
2514 sys.stderr.write('%s%s: %s\n' %
2516 (' ' * _timenesting[0], func.__name__,
2515 (' ' * _timenesting[0], func.__name__,
2517 timecount(elapsed)))
2516 timecount(elapsed)))
2518 return wrapper
2517 return wrapper
2519
2518
2520 _sizeunits = (('m', 2**20), ('k', 2**10), ('g', 2**30),
2519 _sizeunits = (('m', 2**20), ('k', 2**10), ('g', 2**30),
2521 ('kb', 2**10), ('mb', 2**20), ('gb', 2**30), ('b', 1))
2520 ('kb', 2**10), ('mb', 2**20), ('gb', 2**30), ('b', 1))
2522
2521
2523 def sizetoint(s):
2522 def sizetoint(s):
2524 '''Convert a space specifier to a byte count.
2523 '''Convert a space specifier to a byte count.
2525
2524
2526 >>> sizetoint('30')
2525 >>> sizetoint('30')
2527 30
2526 30
2528 >>> sizetoint('2.2kb')
2527 >>> sizetoint('2.2kb')
2529 2252
2528 2252
2530 >>> sizetoint('6M')
2529 >>> sizetoint('6M')
2531 6291456
2530 6291456
2532 '''
2531 '''
2533 t = s.strip().lower()
2532 t = s.strip().lower()
2534 try:
2533 try:
2535 for k, u in _sizeunits:
2534 for k, u in _sizeunits:
2536 if t.endswith(k):
2535 if t.endswith(k):
2537 return int(float(t[:-len(k)]) * u)
2536 return int(float(t[:-len(k)]) * u)
2538 return int(t)
2537 return int(t)
2539 except ValueError:
2538 except ValueError:
2540 raise error.ParseError(_("couldn't parse size: %s") % s)
2539 raise error.ParseError(_("couldn't parse size: %s") % s)
2541
2540
2542 class hooks(object):
2541 class hooks(object):
2543 '''A collection of hook functions that can be used to extend a
2542 '''A collection of hook functions that can be used to extend a
2544 function's behavior. Hooks are called in lexicographic order,
2543 function's behavior. Hooks are called in lexicographic order,
2545 based on the names of their sources.'''
2544 based on the names of their sources.'''
2546
2545
2547 def __init__(self):
2546 def __init__(self):
2548 self._hooks = []
2547 self._hooks = []
2549
2548
2550 def add(self, source, hook):
2549 def add(self, source, hook):
2551 self._hooks.append((source, hook))
2550 self._hooks.append((source, hook))
2552
2551
2553 def __call__(self, *args):
2552 def __call__(self, *args):
2554 self._hooks.sort(key=lambda x: x[0])
2553 self._hooks.sort(key=lambda x: x[0])
2555 results = []
2554 results = []
2556 for source, hook in self._hooks:
2555 for source, hook in self._hooks:
2557 results.append(hook(*args))
2556 results.append(hook(*args))
2558 return results
2557 return results
2559
2558
2560 def getstackframes(skip=0, line=' %-*s in %s\n', fileline='%s:%s'):
2559 def getstackframes(skip=0, line=' %-*s in %s\n', fileline='%s:%s'):
2561 '''Yields lines for a nicely formatted stacktrace.
2560 '''Yields lines for a nicely formatted stacktrace.
2562 Skips the 'skip' last entries.
2561 Skips the 'skip' last entries.
2563 Each file+linenumber is formatted according to fileline.
2562 Each file+linenumber is formatted according to fileline.
2564 Each line is formatted according to line.
2563 Each line is formatted according to line.
2565 If line is None, it yields:
2564 If line is None, it yields:
2566 length of longest filepath+line number,
2565 length of longest filepath+line number,
2567 filepath+linenumber,
2566 filepath+linenumber,
2568 function
2567 function
2569
2568
2570 Not be used in production code but very convenient while developing.
2569 Not be used in production code but very convenient while developing.
2571 '''
2570 '''
2572 entries = [(fileline % (fn, ln), func)
2571 entries = [(fileline % (fn, ln), func)
2573 for fn, ln, func, _text in traceback.extract_stack()[:-skip - 1]]
2572 for fn, ln, func, _text in traceback.extract_stack()[:-skip - 1]]
2574 if entries:
2573 if entries:
2575 fnmax = max(len(entry[0]) for entry in entries)
2574 fnmax = max(len(entry[0]) for entry in entries)
2576 for fnln, func in entries:
2575 for fnln, func in entries:
2577 if line is None:
2576 if line is None:
2578 yield (fnmax, fnln, func)
2577 yield (fnmax, fnln, func)
2579 else:
2578 else:
2580 yield line % (fnmax, fnln, func)
2579 yield line % (fnmax, fnln, func)
2581
2580
2582 def debugstacktrace(msg='stacktrace', skip=0, f=sys.stderr, otherf=sys.stdout):
2581 def debugstacktrace(msg='stacktrace', skip=0, f=sys.stderr, otherf=sys.stdout):
2583 '''Writes a message to f (stderr) with a nicely formatted stacktrace.
2582 '''Writes a message to f (stderr) with a nicely formatted stacktrace.
2584 Skips the 'skip' last entries. By default it will flush stdout first.
2583 Skips the 'skip' last entries. By default it will flush stdout first.
2585 It can be used everywhere and intentionally does not require an ui object.
2584 It can be used everywhere and intentionally does not require an ui object.
2586 Not be used in production code but very convenient while developing.
2585 Not be used in production code but very convenient while developing.
2587 '''
2586 '''
2588 if otherf:
2587 if otherf:
2589 otherf.flush()
2588 otherf.flush()
2590 f.write('%s at:\n' % msg)
2589 f.write('%s at:\n' % msg)
2591 for line in getstackframes(skip + 1):
2590 for line in getstackframes(skip + 1):
2592 f.write(line)
2591 f.write(line)
2593 f.flush()
2592 f.flush()
2594
2593
2595 class dirs(object):
2594 class dirs(object):
2596 '''a multiset of directory names from a dirstate or manifest'''
2595 '''a multiset of directory names from a dirstate or manifest'''
2597
2596
2598 def __init__(self, map, skip=None):
2597 def __init__(self, map, skip=None):
2599 self._dirs = {}
2598 self._dirs = {}
2600 addpath = self.addpath
2599 addpath = self.addpath
2601 if safehasattr(map, 'iteritems') and skip is not None:
2600 if safehasattr(map, 'iteritems') and skip is not None:
2602 for f, s in map.iteritems():
2601 for f, s in map.iteritems():
2603 if s[0] != skip:
2602 if s[0] != skip:
2604 addpath(f)
2603 addpath(f)
2605 else:
2604 else:
2606 for f in map:
2605 for f in map:
2607 addpath(f)
2606 addpath(f)
2608
2607
2609 def addpath(self, path):
2608 def addpath(self, path):
2610 dirs = self._dirs
2609 dirs = self._dirs
2611 for base in finddirs(path):
2610 for base in finddirs(path):
2612 if base in dirs:
2611 if base in dirs:
2613 dirs[base] += 1
2612 dirs[base] += 1
2614 return
2613 return
2615 dirs[base] = 1
2614 dirs[base] = 1
2616
2615
2617 def delpath(self, path):
2616 def delpath(self, path):
2618 dirs = self._dirs
2617 dirs = self._dirs
2619 for base in finddirs(path):
2618 for base in finddirs(path):
2620 if dirs[base] > 1:
2619 if dirs[base] > 1:
2621 dirs[base] -= 1
2620 dirs[base] -= 1
2622 return
2621 return
2623 del dirs[base]
2622 del dirs[base]
2624
2623
2625 def __iter__(self):
2624 def __iter__(self):
2626 return self._dirs.iterkeys()
2625 return self._dirs.iterkeys()
2627
2626
2628 def __contains__(self, d):
2627 def __contains__(self, d):
2629 return d in self._dirs
2628 return d in self._dirs
2630
2629
2631 if safehasattr(parsers, 'dirs'):
2630 if safehasattr(parsers, 'dirs'):
2632 dirs = parsers.dirs
2631 dirs = parsers.dirs
2633
2632
2634 def finddirs(path):
2633 def finddirs(path):
2635 pos = path.rfind('/')
2634 pos = path.rfind('/')
2636 while pos != -1:
2635 while pos != -1:
2637 yield path[:pos]
2636 yield path[:pos]
2638 pos = path.rfind('/', 0, pos)
2637 pos = path.rfind('/', 0, pos)
2639
2638
2640 # compression utility
2639 # compression utility
2641
2640
2642 class nocompress(object):
2641 class nocompress(object):
2643 def compress(self, x):
2642 def compress(self, x):
2644 return x
2643 return x
2645 def flush(self):
2644 def flush(self):
2646 return ""
2645 return ""
2647
2646
2648 compressors = {
2647 compressors = {
2649 None: nocompress,
2648 None: nocompress,
2650 # lambda to prevent early import
2649 # lambda to prevent early import
2651 'BZ': lambda: bz2.BZ2Compressor(),
2650 'BZ': lambda: bz2.BZ2Compressor(),
2652 'GZ': lambda: zlib.compressobj(),
2651 'GZ': lambda: zlib.compressobj(),
2653 }
2652 }
2654 # also support the old form by courtesies
2653 # also support the old form by courtesies
2655 compressors['UN'] = compressors[None]
2654 compressors['UN'] = compressors[None]
2656
2655
2657 def _makedecompressor(decompcls):
2656 def _makedecompressor(decompcls):
2658 def generator(f):
2657 def generator(f):
2659 d = decompcls()
2658 d = decompcls()
2660 for chunk in filechunkiter(f):
2659 for chunk in filechunkiter(f):
2661 yield d.decompress(chunk)
2660 yield d.decompress(chunk)
2662 def func(fh):
2661 def func(fh):
2663 return chunkbuffer(generator(fh))
2662 return chunkbuffer(generator(fh))
2664 return func
2663 return func
2665
2664
2666 class ctxmanager(object):
2665 class ctxmanager(object):
2667 '''A context manager for use in 'with' blocks to allow multiple
2666 '''A context manager for use in 'with' blocks to allow multiple
2668 contexts to be entered at once. This is both safer and more
2667 contexts to be entered at once. This is both safer and more
2669 flexible than contextlib.nested.
2668 flexible than contextlib.nested.
2670
2669
2671 Once Mercurial supports Python 2.7+, this will become mostly
2670 Once Mercurial supports Python 2.7+, this will become mostly
2672 unnecessary.
2671 unnecessary.
2673 '''
2672 '''
2674
2673
2675 def __init__(self, *args):
2674 def __init__(self, *args):
2676 '''Accepts a list of no-argument functions that return context
2675 '''Accepts a list of no-argument functions that return context
2677 managers. These will be invoked at __call__ time.'''
2676 managers. These will be invoked at __call__ time.'''
2678 self._pending = args
2677 self._pending = args
2679 self._atexit = []
2678 self._atexit = []
2680
2679
2681 def __enter__(self):
2680 def __enter__(self):
2682 return self
2681 return self
2683
2682
2684 def enter(self):
2683 def enter(self):
2685 '''Create and enter context managers in the order in which they were
2684 '''Create and enter context managers in the order in which they were
2686 passed to the constructor.'''
2685 passed to the constructor.'''
2687 values = []
2686 values = []
2688 for func in self._pending:
2687 for func in self._pending:
2689 obj = func()
2688 obj = func()
2690 values.append(obj.__enter__())
2689 values.append(obj.__enter__())
2691 self._atexit.append(obj.__exit__)
2690 self._atexit.append(obj.__exit__)
2692 del self._pending
2691 del self._pending
2693 return values
2692 return values
2694
2693
2695 def atexit(self, func, *args, **kwargs):
2694 def atexit(self, func, *args, **kwargs):
2696 '''Add a function to call when this context manager exits. The
2695 '''Add a function to call when this context manager exits. The
2697 ordering of multiple atexit calls is unspecified, save that
2696 ordering of multiple atexit calls is unspecified, save that
2698 they will happen before any __exit__ functions.'''
2697 they will happen before any __exit__ functions.'''
2699 def wrapper(exc_type, exc_val, exc_tb):
2698 def wrapper(exc_type, exc_val, exc_tb):
2700 func(*args, **kwargs)
2699 func(*args, **kwargs)
2701 self._atexit.append(wrapper)
2700 self._atexit.append(wrapper)
2702 return func
2701 return func
2703
2702
2704 def __exit__(self, exc_type, exc_val, exc_tb):
2703 def __exit__(self, exc_type, exc_val, exc_tb):
2705 '''Context managers are exited in the reverse order from which
2704 '''Context managers are exited in the reverse order from which
2706 they were created.'''
2705 they were created.'''
2707 received = exc_type is not None
2706 received = exc_type is not None
2708 suppressed = False
2707 suppressed = False
2709 pending = None
2708 pending = None
2710 self._atexit.reverse()
2709 self._atexit.reverse()
2711 for exitfunc in self._atexit:
2710 for exitfunc in self._atexit:
2712 try:
2711 try:
2713 if exitfunc(exc_type, exc_val, exc_tb):
2712 if exitfunc(exc_type, exc_val, exc_tb):
2714 suppressed = True
2713 suppressed = True
2715 exc_type = None
2714 exc_type = None
2716 exc_val = None
2715 exc_val = None
2717 exc_tb = None
2716 exc_tb = None
2718 except BaseException:
2717 except BaseException:
2719 pending = sys.exc_info()
2718 pending = sys.exc_info()
2720 exc_type, exc_val, exc_tb = pending = sys.exc_info()
2719 exc_type, exc_val, exc_tb = pending = sys.exc_info()
2721 del self._atexit
2720 del self._atexit
2722 if pending:
2721 if pending:
2723 raise exc_val
2722 raise exc_val
2724 return received and suppressed
2723 return received and suppressed
2725
2724
2726 def _bz2():
2725 def _bz2():
2727 d = bz2.BZ2Decompressor()
2726 d = bz2.BZ2Decompressor()
2728 # Bzip2 stream start with BZ, but we stripped it.
2727 # Bzip2 stream start with BZ, but we stripped it.
2729 # we put it back for good measure.
2728 # we put it back for good measure.
2730 d.decompress('BZ')
2729 d.decompress('BZ')
2731 return d
2730 return d
2732
2731
2733 decompressors = {None: lambda fh: fh,
2732 decompressors = {None: lambda fh: fh,
2734 '_truncatedBZ': _makedecompressor(_bz2),
2733 '_truncatedBZ': _makedecompressor(_bz2),
2735 'BZ': _makedecompressor(lambda: bz2.BZ2Decompressor()),
2734 'BZ': _makedecompressor(lambda: bz2.BZ2Decompressor()),
2736 'GZ': _makedecompressor(lambda: zlib.decompressobj()),
2735 'GZ': _makedecompressor(lambda: zlib.decompressobj()),
2737 }
2736 }
2738 # also support the old form by courtesies
2737 # also support the old form by courtesies
2739 decompressors['UN'] = decompressors[None]
2738 decompressors['UN'] = decompressors[None]
2740
2739
2741 # convenient shortcut
2740 # convenient shortcut
2742 dst = debugstacktrace
2741 dst = debugstacktrace
@@ -1,679 +1,692
1 commit date test
1 commit date test
2
2
3 $ hg init test
3 $ hg init test
4 $ cd test
4 $ cd test
5 $ echo foo > foo
5 $ echo foo > foo
6 $ hg add foo
6 $ hg add foo
7 $ cat > $TESTTMP/checkeditform.sh <<EOF
7 $ cat > $TESTTMP/checkeditform.sh <<EOF
8 > env | grep HGEDITFORM
8 > env | grep HGEDITFORM
9 > true
9 > true
10 > EOF
10 > EOF
11 $ HGEDITOR="sh $TESTTMP/checkeditform.sh" hg commit -m ""
11 $ HGEDITOR="sh $TESTTMP/checkeditform.sh" hg commit -m ""
12 HGEDITFORM=commit.normal.normal
12 HGEDITFORM=commit.normal.normal
13 abort: empty commit message
13 abort: empty commit message
14 [255]
14 [255]
15 $ hg commit -d '0 0' -m commit-1
15 $ hg commit -d '0 0' -m commit-1
16 $ echo foo >> foo
16 $ echo foo >> foo
17 $ hg commit -d '1 4444444' -m commit-3
17 $ hg commit -d '1 4444444' -m commit-3
18 abort: impossible time zone offset: 4444444
18 abort: impossible time zone offset: 4444444
19 [255]
19 [255]
20 $ hg commit -d '1 15.1' -m commit-4
20 $ hg commit -d '1 15.1' -m commit-4
21 abort: invalid date: '1\t15.1'
21 abort: invalid date: '1\t15.1'
22 [255]
22 [255]
23 $ hg commit -d 'foo bar' -m commit-5
23 $ hg commit -d 'foo bar' -m commit-5
24 abort: invalid date: 'foo bar'
24 abort: invalid date: 'foo bar'
25 [255]
25 [255]
26 $ hg commit -d ' 1 4444' -m commit-6
26 $ hg commit -d ' 1 4444' -m commit-6
27 $ hg commit -d '111111111111 0' -m commit-7
27 $ hg commit -d '111111111111 0' -m commit-7
28 abort: date exceeds 32 bits: 111111111111
28 abort: date exceeds 32 bits: 111111111111
29 [255]
29 [255]
30 $ hg commit -d '-7654321 3600' -m commit-7
30 $ hg commit -d '-111111111111 0' -m commit-7
31 abort: negative date value: -7654321
31 abort: date exceeds 32 bits: -111111111111
32 [255]
33 $ echo foo >> foo
34 $ hg commit -d '1901-12-13 20:45:53 +0000' -m commit-7-2
35 $ echo foo >> foo
36 $ hg commit -d '-2147483647 0' -m commit-7-3
37 $ hg log -T '{rev} {date|isodatesec}\n' -l2
38 3 1901-12-13 20:45:53 +0000
39 2 1901-12-13 20:45:53 +0000
40 $ hg commit -d '1901-12-13 20:45:52 +0000' -m commit-7
41 abort: date exceeds 32 bits: -2147483648
42 [255]
43 $ hg commit -d '-2147483648 0' -m commit-7
44 abort: date exceeds 32 bits: -2147483648
32 [255]
45 [255]
33
46
34 commit added file that has been deleted
47 commit added file that has been deleted
35
48
36 $ echo bar > bar
49 $ echo bar > bar
37 $ hg add bar
50 $ hg add bar
38 $ rm bar
51 $ rm bar
39 $ hg commit -m commit-8
52 $ hg commit -m commit-8
40 nothing changed (1 missing files, see 'hg status')
53 nothing changed (1 missing files, see 'hg status')
41 [1]
54 [1]
42 $ hg commit -m commit-8-2 bar
55 $ hg commit -m commit-8-2 bar
43 abort: bar: file not found!
56 abort: bar: file not found!
44 [255]
57 [255]
45
58
46 $ hg -q revert -a --no-backup
59 $ hg -q revert -a --no-backup
47
60
48 $ mkdir dir
61 $ mkdir dir
49 $ echo boo > dir/file
62 $ echo boo > dir/file
50 $ hg add
63 $ hg add
51 adding dir/file (glob)
64 adding dir/file (glob)
52 $ hg -v commit -m commit-9 dir
65 $ hg -v commit -m commit-9 dir
53 committing files:
66 committing files:
54 dir/file
67 dir/file
55 committing manifest
68 committing manifest
56 committing changelog
69 committing changelog
57 committed changeset 2:d2a76177cb42
70 committed changeset 4:76aab26859d7
58
71
59 $ echo > dir.file
72 $ echo > dir.file
60 $ hg add
73 $ hg add
61 adding dir.file
74 adding dir.file
62 $ hg commit -m commit-10 dir dir.file
75 $ hg commit -m commit-10 dir dir.file
63 abort: dir: no match under directory!
76 abort: dir: no match under directory!
64 [255]
77 [255]
65
78
66 $ echo >> dir/file
79 $ echo >> dir/file
67 $ mkdir bleh
80 $ mkdir bleh
68 $ mkdir dir2
81 $ mkdir dir2
69 $ cd bleh
82 $ cd bleh
70 $ hg commit -m commit-11 .
83 $ hg commit -m commit-11 .
71 abort: bleh: no match under directory!
84 abort: bleh: no match under directory!
72 [255]
85 [255]
73 $ hg commit -m commit-12 ../dir ../dir2
86 $ hg commit -m commit-12 ../dir ../dir2
74 abort: dir2: no match under directory!
87 abort: dir2: no match under directory!
75 [255]
88 [255]
76 $ hg -v commit -m commit-13 ../dir
89 $ hg -v commit -m commit-13 ../dir
77 committing files:
90 committing files:
78 dir/file
91 dir/file
79 committing manifest
92 committing manifest
80 committing changelog
93 committing changelog
81 committed changeset 3:1cd62a2d8db5
94 committed changeset 5:9a50557f1baf
82 $ cd ..
95 $ cd ..
83
96
84 $ hg commit -m commit-14 does-not-exist
97 $ hg commit -m commit-14 does-not-exist
85 abort: does-not-exist: * (glob)
98 abort: does-not-exist: * (glob)
86 [255]
99 [255]
87
100
88 #if symlink
101 #if symlink
89 $ ln -s foo baz
102 $ ln -s foo baz
90 $ hg commit -m commit-15 baz
103 $ hg commit -m commit-15 baz
91 abort: baz: file not tracked!
104 abort: baz: file not tracked!
92 [255]
105 [255]
93 #endif
106 #endif
94
107
95 $ touch quux
108 $ touch quux
96 $ hg commit -m commit-16 quux
109 $ hg commit -m commit-16 quux
97 abort: quux: file not tracked!
110 abort: quux: file not tracked!
98 [255]
111 [255]
99 $ echo >> dir/file
112 $ echo >> dir/file
100 $ hg -v commit -m commit-17 dir/file
113 $ hg -v commit -m commit-17 dir/file
101 committing files:
114 committing files:
102 dir/file
115 dir/file
103 committing manifest
116 committing manifest
104 committing changelog
117 committing changelog
105 committed changeset 4:49176991390e
118 committed changeset 6:4b4c75bf422d
106
119
107 An empty date was interpreted as epoch origin
120 An empty date was interpreted as epoch origin
108
121
109 $ echo foo >> foo
122 $ echo foo >> foo
110 $ hg commit -d '' -m commit-no-date
123 $ hg commit -d '' -m commit-no-date
111 $ hg tip --template '{date|isodate}\n' | grep '1970'
124 $ hg tip --template '{date|isodate}\n' | grep '1970'
112 [1]
125 [1]
113
126
114 Make sure we do not obscure unknown requires file entries (issue2649)
127 Make sure we do not obscure unknown requires file entries (issue2649)
115
128
116 $ echo foo >> foo
129 $ echo foo >> foo
117 $ echo fake >> .hg/requires
130 $ echo fake >> .hg/requires
118 $ hg commit -m bla
131 $ hg commit -m bla
119 abort: repository requires features unknown to this Mercurial: fake!
132 abort: repository requires features unknown to this Mercurial: fake!
120 (see https://mercurial-scm.org/wiki/MissingRequirement for more information)
133 (see https://mercurial-scm.org/wiki/MissingRequirement for more information)
121 [255]
134 [255]
122
135
123 $ cd ..
136 $ cd ..
124
137
125
138
126 partial subdir commit test
139 partial subdir commit test
127
140
128 $ hg init test2
141 $ hg init test2
129 $ cd test2
142 $ cd test2
130 $ mkdir foo
143 $ mkdir foo
131 $ echo foo > foo/foo
144 $ echo foo > foo/foo
132 $ mkdir bar
145 $ mkdir bar
133 $ echo bar > bar/bar
146 $ echo bar > bar/bar
134 $ hg add
147 $ hg add
135 adding bar/bar (glob)
148 adding bar/bar (glob)
136 adding foo/foo (glob)
149 adding foo/foo (glob)
137 $ HGEDITOR=cat hg ci -e -m commit-subdir-1 foo
150 $ HGEDITOR=cat hg ci -e -m commit-subdir-1 foo
138 commit-subdir-1
151 commit-subdir-1
139
152
140
153
141 HG: Enter commit message. Lines beginning with 'HG:' are removed.
154 HG: Enter commit message. Lines beginning with 'HG:' are removed.
142 HG: Leave message empty to abort commit.
155 HG: Leave message empty to abort commit.
143 HG: --
156 HG: --
144 HG: user: test
157 HG: user: test
145 HG: branch 'default'
158 HG: branch 'default'
146 HG: added foo/foo
159 HG: added foo/foo
147
160
148
161
149 $ hg ci -m commit-subdir-2 bar
162 $ hg ci -m commit-subdir-2 bar
150
163
151 subdir log 1
164 subdir log 1
152
165
153 $ hg log -v foo
166 $ hg log -v foo
154 changeset: 0:f97e73a25882
167 changeset: 0:f97e73a25882
155 user: test
168 user: test
156 date: Thu Jan 01 00:00:00 1970 +0000
169 date: Thu Jan 01 00:00:00 1970 +0000
157 files: foo/foo
170 files: foo/foo
158 description:
171 description:
159 commit-subdir-1
172 commit-subdir-1
160
173
161
174
162
175
163 subdir log 2
176 subdir log 2
164
177
165 $ hg log -v bar
178 $ hg log -v bar
166 changeset: 1:aa809156d50d
179 changeset: 1:aa809156d50d
167 tag: tip
180 tag: tip
168 user: test
181 user: test
169 date: Thu Jan 01 00:00:00 1970 +0000
182 date: Thu Jan 01 00:00:00 1970 +0000
170 files: bar/bar
183 files: bar/bar
171 description:
184 description:
172 commit-subdir-2
185 commit-subdir-2
173
186
174
187
175
188
176 full log
189 full log
177
190
178 $ hg log -v
191 $ hg log -v
179 changeset: 1:aa809156d50d
192 changeset: 1:aa809156d50d
180 tag: tip
193 tag: tip
181 user: test
194 user: test
182 date: Thu Jan 01 00:00:00 1970 +0000
195 date: Thu Jan 01 00:00:00 1970 +0000
183 files: bar/bar
196 files: bar/bar
184 description:
197 description:
185 commit-subdir-2
198 commit-subdir-2
186
199
187
200
188 changeset: 0:f97e73a25882
201 changeset: 0:f97e73a25882
189 user: test
202 user: test
190 date: Thu Jan 01 00:00:00 1970 +0000
203 date: Thu Jan 01 00:00:00 1970 +0000
191 files: foo/foo
204 files: foo/foo
192 description:
205 description:
193 commit-subdir-1
206 commit-subdir-1
194
207
195
208
196 $ cd ..
209 $ cd ..
197
210
198
211
199 dot and subdir commit test
212 dot and subdir commit test
200
213
201 $ hg init test3
214 $ hg init test3
202 $ echo commit-foo-subdir > commit-log-test
215 $ echo commit-foo-subdir > commit-log-test
203 $ cd test3
216 $ cd test3
204 $ mkdir foo
217 $ mkdir foo
205 $ echo foo content > foo/plain-file
218 $ echo foo content > foo/plain-file
206 $ hg add foo/plain-file
219 $ hg add foo/plain-file
207 $ HGEDITOR=cat hg ci --edit -l ../commit-log-test foo
220 $ HGEDITOR=cat hg ci --edit -l ../commit-log-test foo
208 commit-foo-subdir
221 commit-foo-subdir
209
222
210
223
211 HG: Enter commit message. Lines beginning with 'HG:' are removed.
224 HG: Enter commit message. Lines beginning with 'HG:' are removed.
212 HG: Leave message empty to abort commit.
225 HG: Leave message empty to abort commit.
213 HG: --
226 HG: --
214 HG: user: test
227 HG: user: test
215 HG: branch 'default'
228 HG: branch 'default'
216 HG: added foo/plain-file
229 HG: added foo/plain-file
217
230
218
231
219 $ echo modified foo content > foo/plain-file
232 $ echo modified foo content > foo/plain-file
220 $ hg ci -m commit-foo-dot .
233 $ hg ci -m commit-foo-dot .
221
234
222 full log
235 full log
223
236
224 $ hg log -v
237 $ hg log -v
225 changeset: 1:95b38e3a5b2e
238 changeset: 1:95b38e3a5b2e
226 tag: tip
239 tag: tip
227 user: test
240 user: test
228 date: Thu Jan 01 00:00:00 1970 +0000
241 date: Thu Jan 01 00:00:00 1970 +0000
229 files: foo/plain-file
242 files: foo/plain-file
230 description:
243 description:
231 commit-foo-dot
244 commit-foo-dot
232
245
233
246
234 changeset: 0:65d4e9386227
247 changeset: 0:65d4e9386227
235 user: test
248 user: test
236 date: Thu Jan 01 00:00:00 1970 +0000
249 date: Thu Jan 01 00:00:00 1970 +0000
237 files: foo/plain-file
250 files: foo/plain-file
238 description:
251 description:
239 commit-foo-subdir
252 commit-foo-subdir
240
253
241
254
242
255
243 subdir log
256 subdir log
244
257
245 $ cd foo
258 $ cd foo
246 $ hg log .
259 $ hg log .
247 changeset: 1:95b38e3a5b2e
260 changeset: 1:95b38e3a5b2e
248 tag: tip
261 tag: tip
249 user: test
262 user: test
250 date: Thu Jan 01 00:00:00 1970 +0000
263 date: Thu Jan 01 00:00:00 1970 +0000
251 summary: commit-foo-dot
264 summary: commit-foo-dot
252
265
253 changeset: 0:65d4e9386227
266 changeset: 0:65d4e9386227
254 user: test
267 user: test
255 date: Thu Jan 01 00:00:00 1970 +0000
268 date: Thu Jan 01 00:00:00 1970 +0000
256 summary: commit-foo-subdir
269 summary: commit-foo-subdir
257
270
258 $ cd ..
271 $ cd ..
259 $ cd ..
272 $ cd ..
260
273
261 Issue1049: Hg permits partial commit of merge without warning
274 Issue1049: Hg permits partial commit of merge without warning
262
275
263 $ hg init issue1049
276 $ hg init issue1049
264 $ cd issue1049
277 $ cd issue1049
265 $ echo a > a
278 $ echo a > a
266 $ hg ci -Ama
279 $ hg ci -Ama
267 adding a
280 adding a
268 $ echo a >> a
281 $ echo a >> a
269 $ hg ci -mb
282 $ hg ci -mb
270 $ hg up 0
283 $ hg up 0
271 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
284 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
272 $ echo b >> a
285 $ echo b >> a
273 $ hg ci -mc
286 $ hg ci -mc
274 created new head
287 created new head
275 $ HGMERGE=true hg merge
288 $ HGMERGE=true hg merge
276 merging a
289 merging a
277 0 files updated, 1 files merged, 0 files removed, 0 files unresolved
290 0 files updated, 1 files merged, 0 files removed, 0 files unresolved
278 (branch merge, don't forget to commit)
291 (branch merge, don't forget to commit)
279
292
280 should fail because we are specifying a file name
293 should fail because we are specifying a file name
281
294
282 $ hg ci -mmerge a
295 $ hg ci -mmerge a
283 abort: cannot partially commit a merge (do not specify files or patterns)
296 abort: cannot partially commit a merge (do not specify files or patterns)
284 [255]
297 [255]
285
298
286 should fail because we are specifying a pattern
299 should fail because we are specifying a pattern
287
300
288 $ hg ci -mmerge -I a
301 $ hg ci -mmerge -I a
289 abort: cannot partially commit a merge (do not specify files or patterns)
302 abort: cannot partially commit a merge (do not specify files or patterns)
290 [255]
303 [255]
291
304
292 should succeed
305 should succeed
293
306
294 $ HGEDITOR="sh $TESTTMP/checkeditform.sh" hg ci -mmerge --edit
307 $ HGEDITOR="sh $TESTTMP/checkeditform.sh" hg ci -mmerge --edit
295 HGEDITFORM=commit.normal.merge
308 HGEDITFORM=commit.normal.merge
296 $ cd ..
309 $ cd ..
297
310
298
311
299 test commit message content
312 test commit message content
300
313
301 $ hg init commitmsg
314 $ hg init commitmsg
302 $ cd commitmsg
315 $ cd commitmsg
303 $ echo changed > changed
316 $ echo changed > changed
304 $ echo removed > removed
317 $ echo removed > removed
305 $ hg book activebookmark
318 $ hg book activebookmark
306 $ hg ci -qAm init
319 $ hg ci -qAm init
307
320
308 $ hg rm removed
321 $ hg rm removed
309 $ echo changed >> changed
322 $ echo changed >> changed
310 $ echo added > added
323 $ echo added > added
311 $ hg add added
324 $ hg add added
312 $ HGEDITOR=cat hg ci -A
325 $ HGEDITOR=cat hg ci -A
313
326
314
327
315 HG: Enter commit message. Lines beginning with 'HG:' are removed.
328 HG: Enter commit message. Lines beginning with 'HG:' are removed.
316 HG: Leave message empty to abort commit.
329 HG: Leave message empty to abort commit.
317 HG: --
330 HG: --
318 HG: user: test
331 HG: user: test
319 HG: branch 'default'
332 HG: branch 'default'
320 HG: bookmark 'activebookmark'
333 HG: bookmark 'activebookmark'
321 HG: added added
334 HG: added added
322 HG: changed changed
335 HG: changed changed
323 HG: removed removed
336 HG: removed removed
324 abort: empty commit message
337 abort: empty commit message
325 [255]
338 [255]
326
339
327 test saving last-message.txt
340 test saving last-message.txt
328
341
329 $ hg init sub
342 $ hg init sub
330 $ echo a > sub/a
343 $ echo a > sub/a
331 $ hg -R sub add sub/a
344 $ hg -R sub add sub/a
332 $ cat > sub/.hg/hgrc <<EOF
345 $ cat > sub/.hg/hgrc <<EOF
333 > [hooks]
346 > [hooks]
334 > precommit.test-saving-last-message = false
347 > precommit.test-saving-last-message = false
335 > EOF
348 > EOF
336
349
337 $ echo 'sub = sub' > .hgsub
350 $ echo 'sub = sub' > .hgsub
338 $ hg add .hgsub
351 $ hg add .hgsub
339
352
340 $ cat > $TESTTMP/editor.sh <<EOF
353 $ cat > $TESTTMP/editor.sh <<EOF
341 > echo "==== before editing:"
354 > echo "==== before editing:"
342 > cat \$1
355 > cat \$1
343 > echo "===="
356 > echo "===="
344 > echo "test saving last-message.txt" >> \$1
357 > echo "test saving last-message.txt" >> \$1
345 > EOF
358 > EOF
346
359
347 $ rm -f .hg/last-message.txt
360 $ rm -f .hg/last-message.txt
348 $ HGEDITOR="sh $TESTTMP/editor.sh" hg commit -S -q
361 $ HGEDITOR="sh $TESTTMP/editor.sh" hg commit -S -q
349 ==== before editing:
362 ==== before editing:
350
363
351
364
352 HG: Enter commit message. Lines beginning with 'HG:' are removed.
365 HG: Enter commit message. Lines beginning with 'HG:' are removed.
353 HG: Leave message empty to abort commit.
366 HG: Leave message empty to abort commit.
354 HG: --
367 HG: --
355 HG: user: test
368 HG: user: test
356 HG: branch 'default'
369 HG: branch 'default'
357 HG: bookmark 'activebookmark'
370 HG: bookmark 'activebookmark'
358 HG: subrepo sub
371 HG: subrepo sub
359 HG: added .hgsub
372 HG: added .hgsub
360 HG: added added
373 HG: added added
361 HG: changed .hgsubstate
374 HG: changed .hgsubstate
362 HG: changed changed
375 HG: changed changed
363 HG: removed removed
376 HG: removed removed
364 ====
377 ====
365 abort: precommit.test-saving-last-message hook exited with status 1 (in subrepo sub)
378 abort: precommit.test-saving-last-message hook exited with status 1 (in subrepo sub)
366 [255]
379 [255]
367 $ cat .hg/last-message.txt
380 $ cat .hg/last-message.txt
368
381
369
382
370 test saving last-message.txt
383 test saving last-message.txt
371
384
372 test that '[committemplate] changeset' definition and commit log
385 test that '[committemplate] changeset' definition and commit log
373 specific template keywords work well
386 specific template keywords work well
374
387
375 $ cat >> .hg/hgrc <<EOF
388 $ cat >> .hg/hgrc <<EOF
376 > [committemplate]
389 > [committemplate]
377 > changeset.commit.normal = HG: this is "commit.normal" template
390 > changeset.commit.normal = HG: this is "commit.normal" template
378 > HG: {extramsg}
391 > HG: {extramsg}
379 > {if(activebookmark,
392 > {if(activebookmark,
380 > "HG: bookmark '{activebookmark}' is activated\n",
393 > "HG: bookmark '{activebookmark}' is activated\n",
381 > "HG: no bookmark is activated\n")}{subrepos %
394 > "HG: no bookmark is activated\n")}{subrepos %
382 > "HG: subrepo '{subrepo}' is changed\n"}
395 > "HG: subrepo '{subrepo}' is changed\n"}
383 >
396 >
384 > changeset.commit = HG: this is "commit" template
397 > changeset.commit = HG: this is "commit" template
385 > HG: {extramsg}
398 > HG: {extramsg}
386 > {if(activebookmark,
399 > {if(activebookmark,
387 > "HG: bookmark '{activebookmark}' is activated\n",
400 > "HG: bookmark '{activebookmark}' is activated\n",
388 > "HG: no bookmark is activated\n")}{subrepos %
401 > "HG: no bookmark is activated\n")}{subrepos %
389 > "HG: subrepo '{subrepo}' is changed\n"}
402 > "HG: subrepo '{subrepo}' is changed\n"}
390 >
403 >
391 > changeset = HG: this is customized commit template
404 > changeset = HG: this is customized commit template
392 > HG: {extramsg}
405 > HG: {extramsg}
393 > {if(activebookmark,
406 > {if(activebookmark,
394 > "HG: bookmark '{activebookmark}' is activated\n",
407 > "HG: bookmark '{activebookmark}' is activated\n",
395 > "HG: no bookmark is activated\n")}{subrepos %
408 > "HG: no bookmark is activated\n")}{subrepos %
396 > "HG: subrepo '{subrepo}' is changed\n"}
409 > "HG: subrepo '{subrepo}' is changed\n"}
397 > EOF
410 > EOF
398
411
399 $ hg init sub2
412 $ hg init sub2
400 $ echo a > sub2/a
413 $ echo a > sub2/a
401 $ hg -R sub2 add sub2/a
414 $ hg -R sub2 add sub2/a
402 $ echo 'sub2 = sub2' >> .hgsub
415 $ echo 'sub2 = sub2' >> .hgsub
403
416
404 $ HGEDITOR=cat hg commit -S -q
417 $ HGEDITOR=cat hg commit -S -q
405 HG: this is "commit.normal" template
418 HG: this is "commit.normal" template
406 HG: Leave message empty to abort commit.
419 HG: Leave message empty to abort commit.
407 HG: bookmark 'activebookmark' is activated
420 HG: bookmark 'activebookmark' is activated
408 HG: subrepo 'sub' is changed
421 HG: subrepo 'sub' is changed
409 HG: subrepo 'sub2' is changed
422 HG: subrepo 'sub2' is changed
410 abort: empty commit message
423 abort: empty commit message
411 [255]
424 [255]
412
425
413 $ cat >> .hg/hgrc <<EOF
426 $ cat >> .hg/hgrc <<EOF
414 > [committemplate]
427 > [committemplate]
415 > changeset.commit.normal =
428 > changeset.commit.normal =
416 > # now, "changeset.commit" should be chosen for "hg commit"
429 > # now, "changeset.commit" should be chosen for "hg commit"
417 > EOF
430 > EOF
418
431
419 $ hg bookmark --inactive activebookmark
432 $ hg bookmark --inactive activebookmark
420 $ hg forget .hgsub
433 $ hg forget .hgsub
421 $ HGEDITOR=cat hg commit -q
434 $ HGEDITOR=cat hg commit -q
422 HG: this is "commit" template
435 HG: this is "commit" template
423 HG: Leave message empty to abort commit.
436 HG: Leave message empty to abort commit.
424 HG: no bookmark is activated
437 HG: no bookmark is activated
425 abort: empty commit message
438 abort: empty commit message
426 [255]
439 [255]
427
440
428 $ cat >> .hg/hgrc <<EOF
441 $ cat >> .hg/hgrc <<EOF
429 > [committemplate]
442 > [committemplate]
430 > changeset.commit =
443 > changeset.commit =
431 > # now, "changeset" should be chosen for "hg commit"
444 > # now, "changeset" should be chosen for "hg commit"
432 > EOF
445 > EOF
433
446
434 $ HGEDITOR=cat hg commit -q
447 $ HGEDITOR=cat hg commit -q
435 HG: this is customized commit template
448 HG: this is customized commit template
436 HG: Leave message empty to abort commit.
449 HG: Leave message empty to abort commit.
437 HG: no bookmark is activated
450 HG: no bookmark is activated
438 abort: empty commit message
451 abort: empty commit message
439 [255]
452 [255]
440
453
441 $ cat >> .hg/hgrc <<EOF
454 $ cat >> .hg/hgrc <<EOF
442 > [committemplate]
455 > [committemplate]
443 > changeset = {desc}
456 > changeset = {desc}
444 > HG: mods={file_mods}
457 > HG: mods={file_mods}
445 > HG: adds={file_adds}
458 > HG: adds={file_adds}
446 > HG: dels={file_dels}
459 > HG: dels={file_dels}
447 > HG: files={files}
460 > HG: files={files}
448 > HG:
461 > HG:
449 > {splitlines(diff()) % 'HG: {line}\n'
462 > {splitlines(diff()) % 'HG: {line}\n'
450 > }HG:
463 > }HG:
451 > HG: mods={file_mods}
464 > HG: mods={file_mods}
452 > HG: adds={file_adds}
465 > HG: adds={file_adds}
453 > HG: dels={file_dels}
466 > HG: dels={file_dels}
454 > HG: files={files}\n
467 > HG: files={files}\n
455 > EOF
468 > EOF
456 $ hg status -amr
469 $ hg status -amr
457 M changed
470 M changed
458 A added
471 A added
459 R removed
472 R removed
460 $ HGEDITOR=cat hg commit -q -e -m "foo bar" changed
473 $ HGEDITOR=cat hg commit -q -e -m "foo bar" changed
461 foo bar
474 foo bar
462 HG: mods=changed
475 HG: mods=changed
463 HG: adds=
476 HG: adds=
464 HG: dels=
477 HG: dels=
465 HG: files=changed
478 HG: files=changed
466 HG:
479 HG:
467 HG: --- a/changed Thu Jan 01 00:00:00 1970 +0000
480 HG: --- a/changed Thu Jan 01 00:00:00 1970 +0000
468 HG: +++ b/changed Thu Jan 01 00:00:00 1970 +0000
481 HG: +++ b/changed Thu Jan 01 00:00:00 1970 +0000
469 HG: @@ -1,1 +1,2 @@
482 HG: @@ -1,1 +1,2 @@
470 HG: changed
483 HG: changed
471 HG: +changed
484 HG: +changed
472 HG:
485 HG:
473 HG: mods=changed
486 HG: mods=changed
474 HG: adds=
487 HG: adds=
475 HG: dels=
488 HG: dels=
476 HG: files=changed
489 HG: files=changed
477 $ hg status -amr
490 $ hg status -amr
478 A added
491 A added
479 R removed
492 R removed
480 $ hg parents --template "M {file_mods}\nA {file_adds}\nR {file_dels}\n"
493 $ hg parents --template "M {file_mods}\nA {file_adds}\nR {file_dels}\n"
481 M changed
494 M changed
482 A
495 A
483 R
496 R
484 $ hg rollback -q
497 $ hg rollback -q
485
498
486 $ cat >> .hg/hgrc <<EOF
499 $ cat >> .hg/hgrc <<EOF
487 > [committemplate]
500 > [committemplate]
488 > changeset = {desc}
501 > changeset = {desc}
489 > HG: mods={file_mods}
502 > HG: mods={file_mods}
490 > HG: adds={file_adds}
503 > HG: adds={file_adds}
491 > HG: dels={file_dels}
504 > HG: dels={file_dels}
492 > HG: files={files}
505 > HG: files={files}
493 > HG:
506 > HG:
494 > {splitlines(diff("changed")) % 'HG: {line}\n'
507 > {splitlines(diff("changed")) % 'HG: {line}\n'
495 > }HG:
508 > }HG:
496 > HG: mods={file_mods}
509 > HG: mods={file_mods}
497 > HG: adds={file_adds}
510 > HG: adds={file_adds}
498 > HG: dels={file_dels}
511 > HG: dels={file_dels}
499 > HG: files={files}
512 > HG: files={files}
500 > HG:
513 > HG:
501 > {splitlines(diff("added")) % 'HG: {line}\n'
514 > {splitlines(diff("added")) % 'HG: {line}\n'
502 > }HG:
515 > }HG:
503 > HG: mods={file_mods}
516 > HG: mods={file_mods}
504 > HG: adds={file_adds}
517 > HG: adds={file_adds}
505 > HG: dels={file_dels}
518 > HG: dels={file_dels}
506 > HG: files={files}
519 > HG: files={files}
507 > HG:
520 > HG:
508 > {splitlines(diff("removed")) % 'HG: {line}\n'
521 > {splitlines(diff("removed")) % 'HG: {line}\n'
509 > }HG:
522 > }HG:
510 > HG: mods={file_mods}
523 > HG: mods={file_mods}
511 > HG: adds={file_adds}
524 > HG: adds={file_adds}
512 > HG: dels={file_dels}
525 > HG: dels={file_dels}
513 > HG: files={files}\n
526 > HG: files={files}\n
514 > EOF
527 > EOF
515 $ HGEDITOR=cat hg commit -q -e -m "foo bar" added removed
528 $ HGEDITOR=cat hg commit -q -e -m "foo bar" added removed
516 foo bar
529 foo bar
517 HG: mods=
530 HG: mods=
518 HG: adds=added
531 HG: adds=added
519 HG: dels=removed
532 HG: dels=removed
520 HG: files=added removed
533 HG: files=added removed
521 HG:
534 HG:
522 HG:
535 HG:
523 HG: mods=
536 HG: mods=
524 HG: adds=added
537 HG: adds=added
525 HG: dels=removed
538 HG: dels=removed
526 HG: files=added removed
539 HG: files=added removed
527 HG:
540 HG:
528 HG: --- /dev/null Thu Jan 01 00:00:00 1970 +0000
541 HG: --- /dev/null Thu Jan 01 00:00:00 1970 +0000
529 HG: +++ b/added Thu Jan 01 00:00:00 1970 +0000
542 HG: +++ b/added Thu Jan 01 00:00:00 1970 +0000
530 HG: @@ -0,0 +1,1 @@
543 HG: @@ -0,0 +1,1 @@
531 HG: +added
544 HG: +added
532 HG:
545 HG:
533 HG: mods=
546 HG: mods=
534 HG: adds=added
547 HG: adds=added
535 HG: dels=removed
548 HG: dels=removed
536 HG: files=added removed
549 HG: files=added removed
537 HG:
550 HG:
538 HG: --- a/removed Thu Jan 01 00:00:00 1970 +0000
551 HG: --- a/removed Thu Jan 01 00:00:00 1970 +0000
539 HG: +++ /dev/null Thu Jan 01 00:00:00 1970 +0000
552 HG: +++ /dev/null Thu Jan 01 00:00:00 1970 +0000
540 HG: @@ -1,1 +0,0 @@
553 HG: @@ -1,1 +0,0 @@
541 HG: -removed
554 HG: -removed
542 HG:
555 HG:
543 HG: mods=
556 HG: mods=
544 HG: adds=added
557 HG: adds=added
545 HG: dels=removed
558 HG: dels=removed
546 HG: files=added removed
559 HG: files=added removed
547 $ hg status -amr
560 $ hg status -amr
548 M changed
561 M changed
549 $ hg parents --template "M {file_mods}\nA {file_adds}\nR {file_dels}\n"
562 $ hg parents --template "M {file_mods}\nA {file_adds}\nR {file_dels}\n"
550 M
563 M
551 A added
564 A added
552 R removed
565 R removed
553 $ hg rollback -q
566 $ hg rollback -q
554
567
555 $ cat >> .hg/hgrc <<EOF
568 $ cat >> .hg/hgrc <<EOF
556 > # disable customizing for subsequent tests
569 > # disable customizing for subsequent tests
557 > [committemplate]
570 > [committemplate]
558 > changeset =
571 > changeset =
559 > EOF
572 > EOF
560
573
561 $ cd ..
574 $ cd ..
562
575
563
576
564 commit copy
577 commit copy
565
578
566 $ hg init dir2
579 $ hg init dir2
567 $ cd dir2
580 $ cd dir2
568 $ echo bleh > bar
581 $ echo bleh > bar
569 $ hg add bar
582 $ hg add bar
570 $ hg ci -m 'add bar'
583 $ hg ci -m 'add bar'
571
584
572 $ hg cp bar foo
585 $ hg cp bar foo
573 $ echo >> bar
586 $ echo >> bar
574 $ hg ci -m 'cp bar foo; change bar'
587 $ hg ci -m 'cp bar foo; change bar'
575
588
576 $ hg debugrename foo
589 $ hg debugrename foo
577 foo renamed from bar:26d3ca0dfd18e44d796b564e38dd173c9668d3a9
590 foo renamed from bar:26d3ca0dfd18e44d796b564e38dd173c9668d3a9
578 $ hg debugindex bar
591 $ hg debugindex bar
579 rev offset length ..... linkrev nodeid p1 p2 (re)
592 rev offset length ..... linkrev nodeid p1 p2 (re)
580 0 0 6 ..... 0 26d3ca0dfd18 000000000000 000000000000 (re)
593 0 0 6 ..... 0 26d3ca0dfd18 000000000000 000000000000 (re)
581 1 6 7 ..... 1 d267bddd54f7 26d3ca0dfd18 000000000000 (re)
594 1 6 7 ..... 1 d267bddd54f7 26d3ca0dfd18 000000000000 (re)
582
595
583 Test making empty commits
596 Test making empty commits
584 $ hg commit --config ui.allowemptycommit=True -m "empty commit"
597 $ hg commit --config ui.allowemptycommit=True -m "empty commit"
585 $ hg log -r . -v --stat
598 $ hg log -r . -v --stat
586 changeset: 2:d809f3644287
599 changeset: 2:d809f3644287
587 tag: tip
600 tag: tip
588 user: test
601 user: test
589 date: Thu Jan 01 00:00:00 1970 +0000
602 date: Thu Jan 01 00:00:00 1970 +0000
590 description:
603 description:
591 empty commit
604 empty commit
592
605
593
606
594
607
595 verify pathauditor blocks evil filepaths
608 verify pathauditor blocks evil filepaths
596 $ cat > evil-commit.py <<EOF
609 $ cat > evil-commit.py <<EOF
597 > from mercurial import ui, hg, context, node
610 > from mercurial import ui, hg, context, node
598 > notrc = u".h\u200cg".encode('utf-8') + '/hgrc'
611 > notrc = u".h\u200cg".encode('utf-8') + '/hgrc'
599 > u = ui.ui()
612 > u = ui.ui()
600 > r = hg.repository(u, '.')
613 > r = hg.repository(u, '.')
601 > def filectxfn(repo, memctx, path):
614 > def filectxfn(repo, memctx, path):
602 > return context.memfilectx(repo, path, '[hooks]\nupdate = echo owned')
615 > return context.memfilectx(repo, path, '[hooks]\nupdate = echo owned')
603 > c = context.memctx(r, [r['tip'].node(), node.nullid],
616 > c = context.memctx(r, [r['tip'].node(), node.nullid],
604 > 'evil', [notrc], filectxfn, 0)
617 > 'evil', [notrc], filectxfn, 0)
605 > r.commitctx(c)
618 > r.commitctx(c)
606 > EOF
619 > EOF
607 $ $PYTHON evil-commit.py
620 $ $PYTHON evil-commit.py
608 #if windows
621 #if windows
609 $ hg co --clean tip
622 $ hg co --clean tip
610 abort: path contains illegal component: .h\xe2\x80\x8cg\\hgrc (esc)
623 abort: path contains illegal component: .h\xe2\x80\x8cg\\hgrc (esc)
611 [255]
624 [255]
612 #else
625 #else
613 $ hg co --clean tip
626 $ hg co --clean tip
614 abort: path contains illegal component: .h\xe2\x80\x8cg/hgrc (esc)
627 abort: path contains illegal component: .h\xe2\x80\x8cg/hgrc (esc)
615 [255]
628 [255]
616 #endif
629 #endif
617
630
618 $ hg rollback -f
631 $ hg rollback -f
619 repository tip rolled back to revision 2 (undo commit)
632 repository tip rolled back to revision 2 (undo commit)
620 $ cat > evil-commit.py <<EOF
633 $ cat > evil-commit.py <<EOF
621 > from mercurial import ui, hg, context, node
634 > from mercurial import ui, hg, context, node
622 > notrc = "HG~1/hgrc"
635 > notrc = "HG~1/hgrc"
623 > u = ui.ui()
636 > u = ui.ui()
624 > r = hg.repository(u, '.')
637 > r = hg.repository(u, '.')
625 > def filectxfn(repo, memctx, path):
638 > def filectxfn(repo, memctx, path):
626 > return context.memfilectx(repo, path, '[hooks]\nupdate = echo owned')
639 > return context.memfilectx(repo, path, '[hooks]\nupdate = echo owned')
627 > c = context.memctx(r, [r['tip'].node(), node.nullid],
640 > c = context.memctx(r, [r['tip'].node(), node.nullid],
628 > 'evil', [notrc], filectxfn, 0)
641 > 'evil', [notrc], filectxfn, 0)
629 > r.commitctx(c)
642 > r.commitctx(c)
630 > EOF
643 > EOF
631 $ $PYTHON evil-commit.py
644 $ $PYTHON evil-commit.py
632 $ hg co --clean tip
645 $ hg co --clean tip
633 abort: path contains illegal component: HG~1/hgrc (glob)
646 abort: path contains illegal component: HG~1/hgrc (glob)
634 [255]
647 [255]
635
648
636 $ hg rollback -f
649 $ hg rollback -f
637 repository tip rolled back to revision 2 (undo commit)
650 repository tip rolled back to revision 2 (undo commit)
638 $ cat > evil-commit.py <<EOF
651 $ cat > evil-commit.py <<EOF
639 > from mercurial import ui, hg, context, node
652 > from mercurial import ui, hg, context, node
640 > notrc = "HG8B6C~2/hgrc"
653 > notrc = "HG8B6C~2/hgrc"
641 > u = ui.ui()
654 > u = ui.ui()
642 > r = hg.repository(u, '.')
655 > r = hg.repository(u, '.')
643 > def filectxfn(repo, memctx, path):
656 > def filectxfn(repo, memctx, path):
644 > return context.memfilectx(repo, path, '[hooks]\nupdate = echo owned')
657 > return context.memfilectx(repo, path, '[hooks]\nupdate = echo owned')
645 > c = context.memctx(r, [r['tip'].node(), node.nullid],
658 > c = context.memctx(r, [r['tip'].node(), node.nullid],
646 > 'evil', [notrc], filectxfn, 0)
659 > 'evil', [notrc], filectxfn, 0)
647 > r.commitctx(c)
660 > r.commitctx(c)
648 > EOF
661 > EOF
649 $ $PYTHON evil-commit.py
662 $ $PYTHON evil-commit.py
650 $ hg co --clean tip
663 $ hg co --clean tip
651 abort: path contains illegal component: HG8B6C~2/hgrc (glob)
664 abort: path contains illegal component: HG8B6C~2/hgrc (glob)
652 [255]
665 [255]
653
666
654 # test that an unmodified commit template message aborts
667 # test that an unmodified commit template message aborts
655
668
656 $ hg init unmodified_commit_template
669 $ hg init unmodified_commit_template
657 $ cd unmodified_commit_template
670 $ cd unmodified_commit_template
658 $ echo foo > foo
671 $ echo foo > foo
659 $ hg add foo
672 $ hg add foo
660 $ hg commit -m "foo"
673 $ hg commit -m "foo"
661 $ cat >> .hg/hgrc <<EOF
674 $ cat >> .hg/hgrc <<EOF
662 > [committemplate]
675 > [committemplate]
663 > changeset.commit = HI THIS IS NOT STRIPPED
676 > changeset.commit = HI THIS IS NOT STRIPPED
664 > HG: this is customized commit template
677 > HG: this is customized commit template
665 > HG: {extramsg}
678 > HG: {extramsg}
666 > {if(activebookmark,
679 > {if(activebookmark,
667 > "HG: bookmark '{activebookmark}' is activated\n",
680 > "HG: bookmark '{activebookmark}' is activated\n",
668 > "HG: no bookmark is activated\n")}{subrepos %
681 > "HG: no bookmark is activated\n")}{subrepos %
669 > "HG: subrepo '{subrepo}' is changed\n"}
682 > "HG: subrepo '{subrepo}' is changed\n"}
670 > EOF
683 > EOF
671 $ cat > $TESTTMP/notouching.sh <<EOF
684 $ cat > $TESTTMP/notouching.sh <<EOF
672 > true
685 > true
673 > EOF
686 > EOF
674 $ echo foo2 > foo2
687 $ echo foo2 > foo2
675 $ hg add foo2
688 $ hg add foo2
676 $ HGEDITOR="sh $TESTTMP/notouching.sh" hg commit
689 $ HGEDITOR="sh $TESTTMP/notouching.sh" hg commit
677 abort: commit message unchanged
690 abort: commit message unchanged
678 [255]
691 [255]
679 $ cd ..
692 $ cd ..
General Comments 0
You need to be logged in to leave comments. Login now