##// END OF EJS Templates
chgserver: extract utility to bind unix domain socket to long path...
Yuya Nishihara -
r29530:3239e2fd default
parent child Browse files
Show More
@@ -1,675 +1,664
1 # chgserver.py - command server extension for cHg
1 # chgserver.py - command server extension for cHg
2 #
2 #
3 # Copyright 2011 Yuya Nishihara <yuya@tcha.org>
3 # Copyright 2011 Yuya Nishihara <yuya@tcha.org>
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 """command server extension for cHg (EXPERIMENTAL)
8 """command server extension for cHg (EXPERIMENTAL)
9
9
10 'S' channel (read/write)
10 'S' channel (read/write)
11 propagate ui.system() request to client
11 propagate ui.system() request to client
12
12
13 'attachio' command
13 'attachio' command
14 attach client's stdio passed by sendmsg()
14 attach client's stdio passed by sendmsg()
15
15
16 'chdir' command
16 'chdir' command
17 change current directory
17 change current directory
18
18
19 'getpager' command
19 'getpager' command
20 checks if pager is enabled and which pager should be executed
20 checks if pager is enabled and which pager should be executed
21
21
22 'setenv' command
22 'setenv' command
23 replace os.environ completely
23 replace os.environ completely
24
24
25 'setumask' command
25 'setumask' command
26 set umask
26 set umask
27
27
28 'validate' command
28 'validate' command
29 reload the config and check if the server is up to date
29 reload the config and check if the server is up to date
30
30
31 Config
31 Config
32 ------
32 ------
33
33
34 ::
34 ::
35
35
36 [chgserver]
36 [chgserver]
37 idletimeout = 3600 # seconds, after which an idle server will exit
37 idletimeout = 3600 # seconds, after which an idle server will exit
38 skiphash = False # whether to skip config or env change checks
38 skiphash = False # whether to skip config or env change checks
39 """
39 """
40
40
41 from __future__ import absolute_import
41 from __future__ import absolute_import
42
42
43 import errno
43 import errno
44 import hashlib
44 import hashlib
45 import inspect
45 import inspect
46 import os
46 import os
47 import re
47 import re
48 import signal
48 import signal
49 import struct
49 import struct
50 import sys
50 import sys
51 import threading
51 import threading
52 import time
52 import time
53
53
54 from mercurial.i18n import _
54 from mercurial.i18n import _
55
55
56 from mercurial import (
56 from mercurial import (
57 cmdutil,
57 cmdutil,
58 commands,
58 commands,
59 commandserver,
59 commandserver,
60 dispatch,
60 dispatch,
61 error,
61 error,
62 extensions,
62 extensions,
63 osutil,
63 osutil,
64 util,
64 util,
65 )
65 )
66
66
# re-export util's socketserver alias so the rest of this module can refer
# to it directly (util papers over the Python 2/3 module naming difference
# -- presumably SocketServer vs socketserver; confirm against util.py)
socketserver = util.socketserver
68
68
# Note for extension authors: ONLY specify testedwith = 'internal' for
# extensions which SHIP WITH MERCURIAL. Non-mainline extensions should
# be specifying the version(s) of Mercurial they are tested with, or
# leave the attribute unspecified.
testedwith = 'internal'

# shorthand for the command server's debug logging helper
_log = commandserver.log
76
76
77 def _hashlist(items):
77 def _hashlist(items):
78 """return sha1 hexdigest for a list"""
78 """return sha1 hexdigest for a list"""
79 return hashlib.sha1(str(items)).hexdigest()
79 return hashlib.sha1(str(items)).hexdigest()
80
80
81 # sensitive config sections affecting confighash
81 # sensitive config sections affecting confighash
82 _configsections = [
82 _configsections = [
83 'alias', # affects global state commands.table
83 'alias', # affects global state commands.table
84 'extdiff', # uisetup will register new commands
84 'extdiff', # uisetup will register new commands
85 'extensions',
85 'extensions',
86 ]
86 ]
87
87
88 # sensitive environment variables affecting confighash
88 # sensitive environment variables affecting confighash
89 _envre = re.compile(r'''\A(?:
89 _envre = re.compile(r'''\A(?:
90 CHGHG
90 CHGHG
91 |HG.*
91 |HG.*
92 |LANG(?:UAGE)?
92 |LANG(?:UAGE)?
93 |LC_.*
93 |LC_.*
94 |LD_.*
94 |LD_.*
95 |PATH
95 |PATH
96 |PYTHON.*
96 |PYTHON.*
97 |TERM(?:INFO)?
97 |TERM(?:INFO)?
98 |TZ
98 |TZ
99 )\Z''', re.X)
99 )\Z''', re.X)
100
100
def _confighash(ui):
    """Return a 12-char hash summarizing sensitive config and environment.

    The first 6 characters hash the config items of ``_configsections``;
    the last 6 hash the environment variables matched by ``_envre``.

    For chgserver, once confighash changes the server is no longer
    qualified to serve its client and should redirect the client to a new
    server. Unlike a mtimehash change, this does not mark the server
    outdated, because the user can legitimately use different configs at
    the same time.
    """
    itemsbysection = [ui.configitems(section) for section in _configsections]
    sectionhash = _hashlist(itemsbysection)
    envitems = sorted((k, v) for k, v in os.environ.iteritems()
                      if _envre.match(k))
    envhash = _hashlist(envitems)
    return sectionhash[:6] + envhash[:6]
119
119
def _getmtimepaths(ui):
    """Return a sorted list of paths to stat for detecting changes.

    The list covers:
    - loaded extensions (top-level module files only, so complex
      extensions are not fully covered)
    - mercurial/__version__.py
    - the Python interpreter binary
    """
    mods = [module for _name, module in extensions.extensions(ui)]
    try:
        from mercurial import __version__
    except ImportError:
        pass
    else:
        mods.append(__version__)
    paths = set([sys.executable])
    for mod in mods:
        try:
            paths.add(inspect.getabsfile(mod))
        except TypeError:
            # built-in modules have no source file
            pass
    return sorted(paths)
141
141
def _mtimehash(paths):
    """return a quick hash for detecting file changes

    mtimehash stats each path and hashes the resulting (mtime, size)
    pairs. file contents are never read because reading is expensive, so
    the hash is fast but not 100% reliable: the same contents can yield
    different hashes, and carefully crafted different contents can yield
    the same hash.

    for chgserver, once mtimehash changes the server is considered
    outdated immediately and should no longer provide service.

    mtimehash is not included in confighash because extension paths are
    only known after importing them (imp.find_module exists but is racy),
    and confighash must be computable without importing.
    """
    def trystat(path):
        try:
            st = os.stat(path)
            return (st.st_mtime, st.st_size)
        except OSError:
            # could be ENOENT, EPERM etc. not fatal in any case
            pass
    return _hashlist([trystat(p) for p in paths])[:12]
167
167
class hashstate(object):
    """a structure storing confighash, mtimehash, paths used for mtimehash"""

    def __init__(self, confighash, mtimehash, mtimepaths):
        self.confighash = confighash
        self.mtimehash = mtimehash
        self.mtimepaths = mtimepaths

    @staticmethod
    def fromui(ui, mtimepaths=None):
        """Build a hashstate from *ui*, computing mtimepaths if omitted."""
        if mtimepaths is None:
            mtimepaths = _getmtimepaths(ui)
        state = hashstate(_confighash(ui), _mtimehash(mtimepaths), mtimepaths)
        _log('confighash = %s mtimehash = %s\n'
             % (state.confighash, state.mtimehash))
        return state
183
183
# copied from hgext/pager.py:uisetup()
def _setuppagercmd(ui, options, cmd):
    """Return the pager command for *cmd*, or None if no pager applies.

    When a pager is selected, 'ui.formatted' is pinned to its current
    value and 'ui.interactive' is forced off so output is rendered for
    the pager. Returns None when the ui is not formatted or paging is
    disabled.
    """
    if not ui.formatted():
        return

    p = ui.config("pager", "pager", os.environ.get("PAGER"))
    usepager = False
    always = util.parsebool(options['pager'])
    auto = options['pager'] == 'auto'

    if not p:
        pass
    elif always:
        usepager = True
    elif not auto:
        usepager = False
    else:
        attended = ['annotate', 'cat', 'diff', 'export', 'glog', 'log', 'qdiff']
        attend = ui.configlist('pager', 'attend', attended)
        ignore = ui.configlist('pager', 'ignore')
        # use throwaway names so we neither shadow the 'cmd' parameter
        # nor the module-level i18n function '_'
        cmds, _unused = cmdutil.findcmd(cmd, commands.table)

        for candidate in cmds:
            var = 'attend-%s' % candidate
            if ui.config('pager', var):
                usepager = ui.configbool('pager', var)
                break
            if (candidate in attend or
                (candidate not in ignore and not attend)):
                usepager = True
                break

    if usepager:
        ui.setconfig('ui', 'formatted', ui.formatted(), 'pager')
        ui.setconfig('ui', 'interactive', False, 'pager')
        return p
220
220
def _newchgui(srcui, csystem):
    """Wrap *srcui* in a ui subclass whose system() delegates to *csystem*.

    csystem is a callable (cmd, env, cwd) -> exitcode; here it is the
    channeledsystem that forwards shell requests to the chg client.
    Returns a new chgui instance copied from srcui.
    """
    class chgui(srcui.__class__):
        def __init__(self, src=None):
            super(chgui, self).__init__(src)
            if src:
                # preserve the csystem of the copied-from ui if it has one
                self._csystem = getattr(src, '_csystem', csystem)
            else:
                self._csystem = csystem

        def system(self, cmd, environ=None, cwd=None, onerr=None,
                   errprefix=None):
            # fallback to the original system method if the output needs to be
            # captured (to self._buffers), or the output stream is not stdout
            # (e.g. stderr, cStringIO), because the chg client is not aware of
            # these situations and will behave differently (write to stdout).
            if (any(s[1] for s in self._bufferstates)
                or not util.safehasattr(self.fout, 'fileno')
                or self.fout.fileno() != sys.stdout.fileno()):
                return super(chgui, self).system(cmd, environ, cwd, onerr,
                                                 errprefix)
            # copied from mercurial/util.py:system()
            self.flush()
            def py2shell(val):
                # render Python values the way a shell environment expects
                if val is None or val is False:
                    return '0'
                if val is True:
                    return '1'
                return str(val)
            env = os.environ.copy()
            if environ:
                env.update((k, py2shell(v)) for k, v in environ.iteritems())
            env['HG'] = util.hgexecutable()
            # delegate the actual execution to the chg client
            rc = self._csystem(cmd, env, cwd)
            if rc and onerr:
                errmsg = '%s %s' % (os.path.basename(cmd.split(None, 1)[0]),
                                    util.explainexit(rc)[0])
                if errprefix:
                    errmsg = '%s: %s' % (errprefix, errmsg)
                raise onerr(errmsg)
            return rc

    return chgui(srcui)
263
263
def _loadnewui(srcui, args):
    """Create a fresh (newui, newlui) pair from *srcui* and CLI *args*.

    The new ui inherits srcui's stdio and environ (and _csystem if set),
    re-applies --config options found in args, copies dynamically-set
    config values from srcui, then loads working-directory and repository
    config the same way dispatch.py does.
    """
    newui = srcui.__class__()
    for a in ['fin', 'fout', 'ferr', 'environ']:
        setattr(newui, a, getattr(srcui, a))
    if util.safehasattr(srcui, '_csystem'):
        newui._csystem = srcui._csystem

    # internal config: extensions.chgserver
    newui.setconfig('extensions', 'chgserver',
                    srcui.config('extensions', 'chgserver'), '--config')

    # command line args
    args = args[:]
    dispatch._parseconfig(newui, dispatch._earlygetopt(['--config'], args))

    # stolen from tortoisehg.util.copydynamicconfig()
    for section, name, value in srcui.walkconfig():
        source = srcui.configsource(section, name)
        if ':' in source or source == '--config':
            # path:line or command line
            continue
        if source == 'none':
            # ui.configsource returns 'none' by default
            source = ''
        newui.setconfig(section, name, value, source)

    # load wd and repo config, copied from dispatch.py
    cwds = dispatch._earlygetopt(['--cwd'], args)
    cwd = cwds and os.path.realpath(cwds[-1]) or None
    rpath = dispatch._earlygetopt(["-R", "--repository", "--repo"], args)
    path, newlui = dispatch._getlocal(newui, rpath, wd=cwd)

    return (newui, newlui)
297
297
class channeledsystem(object):
    """Propagate ui.system() request in the following format:

    payload length (unsigned int),
    cmd, '\0',
    cwd, '\0',
    envkey, '=', val, '\0',
    ...
    envkey, '=', val

    and waits:

    exitcode length (unsigned int),
    exitcode (int)
    """

    def __init__(self, in_, out, channel):
        self.in_ = in_
        self.out = out
        self.channel = channel

    def __call__(self, cmd, environ, cwd):
        # serialize the request as NUL-separated fields on our channel
        fields = [util.quotecommand(cmd), os.path.abspath(cwd or '.')]
        fields.extend('%s=%s' % (k, v) for k, v in environ.iteritems())
        payload = '\0'.join(fields)
        self.out.write(struct.pack('>cI', self.channel, len(payload)))
        self.out.write(payload)
        self.out.flush()

        # the client replies with a 4-byte length (which must be 4)
        # followed by the exit code as a signed big-endian int
        length, = struct.unpack('>I', self.in_.read(4))
        if length != 4:
            raise error.Abort(_('invalid response'))
        rc, = struct.unpack('>i', self.in_.read(4))
        return rc
332
332
333 _iochannels = [
333 _iochannels = [
334 # server.ch, ui.fp, mode
334 # server.ch, ui.fp, mode
335 ('cin', 'fin', 'rb'),
335 ('cin', 'fin', 'rb'),
336 ('cout', 'fout', 'wb'),
336 ('cout', 'fout', 'wb'),
337 ('cerr', 'ferr', 'wb'),
337 ('cerr', 'ferr', 'wb'),
338 ]
338 ]
339
339
class chgcmdserver(commandserver.server):
    """Command server serving a single chg client over a unix socket.

    Wraps the given ui with _newchgui so ui.system() requests are
    propagated to the client over the 'S' channel, and registers the
    chg-specific capabilities (attachio, chdir, getpager, setenv,
    setumask, and optionally validate).
    """

    def __init__(self, ui, repo, fin, fout, sock, hashstate, baseaddress):
        super(chgcmdserver, self).__init__(
            _newchgui(ui, channeledsystem(fin, fout, 'S')), repo, fin, fout)
        self.clientsock = sock
        self._oldios = []  # original (self.ch, ui.fp, fd) before "attachio"
        self.hashstate = hashstate
        self.baseaddress = baseaddress
        if hashstate is not None:
            # copy so the class-level capabilities dict stays untouched
            self.capabilities = self.capabilities.copy()
            self.capabilities['validate'] = chgcmdserver.validate

    def cleanup(self):
        super(chgcmdserver, self).cleanup()
        # dispatch._runcatch() does not flush outputs if exception is not
        # handled by dispatch._dispatch()
        self.ui.flush()
        self._restoreio()

    def attachio(self):
        """Attach to client's stdio passed via unix domain socket; all
        channels except cresult will no longer be used
        """
        # tell client to sendmsg() with 1-byte payload, which makes it
        # distinctive from "attachio\n" command consumed by client.read()
        self.clientsock.sendall(struct.pack('>cI', 'I', 1))
        clientfds = osutil.recvfds(self.clientsock.fileno())
        _log('received fds: %r\n' % clientfds)

        ui = self.ui
        ui.flush()
        first = self._saveio()
        for fd, (cn, fn, mode) in zip(clientfds, _iochannels):
            assert fd > 0
            fp = getattr(ui, fn)
            # point our existing stream at the client's fd, then drop the
            # extra reference to the received fd
            os.dup2(fd, fp.fileno())
            os.close(fd)
            if not first:
                continue
            # reset buffering mode when client is first attached. as we want
            # to see output immediately on pager, the mode stays unchanged
            # when client re-attached. ferr is unchanged because it should
            # be unbuffered no matter if it is a tty or not.
            if fn == 'ferr':
                newfp = fp
            else:
                # make it line buffered explicitly because the default is
                # decided on first write(), where fout could be a pager.
                if fp.isatty():
                    bufsize = 1  # line buffered
                else:
                    bufsize = -1  # system default
                newfp = os.fdopen(fp.fileno(), mode, bufsize)
            setattr(ui, fn, newfp)
            setattr(self, cn, newfp)

        # acknowledge with the number of fds actually attached
        self.cresult.write(struct.pack('>i', len(clientfds)))

    def _saveio(self):
        # Save (channel, ui fp, dup'd fd) triples so _restoreio can undo
        # attachio. Returns True only on the first save; re-attaching a
        # client must not overwrite the saved originals.
        if self._oldios:
            return False
        ui = self.ui
        for cn, fn, _mode in _iochannels:
            ch = getattr(self, cn)
            fp = getattr(ui, fn)
            fd = os.dup(fp.fileno())
            self._oldios.append((ch, fp, fd))
        return True

    def _restoreio(self):
        # Undo attachio: re-point the original file objects back at the
        # saved fds and restore the original channel objects.
        ui = self.ui
        for (ch, fp, fd), (cn, fn, _mode) in zip(self._oldios, _iochannels):
            newfp = getattr(ui, fn)
            # close newfp while it's associated with client; otherwise it
            # would be closed when newfp is deleted
            if newfp is not fp:
                newfp.close()
            # restore original fd: fp is open again
            os.dup2(fd, fp.fileno())
            os.close(fd)
            setattr(self, cn, ch)
            setattr(ui, fn, fp)
        del self._oldios[:]

    def validate(self):
        """Reload the config and check if the server is up to date

        Read a list of '\0' separated arguments.
        Write a non-empty list of '\0' separated instruction strings or '\0'
        if the list is empty.
        An instruction string could be either:
        - "unlink $path", the client should unlink the path to stop the
          outdated server.
        - "redirect $path", the client should attempt to connect to $path
          first. If it does not work, start a new server. It implies
          "reconnect".
        - "exit $n", the client should exit directly with code n.
          This may happen if we cannot parse the config.
        - "reconnect", the client should close the connection and
          reconnect.
        If neither "reconnect" nor "redirect" is included in the instruction
        list, the client can continue with this server after completing all
        the instructions.
        """
        args = self._readlist()
        try:
            self.ui, lui = _loadnewui(self.ui, args)
        except error.ParseError as inst:
            dispatch._formatparse(self.ui.warn, inst)
            self.ui.flush()
            self.cresult.write('exit 255')
            return
        newhash = hashstate.fromui(lui, self.hashstate.mtimepaths)
        insts = []
        if newhash.mtimehash != self.hashstate.mtimehash:
            addr = _hashaddress(self.baseaddress, self.hashstate.confighash)
            insts.append('unlink %s' % addr)
            # mtimehash is empty if one or more extensions fail to load.
            # to be compatible with hg, still serve the client this time.
            if self.hashstate.mtimehash:
                insts.append('reconnect')
        if newhash.confighash != self.hashstate.confighash:
            addr = _hashaddress(self.baseaddress, newhash.confighash)
            insts.append('redirect %s' % addr)
        _log('validate: %s\n' % insts)
        self.cresult.write('\0'.join(insts) or '\0')

    def chdir(self):
        """Change current directory

        Note that the behavior of --cwd option is bit different from this.
        It does not affect --config parameter.
        """
        path = self._readstr()
        if not path:
            return
        _log('chdir to %r\n' % path)
        os.chdir(path)

    def setumask(self):
        """Change umask"""
        mask = struct.unpack('>I', self._read(4))[0]
        _log('setumask %r\n' % mask)
        os.umask(mask)

    def getpager(self):
        """Read cmdargs and write pager command to r-channel if enabled

        If pager isn't enabled, this writes '\0' because channeledoutput
        does not allow to write empty data.
        """
        args = self._readlist()
        try:
            cmd, _func, args, options, _cmdoptions = dispatch._parse(self.ui,
                                                                     args)
        except (error.Abort, error.AmbiguousCommand, error.CommandError,
                error.UnknownCommand):
            # unparsable command line: report "no pager" below
            cmd = None
            options = {}
        if not cmd or 'pager' not in options:
            self.cresult.write('\0')
            return

        pagercmd = _setuppagercmd(self.ui, options, cmd)
        if pagercmd:
            # Python's SIGPIPE is SIG_IGN by default. change to SIG_DFL so
            # we can exit if the pipe to the pager is closed
            if util.safehasattr(signal, 'SIGPIPE') and \
                signal.getsignal(signal.SIGPIPE) == signal.SIG_IGN:
                signal.signal(signal.SIGPIPE, signal.SIG_DFL)
            self.cresult.write(pagercmd)
        else:
            self.cresult.write('\0')

    def setenv(self):
        """Clear and update os.environ

        Note that not all variables can make an effect on the running process.
        """
        l = self._readlist()
        try:
            newenv = dict(s.split('=', 1) for s in l)
        except ValueError:
            raise ValueError('unexpected value in setenv request')
        _log('setenv: %r\n' % sorted(newenv.keys()))
        os.environ.clear()
        os.environ.update(newenv)

    # chg-specific capabilities on top of the base command server's
    capabilities = commandserver.server.capabilities.copy()
    capabilities.update({'attachio': attachio,
                         'chdir': chdir,
                         'getpager': getpager,
                         'setenv': setenv,
                         'setumask': setumask})
534
534
535 class _requesthandler(commandserver._requesthandler):
535 class _requesthandler(commandserver._requesthandler):
536 def _createcmdserver(self):
536 def _createcmdserver(self):
537 ui = self.server.ui
537 ui = self.server.ui
538 repo = self.server.repo
538 repo = self.server.repo
539 return chgcmdserver(ui, repo, self.rfile, self.wfile, self.connection,
539 return chgcmdserver(ui, repo, self.rfile, self.wfile, self.connection,
540 self.server.hashstate, self.server.baseaddress)
540 self.server.hashstate, self.server.baseaddress)
541
541
542 def _tempaddress(address):
542 def _tempaddress(address):
543 return '%s.%d.tmp' % (address, os.getpid())
543 return '%s.%d.tmp' % (address, os.getpid())
544
544
545 def _hashaddress(address, hashstr):
545 def _hashaddress(address, hashstr):
546 return '%s-%s' % (address, hashstr)
546 return '%s-%s' % (address, hashstr)
547
547
548 class AutoExitMixIn: # use old-style to comply with SocketServer design
548 class AutoExitMixIn: # use old-style to comply with SocketServer design
549 lastactive = time.time()
549 lastactive = time.time()
550 idletimeout = 3600 # default 1 hour
550 idletimeout = 3600 # default 1 hour
551
551
552 def startautoexitthread(self):
552 def startautoexitthread(self):
553 # note: the auto-exit check here is cheap enough to not use a thread,
553 # note: the auto-exit check here is cheap enough to not use a thread,
554 # be done in serve_forever. however SocketServer is hook-unfriendly,
554 # be done in serve_forever. however SocketServer is hook-unfriendly,
555 # you simply cannot hook serve_forever without copying a lot of code.
555 # you simply cannot hook serve_forever without copying a lot of code.
556 # besides, serve_forever's docstring suggests using thread.
556 # besides, serve_forever's docstring suggests using thread.
557 thread = threading.Thread(target=self._autoexitloop)
557 thread = threading.Thread(target=self._autoexitloop)
558 thread.daemon = True
558 thread.daemon = True
559 thread.start()
559 thread.start()
560
560
561 def _autoexitloop(self, interval=1):
561 def _autoexitloop(self, interval=1):
562 while True:
562 while True:
563 time.sleep(interval)
563 time.sleep(interval)
564 if not self.issocketowner():
564 if not self.issocketowner():
565 _log('%s is not owned, exiting.\n' % self.server_address)
565 _log('%s is not owned, exiting.\n' % self.server_address)
566 break
566 break
567 if time.time() - self.lastactive > self.idletimeout:
567 if time.time() - self.lastactive > self.idletimeout:
568 _log('being idle too long. exiting.\n')
568 _log('being idle too long. exiting.\n')
569 break
569 break
570 self.shutdown()
570 self.shutdown()
571
571
572 def process_request(self, request, address):
572 def process_request(self, request, address):
573 self.lastactive = time.time()
573 self.lastactive = time.time()
574 return socketserver.ForkingMixIn.process_request(
574 return socketserver.ForkingMixIn.process_request(
575 self, request, address)
575 self, request, address)
576
576
577 def server_bind(self):
577 def server_bind(self):
578 # use a unique temp address so we can stat the file and do ownership
578 # use a unique temp address so we can stat the file and do ownership
579 # check later
579 # check later
580 tempaddress = _tempaddress(self.server_address)
580 tempaddress = _tempaddress(self.server_address)
581 # use relative path instead of full path at bind() if possible, since
581 util.bindunixsocket(self.socket, tempaddress)
582 # AF_UNIX path has very small length limit (107 chars) on common
583 # platforms (see sys/un.h)
584 dirname, basename = os.path.split(tempaddress)
585 bakwdfd = None
586 if dirname:
587 bakwdfd = os.open('.', os.O_DIRECTORY)
588 os.chdir(dirname)
589 self.socket.bind(basename)
590 if bakwdfd:
591 os.fchdir(bakwdfd)
592 os.close(bakwdfd)
593 self._socketstat = os.stat(tempaddress)
582 self._socketstat = os.stat(tempaddress)
594 # rename will replace the old socket file if exists atomically. the
583 # rename will replace the old socket file if exists atomically. the
595 # old server will detect ownership change and exit.
584 # old server will detect ownership change and exit.
596 util.rename(tempaddress, self.server_address)
585 util.rename(tempaddress, self.server_address)
597
586
598 def issocketowner(self):
587 def issocketowner(self):
599 try:
588 try:
600 stat = os.stat(self.server_address)
589 stat = os.stat(self.server_address)
601 return (stat.st_ino == self._socketstat.st_ino and
590 return (stat.st_ino == self._socketstat.st_ino and
602 stat.st_mtime == self._socketstat.st_mtime)
591 stat.st_mtime == self._socketstat.st_mtime)
603 except OSError:
592 except OSError:
604 return False
593 return False
605
594
606 def unlinksocketfile(self):
595 def unlinksocketfile(self):
607 if not self.issocketowner():
596 if not self.issocketowner():
608 return
597 return
609 # it is possible to have a race condition here that we may
598 # it is possible to have a race condition here that we may
610 # remove another server's socket file. but that's okay
599 # remove another server's socket file. but that's okay
611 # since that server will detect and exit automatically and
600 # since that server will detect and exit automatically and
612 # the client will start a new server on demand.
601 # the client will start a new server on demand.
613 try:
602 try:
614 os.unlink(self.server_address)
603 os.unlink(self.server_address)
615 except OSError as exc:
604 except OSError as exc:
616 if exc.errno != errno.ENOENT:
605 if exc.errno != errno.ENOENT:
617 raise
606 raise
618
607
619 class chgunixservice(commandserver.unixservice):
608 class chgunixservice(commandserver.unixservice):
620 def init(self):
609 def init(self):
621 if self.repo:
610 if self.repo:
622 # one chgserver can serve multiple repos. drop repo infomation
611 # one chgserver can serve multiple repos. drop repo infomation
623 self.ui.setconfig('bundle', 'mainreporoot', '', 'repo')
612 self.ui.setconfig('bundle', 'mainreporoot', '', 'repo')
624 self.repo = None
613 self.repo = None
625 self._inithashstate()
614 self._inithashstate()
626 self._checkextensions()
615 self._checkextensions()
627 class cls(AutoExitMixIn, socketserver.ForkingMixIn,
616 class cls(AutoExitMixIn, socketserver.ForkingMixIn,
628 socketserver.UnixStreamServer):
617 socketserver.UnixStreamServer):
629 ui = self.ui
618 ui = self.ui
630 repo = self.repo
619 repo = self.repo
631 hashstate = self.hashstate
620 hashstate = self.hashstate
632 baseaddress = self.baseaddress
621 baseaddress = self.baseaddress
633 self.server = cls(self.address, _requesthandler)
622 self.server = cls(self.address, _requesthandler)
634 self.server.idletimeout = self.ui.configint(
623 self.server.idletimeout = self.ui.configint(
635 'chgserver', 'idletimeout', self.server.idletimeout)
624 'chgserver', 'idletimeout', self.server.idletimeout)
636 self.server.startautoexitthread()
625 self.server.startautoexitthread()
637 self._createsymlink()
626 self._createsymlink()
638
627
639 def _inithashstate(self):
628 def _inithashstate(self):
640 self.baseaddress = self.address
629 self.baseaddress = self.address
641 if self.ui.configbool('chgserver', 'skiphash', False):
630 if self.ui.configbool('chgserver', 'skiphash', False):
642 self.hashstate = None
631 self.hashstate = None
643 return
632 return
644 self.hashstate = hashstate.fromui(self.ui)
633 self.hashstate = hashstate.fromui(self.ui)
645 self.address = _hashaddress(self.address, self.hashstate.confighash)
634 self.address = _hashaddress(self.address, self.hashstate.confighash)
646
635
647 def _checkextensions(self):
636 def _checkextensions(self):
648 if not self.hashstate:
637 if not self.hashstate:
649 return
638 return
650 if extensions.notloaded():
639 if extensions.notloaded():
651 # one or more extensions failed to load. mtimehash becomes
640 # one or more extensions failed to load. mtimehash becomes
652 # meaningless because we do not know the paths of those extensions.
641 # meaningless because we do not know the paths of those extensions.
653 # set mtimehash to an illegal hash value to invalidate the server.
642 # set mtimehash to an illegal hash value to invalidate the server.
654 self.hashstate.mtimehash = ''
643 self.hashstate.mtimehash = ''
655
644
656 def _createsymlink(self):
645 def _createsymlink(self):
657 if self.baseaddress == self.address:
646 if self.baseaddress == self.address:
658 return
647 return
659 tempaddress = _tempaddress(self.baseaddress)
648 tempaddress = _tempaddress(self.baseaddress)
660 os.symlink(os.path.basename(self.address), tempaddress)
649 os.symlink(os.path.basename(self.address), tempaddress)
661 util.rename(tempaddress, self.baseaddress)
650 util.rename(tempaddress, self.baseaddress)
662
651
663 def run(self):
652 def run(self):
664 try:
653 try:
665 self.server.serve_forever()
654 self.server.serve_forever()
666 finally:
655 finally:
667 self.server.unlinksocketfile()
656 self.server.unlinksocketfile()
668
657
669 def uisetup(ui):
658 def uisetup(ui):
670 commandserver._servicemap['chgunix'] = chgunixservice
659 commandserver._servicemap['chgunix'] = chgunixservice
671
660
672 # CHGINTERNALMARK is temporarily set by chg client to detect if chg will
661 # CHGINTERNALMARK is temporarily set by chg client to detect if chg will
673 # start another chg. drop it to avoid possible side effects.
662 # start another chg. drop it to avoid possible side effects.
674 if 'CHGINTERNALMARK' in os.environ:
663 if 'CHGINTERNALMARK' in os.environ:
675 del os.environ['CHGINTERNALMARK']
664 del os.environ['CHGINTERNALMARK']
@@ -1,600 +1,615
1 # posix.py - Posix utility function implementations for Mercurial
1 # posix.py - Posix utility function implementations for Mercurial
2 #
2 #
3 # Copyright 2005-2009 Matt Mackall <mpm@selenic.com> and others
3 # Copyright 2005-2009 Matt Mackall <mpm@selenic.com> and others
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 from __future__ import absolute_import
8 from __future__ import absolute_import
9
9
10 import errno
10 import errno
11 import fcntl
11 import fcntl
12 import getpass
12 import getpass
13 import grp
13 import grp
14 import os
14 import os
15 import pwd
15 import pwd
16 import re
16 import re
17 import select
17 import select
18 import stat
18 import stat
19 import sys
19 import sys
20 import tempfile
20 import tempfile
21 import unicodedata
21 import unicodedata
22
22
23 from .i18n import _
23 from .i18n import _
24 from . import (
24 from . import (
25 encoding,
25 encoding,
26 )
26 )
27
27
28 posixfile = open
28 posixfile = open
29 normpath = os.path.normpath
29 normpath = os.path.normpath
30 samestat = os.path.samestat
30 samestat = os.path.samestat
31 try:
31 try:
32 oslink = os.link
32 oslink = os.link
33 except AttributeError:
33 except AttributeError:
34 # Some platforms build Python without os.link on systems that are
34 # Some platforms build Python without os.link on systems that are
35 # vaguely unix-like but don't have hardlink support. For those
35 # vaguely unix-like but don't have hardlink support. For those
36 # poor souls, just say we tried and that it failed so we fall back
36 # poor souls, just say we tried and that it failed so we fall back
37 # to copies.
37 # to copies.
38 def oslink(src, dst):
38 def oslink(src, dst):
39 raise OSError(errno.EINVAL,
39 raise OSError(errno.EINVAL,
40 'hardlinks not supported: %s to %s' % (src, dst))
40 'hardlinks not supported: %s to %s' % (src, dst))
41 unlink = os.unlink
41 unlink = os.unlink
42 rename = os.rename
42 rename = os.rename
43 removedirs = os.removedirs
43 removedirs = os.removedirs
44 expandglobs = False
44 expandglobs = False
45
45
46 umask = os.umask(0)
46 umask = os.umask(0)
47 os.umask(umask)
47 os.umask(umask)
48
48
49 def split(p):
49 def split(p):
50 '''Same as posixpath.split, but faster
50 '''Same as posixpath.split, but faster
51
51
52 >>> import posixpath
52 >>> import posixpath
53 >>> for f in ['/absolute/path/to/file',
53 >>> for f in ['/absolute/path/to/file',
54 ... 'relative/path/to/file',
54 ... 'relative/path/to/file',
55 ... 'file_alone',
55 ... 'file_alone',
56 ... 'path/to/directory/',
56 ... 'path/to/directory/',
57 ... '/multiple/path//separators',
57 ... '/multiple/path//separators',
58 ... '/file_at_root',
58 ... '/file_at_root',
59 ... '///multiple_leading_separators_at_root',
59 ... '///multiple_leading_separators_at_root',
60 ... '']:
60 ... '']:
61 ... assert split(f) == posixpath.split(f), f
61 ... assert split(f) == posixpath.split(f), f
62 '''
62 '''
63 ht = p.rsplit('/', 1)
63 ht = p.rsplit('/', 1)
64 if len(ht) == 1:
64 if len(ht) == 1:
65 return '', p
65 return '', p
66 nh = ht[0].rstrip('/')
66 nh = ht[0].rstrip('/')
67 if nh:
67 if nh:
68 return nh, ht[1]
68 return nh, ht[1]
69 return ht[0] + '/', ht[1]
69 return ht[0] + '/', ht[1]
70
70
71 def openhardlinks():
71 def openhardlinks():
72 '''return true if it is safe to hold open file handles to hardlinks'''
72 '''return true if it is safe to hold open file handles to hardlinks'''
73 return True
73 return True
74
74
75 def nlinks(name):
75 def nlinks(name):
76 '''return number of hardlinks for the given file'''
76 '''return number of hardlinks for the given file'''
77 return os.lstat(name).st_nlink
77 return os.lstat(name).st_nlink
78
78
79 def parsepatchoutput(output_line):
79 def parsepatchoutput(output_line):
80 """parses the output produced by patch and returns the filename"""
80 """parses the output produced by patch and returns the filename"""
81 pf = output_line[14:]
81 pf = output_line[14:]
82 if os.sys.platform == 'OpenVMS':
82 if os.sys.platform == 'OpenVMS':
83 if pf[0] == '`':
83 if pf[0] == '`':
84 pf = pf[1:-1] # Remove the quotes
84 pf = pf[1:-1] # Remove the quotes
85 else:
85 else:
86 if pf.startswith("'") and pf.endswith("'") and " " in pf:
86 if pf.startswith("'") and pf.endswith("'") and " " in pf:
87 pf = pf[1:-1] # Remove the quotes
87 pf = pf[1:-1] # Remove the quotes
88 return pf
88 return pf
89
89
90 def sshargs(sshcmd, host, user, port):
90 def sshargs(sshcmd, host, user, port):
91 '''Build argument list for ssh'''
91 '''Build argument list for ssh'''
92 args = user and ("%s@%s" % (user, host)) or host
92 args = user and ("%s@%s" % (user, host)) or host
93 return port and ("%s -p %s" % (args, port)) or args
93 return port and ("%s -p %s" % (args, port)) or args
94
94
95 def isexec(f):
95 def isexec(f):
96 """check whether a file is executable"""
96 """check whether a file is executable"""
97 return (os.lstat(f).st_mode & 0o100 != 0)
97 return (os.lstat(f).st_mode & 0o100 != 0)
98
98
99 def setflags(f, l, x):
99 def setflags(f, l, x):
100 s = os.lstat(f).st_mode
100 s = os.lstat(f).st_mode
101 if l:
101 if l:
102 if not stat.S_ISLNK(s):
102 if not stat.S_ISLNK(s):
103 # switch file to link
103 # switch file to link
104 fp = open(f)
104 fp = open(f)
105 data = fp.read()
105 data = fp.read()
106 fp.close()
106 fp.close()
107 os.unlink(f)
107 os.unlink(f)
108 try:
108 try:
109 os.symlink(data, f)
109 os.symlink(data, f)
110 except OSError:
110 except OSError:
111 # failed to make a link, rewrite file
111 # failed to make a link, rewrite file
112 fp = open(f, "w")
112 fp = open(f, "w")
113 fp.write(data)
113 fp.write(data)
114 fp.close()
114 fp.close()
115 # no chmod needed at this point
115 # no chmod needed at this point
116 return
116 return
117 if stat.S_ISLNK(s):
117 if stat.S_ISLNK(s):
118 # switch link to file
118 # switch link to file
119 data = os.readlink(f)
119 data = os.readlink(f)
120 os.unlink(f)
120 os.unlink(f)
121 fp = open(f, "w")
121 fp = open(f, "w")
122 fp.write(data)
122 fp.write(data)
123 fp.close()
123 fp.close()
124 s = 0o666 & ~umask # avoid restatting for chmod
124 s = 0o666 & ~umask # avoid restatting for chmod
125
125
126 sx = s & 0o100
126 sx = s & 0o100
127 if x and not sx:
127 if x and not sx:
128 # Turn on +x for every +r bit when making a file executable
128 # Turn on +x for every +r bit when making a file executable
129 # and obey umask.
129 # and obey umask.
130 os.chmod(f, s | (s & 0o444) >> 2 & ~umask)
130 os.chmod(f, s | (s & 0o444) >> 2 & ~umask)
131 elif not x and sx:
131 elif not x and sx:
132 # Turn off all +x bits
132 # Turn off all +x bits
133 os.chmod(f, s & 0o666)
133 os.chmod(f, s & 0o666)
134
134
135 def copymode(src, dst, mode=None):
135 def copymode(src, dst, mode=None):
136 '''Copy the file mode from the file at path src to dst.
136 '''Copy the file mode from the file at path src to dst.
137 If src doesn't exist, we're using mode instead. If mode is None, we're
137 If src doesn't exist, we're using mode instead. If mode is None, we're
138 using umask.'''
138 using umask.'''
139 try:
139 try:
140 st_mode = os.lstat(src).st_mode & 0o777
140 st_mode = os.lstat(src).st_mode & 0o777
141 except OSError as inst:
141 except OSError as inst:
142 if inst.errno != errno.ENOENT:
142 if inst.errno != errno.ENOENT:
143 raise
143 raise
144 st_mode = mode
144 st_mode = mode
145 if st_mode is None:
145 if st_mode is None:
146 st_mode = ~umask
146 st_mode = ~umask
147 st_mode &= 0o666
147 st_mode &= 0o666
148 os.chmod(dst, st_mode)
148 os.chmod(dst, st_mode)
149
149
150 def checkexec(path):
150 def checkexec(path):
151 """
151 """
152 Check whether the given path is on a filesystem with UNIX-like exec flags
152 Check whether the given path is on a filesystem with UNIX-like exec flags
153
153
154 Requires a directory (like /foo/.hg)
154 Requires a directory (like /foo/.hg)
155 """
155 """
156
156
157 # VFAT on some Linux versions can flip mode but it doesn't persist
157 # VFAT on some Linux versions can flip mode but it doesn't persist
158 # a FS remount. Frequently we can detect it if files are created
158 # a FS remount. Frequently we can detect it if files are created
159 # with exec bit on.
159 # with exec bit on.
160
160
161 try:
161 try:
162 EXECFLAGS = stat.S_IXUSR | stat.S_IXGRP | stat.S_IXOTH
162 EXECFLAGS = stat.S_IXUSR | stat.S_IXGRP | stat.S_IXOTH
163 fh, fn = tempfile.mkstemp(dir=path, prefix='hg-checkexec-')
163 fh, fn = tempfile.mkstemp(dir=path, prefix='hg-checkexec-')
164 try:
164 try:
165 os.close(fh)
165 os.close(fh)
166 m = os.stat(fn).st_mode & 0o777
166 m = os.stat(fn).st_mode & 0o777
167 new_file_has_exec = m & EXECFLAGS
167 new_file_has_exec = m & EXECFLAGS
168 os.chmod(fn, m ^ EXECFLAGS)
168 os.chmod(fn, m ^ EXECFLAGS)
169 exec_flags_cannot_flip = ((os.stat(fn).st_mode & 0o777) == m)
169 exec_flags_cannot_flip = ((os.stat(fn).st_mode & 0o777) == m)
170 finally:
170 finally:
171 os.unlink(fn)
171 os.unlink(fn)
172 except (IOError, OSError):
172 except (IOError, OSError):
173 # we don't care, the user probably won't be able to commit anyway
173 # we don't care, the user probably won't be able to commit anyway
174 return False
174 return False
175 return not (new_file_has_exec or exec_flags_cannot_flip)
175 return not (new_file_has_exec or exec_flags_cannot_flip)
176
176
177 def checklink(path):
177 def checklink(path):
178 """check whether the given path is on a symlink-capable filesystem"""
178 """check whether the given path is on a symlink-capable filesystem"""
179 # mktemp is not racy because symlink creation will fail if the
179 # mktemp is not racy because symlink creation will fail if the
180 # file already exists
180 # file already exists
181 while True:
181 while True:
182 name = tempfile.mktemp(dir=path, prefix='hg-checklink-')
182 name = tempfile.mktemp(dir=path, prefix='hg-checklink-')
183 try:
183 try:
184 fd = tempfile.NamedTemporaryFile(dir=path, prefix='hg-checklink-')
184 fd = tempfile.NamedTemporaryFile(dir=path, prefix='hg-checklink-')
185 try:
185 try:
186 os.symlink(os.path.basename(fd.name), name)
186 os.symlink(os.path.basename(fd.name), name)
187 os.unlink(name)
187 os.unlink(name)
188 return True
188 return True
189 except OSError as inst:
189 except OSError as inst:
190 # link creation might race, try again
190 # link creation might race, try again
191 if inst[0] == errno.EEXIST:
191 if inst[0] == errno.EEXIST:
192 continue
192 continue
193 raise
193 raise
194 finally:
194 finally:
195 fd.close()
195 fd.close()
196 except AttributeError:
196 except AttributeError:
197 return False
197 return False
198 except OSError as inst:
198 except OSError as inst:
199 # sshfs might report failure while successfully creating the link
199 # sshfs might report failure while successfully creating the link
200 if inst[0] == errno.EIO and os.path.exists(name):
200 if inst[0] == errno.EIO and os.path.exists(name):
201 os.unlink(name)
201 os.unlink(name)
202 return False
202 return False
203
203
204 def checkosfilename(path):
204 def checkosfilename(path):
205 '''Check that the base-relative path is a valid filename on this platform.
205 '''Check that the base-relative path is a valid filename on this platform.
206 Returns None if the path is ok, or a UI string describing the problem.'''
206 Returns None if the path is ok, or a UI string describing the problem.'''
207 pass # on posix platforms, every path is ok
207 pass # on posix platforms, every path is ok
208
208
209 def setbinary(fd):
209 def setbinary(fd):
210 pass
210 pass
211
211
212 def pconvert(path):
212 def pconvert(path):
213 return path
213 return path
214
214
215 def localpath(path):
215 def localpath(path):
216 return path
216 return path
217
217
218 def samefile(fpath1, fpath2):
218 def samefile(fpath1, fpath2):
219 """Returns whether path1 and path2 refer to the same file. This is only
219 """Returns whether path1 and path2 refer to the same file. This is only
220 guaranteed to work for files, not directories."""
220 guaranteed to work for files, not directories."""
221 return os.path.samefile(fpath1, fpath2)
221 return os.path.samefile(fpath1, fpath2)
222
222
223 def samedevice(fpath1, fpath2):
223 def samedevice(fpath1, fpath2):
224 """Returns whether fpath1 and fpath2 are on the same device. This is only
224 """Returns whether fpath1 and fpath2 are on the same device. This is only
225 guaranteed to work for files, not directories."""
225 guaranteed to work for files, not directories."""
226 st1 = os.lstat(fpath1)
226 st1 = os.lstat(fpath1)
227 st2 = os.lstat(fpath2)
227 st2 = os.lstat(fpath2)
228 return st1.st_dev == st2.st_dev
228 return st1.st_dev == st2.st_dev
229
229
230 # os.path.normcase is a no-op, which doesn't help us on non-native filesystems
230 # os.path.normcase is a no-op, which doesn't help us on non-native filesystems
231 def normcase(path):
231 def normcase(path):
232 return path.lower()
232 return path.lower()
233
233
234 # what normcase does to ASCII strings
234 # what normcase does to ASCII strings
235 normcasespec = encoding.normcasespecs.lower
235 normcasespec = encoding.normcasespecs.lower
236 # fallback normcase function for non-ASCII strings
236 # fallback normcase function for non-ASCII strings
237 normcasefallback = normcase
237 normcasefallback = normcase
238
238
239 if sys.platform == 'darwin':
239 if sys.platform == 'darwin':
240
240
241 def normcase(path):
241 def normcase(path):
242 '''
242 '''
243 Normalize a filename for OS X-compatible comparison:
243 Normalize a filename for OS X-compatible comparison:
244 - escape-encode invalid characters
244 - escape-encode invalid characters
245 - decompose to NFD
245 - decompose to NFD
246 - lowercase
246 - lowercase
247 - omit ignored characters [200c-200f, 202a-202e, 206a-206f,feff]
247 - omit ignored characters [200c-200f, 202a-202e, 206a-206f,feff]
248
248
249 >>> normcase('UPPER')
249 >>> normcase('UPPER')
250 'upper'
250 'upper'
251 >>> normcase('Caf\xc3\xa9')
251 >>> normcase('Caf\xc3\xa9')
252 'cafe\\xcc\\x81'
252 'cafe\\xcc\\x81'
253 >>> normcase('\xc3\x89')
253 >>> normcase('\xc3\x89')
254 'e\\xcc\\x81'
254 'e\\xcc\\x81'
255 >>> normcase('\xb8\xca\xc3\xca\xbe\xc8.JPG') # issue3918
255 >>> normcase('\xb8\xca\xc3\xca\xbe\xc8.JPG') # issue3918
256 '%b8%ca%c3\\xca\\xbe%c8.jpg'
256 '%b8%ca%c3\\xca\\xbe%c8.jpg'
257 '''
257 '''
258
258
259 try:
259 try:
260 return encoding.asciilower(path) # exception for non-ASCII
260 return encoding.asciilower(path) # exception for non-ASCII
261 except UnicodeDecodeError:
261 except UnicodeDecodeError:
262 return normcasefallback(path)
262 return normcasefallback(path)
263
263
264 normcasespec = encoding.normcasespecs.lower
264 normcasespec = encoding.normcasespecs.lower
265
265
266 def normcasefallback(path):
266 def normcasefallback(path):
267 try:
267 try:
268 u = path.decode('utf-8')
268 u = path.decode('utf-8')
269 except UnicodeDecodeError:
269 except UnicodeDecodeError:
270 # OS X percent-encodes any bytes that aren't valid utf-8
270 # OS X percent-encodes any bytes that aren't valid utf-8
271 s = ''
271 s = ''
272 pos = 0
272 pos = 0
273 l = len(path)
273 l = len(path)
274 while pos < l:
274 while pos < l:
275 try:
275 try:
276 c = encoding.getutf8char(path, pos)
276 c = encoding.getutf8char(path, pos)
277 pos += len(c)
277 pos += len(c)
278 except ValueError:
278 except ValueError:
279 c = '%%%02X' % ord(path[pos])
279 c = '%%%02X' % ord(path[pos])
280 pos += 1
280 pos += 1
281 s += c
281 s += c
282
282
283 u = s.decode('utf-8')
283 u = s.decode('utf-8')
284
284
285 # Decompose then lowercase (HFS+ technote specifies lower)
285 # Decompose then lowercase (HFS+ technote specifies lower)
286 enc = unicodedata.normalize('NFD', u).lower().encode('utf-8')
286 enc = unicodedata.normalize('NFD', u).lower().encode('utf-8')
287 # drop HFS+ ignored characters
287 # drop HFS+ ignored characters
288 return encoding.hfsignoreclean(enc)
288 return encoding.hfsignoreclean(enc)
289
289
290 if sys.platform == 'cygwin':
290 if sys.platform == 'cygwin':
291 # workaround for cygwin, in which mount point part of path is
291 # workaround for cygwin, in which mount point part of path is
292 # treated as case sensitive, even though underlying NTFS is case
292 # treated as case sensitive, even though underlying NTFS is case
293 # insensitive.
293 # insensitive.
294
294
295 # default mount points
295 # default mount points
296 cygwinmountpoints = sorted([
296 cygwinmountpoints = sorted([
297 "/usr/bin",
297 "/usr/bin",
298 "/usr/lib",
298 "/usr/lib",
299 "/cygdrive",
299 "/cygdrive",
300 ], reverse=True)
300 ], reverse=True)
301
301
302 # use upper-ing as normcase as same as NTFS workaround
302 # use upper-ing as normcase as same as NTFS workaround
303 def normcase(path):
303 def normcase(path):
304 pathlen = len(path)
304 pathlen = len(path)
305 if (pathlen == 0) or (path[0] != os.sep):
305 if (pathlen == 0) or (path[0] != os.sep):
306 # treat as relative
306 # treat as relative
307 return encoding.upper(path)
307 return encoding.upper(path)
308
308
309 # to preserve case of mountpoint part
309 # to preserve case of mountpoint part
310 for mp in cygwinmountpoints:
310 for mp in cygwinmountpoints:
311 if not path.startswith(mp):
311 if not path.startswith(mp):
312 continue
312 continue
313
313
314 mplen = len(mp)
314 mplen = len(mp)
315 if mplen == pathlen: # mount point itself
315 if mplen == pathlen: # mount point itself
316 return mp
316 return mp
317 if path[mplen] == os.sep:
317 if path[mplen] == os.sep:
318 return mp + encoding.upper(path[mplen:])
318 return mp + encoding.upper(path[mplen:])
319
319
320 return encoding.upper(path)
320 return encoding.upper(path)
321
321
322 normcasespec = encoding.normcasespecs.other
322 normcasespec = encoding.normcasespecs.other
323 normcasefallback = normcase
323 normcasefallback = normcase
324
324
325 # Cygwin translates native ACLs to POSIX permissions,
325 # Cygwin translates native ACLs to POSIX permissions,
326 # but these translations are not supported by native
326 # but these translations are not supported by native
327 # tools, so the exec bit tends to be set erroneously.
327 # tools, so the exec bit tends to be set erroneously.
328 # Therefore, disable executable bit access on Cygwin.
328 # Therefore, disable executable bit access on Cygwin.
329 def checkexec(path):
329 def checkexec(path):
330 return False
330 return False
331
331
332 # Similarly, Cygwin's symlink emulation is likely to create
332 # Similarly, Cygwin's symlink emulation is likely to create
333 # problems when Mercurial is used from both Cygwin and native
333 # problems when Mercurial is used from both Cygwin and native
334 # Windows, with other native tools, or on shared volumes
334 # Windows, with other native tools, or on shared volumes
335 def checklink(path):
335 def checklink(path):
336 return False
336 return False
337
337
338 _needsshellquote = None
338 _needsshellquote = None
339 def shellquote(s):
339 def shellquote(s):
340 if os.sys.platform == 'OpenVMS':
340 if os.sys.platform == 'OpenVMS':
341 return '"%s"' % s
341 return '"%s"' % s
342 global _needsshellquote
342 global _needsshellquote
343 if _needsshellquote is None:
343 if _needsshellquote is None:
344 _needsshellquote = re.compile(r'[^a-zA-Z0-9._/+-]').search
344 _needsshellquote = re.compile(r'[^a-zA-Z0-9._/+-]').search
345 if s and not _needsshellquote(s):
345 if s and not _needsshellquote(s):
346 # "s" shouldn't have to be quoted
346 # "s" shouldn't have to be quoted
347 return s
347 return s
348 else:
348 else:
349 return "'%s'" % s.replace("'", "'\\''")
349 return "'%s'" % s.replace("'", "'\\''")
350
350
351 def quotecommand(cmd):
351 def quotecommand(cmd):
352 return cmd
352 return cmd
353
353
354 def popen(command, mode='r'):
354 def popen(command, mode='r'):
355 return os.popen(command, mode)
355 return os.popen(command, mode)
356
356
357 def testpid(pid):
357 def testpid(pid):
358 '''return False if pid dead, True if running or not sure'''
358 '''return False if pid dead, True if running or not sure'''
359 if os.sys.platform == 'OpenVMS':
359 if os.sys.platform == 'OpenVMS':
360 return True
360 return True
361 try:
361 try:
362 os.kill(pid, 0)
362 os.kill(pid, 0)
363 return True
363 return True
364 except OSError as inst:
364 except OSError as inst:
365 return inst.errno != errno.ESRCH
365 return inst.errno != errno.ESRCH
366
366
def explainexit(code):
    """return a 2-tuple (desc, code) describing a subprocess status
    (codes from kill are negative - not os.system/wait encoding)"""
    if code < 0:
        # negative codes mean death by signal
        return _("killed by signal %d") % -code, -code
    return _("exited with status %d") % code, code
373
373
def isowner(st):
    """Return True if the stat object st is from the current user."""
    current_uid = os.getuid()
    return current_uid == st.st_uid
377
377
def findexe(command):
    '''Find executable for command searching like which does.
    If command is a basename then PATH is searched for command.
    PATH isn't searched if command is an absolute or relative path.
    If command isn't found None is returned.'''
    if sys.platform == 'OpenVMS':
        return command

    def _existing(exe):
        # accept only regular files carrying the execute permission
        if os.path.isfile(exe) and os.access(exe, os.X_OK):
            return exe
        return None

    # an explicit path (contains a separator) bypasses the PATH search
    if os.sep in command:
        return _existing(command)

    if sys.platform == 'plan9':
        return _existing(os.path.join('/bin', command))

    for directory in os.environ.get('PATH', '').split(os.pathsep):
        found = _existing(os.path.join(directory, command))
        if found is not None:
            return found
    return None
403
403
def setsignalhandler():
    """Install platform-specific signal handlers; nothing to do on POSIX."""
    pass
406
406
# file kinds we report a stat for; everything else (dirs, fifos, ...) is None
_wantedkinds = set([stat.S_IFREG, stat.S_IFLNK])

def statfiles(files):
    '''Stat each file in files. Yield each stat, or None if a file does not
    exist or has a type we don't care about.'''
    lstat = os.lstat        # hoist lookups out of the loop
    getkind = stat.S_IFMT
    for name in files:
        try:
            st = lstat(name)
        except OSError as err:
            # a missing file or missing parent directory maps to None;
            # anything else is a real error
            if err.errno not in (errno.ENOENT, errno.ENOTDIR):
                raise
            yield None
            continue
        yield st if getkind(st.st_mode) in _wantedkinds else None
424
424
def getuser():
    '''return name of current user'''
    # getpass consults LOGNAME/USER/... then falls back to the pw database
    return getpass.getuser()
428
428
def username(uid=None):
    """Return the name of the user with the given uid.

    If uid is None, return the name of the current user."""
    if uid is None:
        uid = os.getuid()
    try:
        return pwd.getpwuid(uid).pw_name
    except KeyError:
        # no passwd entry: report the numeric id as a string instead
        return str(uid)
440
440
def groupname(gid=None):
    """Return the name of the group with the given gid.

    If gid is None, return the name of the current group."""
    if gid is None:
        gid = os.getgid()
    try:
        return grp.getgrgid(gid).gr_name
    except KeyError:
        # no group database entry: report the numeric id as a string
        return str(gid)
452
452
def groupmembers(name):
    """Return the list of members of the group with the given
    name, KeyError if the group does not exist.
    """
    entry = grp.getgrnam(name)
    return list(entry.gr_mem)
458
458
def spawndetached(args):
    """Spawn args[0] with argument list *args* without waiting; return its
    pid."""
    # P_DETACH only exists on some platforms; fall back to plain P_NOWAIT
    mode = os.P_NOWAIT | getattr(os, 'P_DETACH', 0)
    return os.spawnvp(mode, args[0], args)
462
462
def gethgcmd():
    """Return, as a list, the executable (argv[0]) that started this
    process."""
    return sys.argv[:1]
465
465
def termwidth():
    """Return the width of the attached terminal in columns.

    Probes stderr, stdout and stdin in turn with the TIOCGWINSZ ioctl and
    returns the first positive width reported; falls back to 80 when no
    usable tty is attached (or termios is unavailable).
    """
    try:
        import array
        import termios
        for dev in (sys.stderr, sys.stdout, sys.stdin):
            try:
                try:
                    fd = dev.fileno()
                except AttributeError:
                    continue
                if not os.isatty(fd):
                    continue
                try:
                    arri = fcntl.ioctl(fd, termios.TIOCGWINSZ, '\0' * 8)
                    width = array.array('h', arri)[1]
                    if width > 0:
                        return width
                except AttributeError:
                    pass
            except ValueError:
                pass
            except IOError as e:
                # use e.errno rather than e[0]: indexing an exception is a
                # Python 2-only behavior and is fragile
                if e.errno == errno.EINVAL:
                    pass
                else:
                    raise
    except ImportError:
        pass
    return 80
495
495
def makedir(path, notindexed):
    """Create directory *path*; *notindexed* is unused on this platform
    (it only affects content indexing on Windows)."""
    os.mkdir(path)
498
498
def unlinkpath(f, ignoremissing=False):
    """unlink and remove the directory if it is empty"""
    try:
        os.unlink(f)
    except OSError as err:
        # a missing file is tolerated only when the caller asked for it
        if not (ignoremissing and err.errno == errno.ENOENT):
            raise
    # try removing directories that might now be empty
    try:
        os.removedirs(os.path.dirname(f))
    except OSError:
        pass
511
511
def lookupreg(key, name=None, scope=None):
    """Look up *name* under registry *key*: there is no Windows registry on
    this platform, so the answer is always None."""
    return None
514
514
def hidewindow():
    """Hide current shell window.

    Used to hide the window opened when starting asynchronous
    child process under Windows, unneeded on other systems.
    """
    pass
522
522
class cachestat(object):
    """Stat-based token describing the identity of a path's current content.

    Two instances compare equal when every stat field except atime matches;
    atime changes on mere reads and so is deliberately ignored, while any
    other difference is taken to mean the file changed (or something fishy
    is going on).
    """

    # fields compared by __eq__; st_atime is intentionally absent
    _fields = ('st_mode', 'st_ino', 'st_dev', 'st_nlink', 'st_uid',
               'st_gid', 'st_size', 'st_mtime', 'st_ctime')

    def __init__(self, path):
        self.stat = os.stat(path)

    def cacheable(self):
        # an inode of 0 means the filesystem can't identify files reliably
        return bool(self.stat.st_ino)

    __hash__ = object.__hash__

    def __eq__(self, other):
        try:
            return all(getattr(self.stat, field) == getattr(other.stat, field)
                       for field in self._fields)
        except AttributeError:
            # *other* has no .stat (or lacks a field): not comparable
            return False

    def __ne__(self, other):
        return not self == other
553
553
def executablepath():
    """Path of the main executable; only meaningful on Windows, None here."""
    return None
556
556
def statislink(st):
    '''check whether a stat result is a symlink'''
    if not st:
        # preserve the falsy input (e.g. None) as the answer
        return st
    return stat.S_ISLNK(st.st_mode)
560
560
def statisexec(st):
    '''check whether a stat result is an executable file'''
    if not st:
        # preserve the falsy input (e.g. None) as the answer
        return st
    # test the owner's execute bit
    return (st.st_mode & 0o100) != 0
564
564
def poll(fds):
    """block until something happens on any file descriptor

    This is a generic helper that will check for any activity
    (read, write. exception) and return the list of touched files.

    In unsupported cases, it will raise a NotImplementedError"""
    try:
        res = select.select(fds, fds, fds)
    except ValueError: # out of range file descriptor
        raise NotImplementedError()
    # merge the three ready-lists, dropping duplicates, in fd order
    touched = set()
    for group in res:
        touched.update(group)
    return sorted(touched)
577
577
def readpipe(pipe):
    """Read all available data from a pipe."""
    # We can't fstat() a pipe because Linux will always report 0.
    # So, we set the pipe to non-blocking mode and read everything
    # that's available.
    #
    # fcntl(F_SETFL) returns 0 on success, NOT the previous flags, so the
    # original flags must be captured with F_GETFL before changing them;
    # otherwise the finally clause would clobber the pipe's flags to 0.
    oldflags = fcntl.fcntl(pipe, fcntl.F_GETFL)
    fcntl.fcntl(pipe, fcntl.F_SETFL, oldflags | os.O_NONBLOCK)

    try:
        chunks = []
        while True:
            try:
                s = pipe.read()
                if not s:
                    break
                chunks.append(s)
            except IOError:
                # nothing more available right now
                break

        return ''.join(chunks)
    finally:
        # restore the pipe's original flags
        fcntl.fcntl(pipe, fcntl.F_SETFL, oldflags)
601
def bindunixsocket(sock, path):
    """Bind the UNIX domain socket to the specified path"""
    # use relative path instead of full path at bind() if possible, since
    # AF_UNIX path has very small length limit (107 chars) on common
    # platforms (see sys/un.h)
    dirname, basename = os.path.split(path)
    bakwdfd = None
    try:
        if dirname:
            # remember the current directory so it can be restored below
            bakwdfd = os.open('.', os.O_DIRECTORY)
            os.chdir(dirname)
        sock.bind(basename)
    finally:
        # compare against None: a valid fd may be 0, which is falsy, and
        # the cwd must be restored even when chdir()/bind() raises
        if bakwdfd is not None:
            os.fchdir(bakwdfd)
            os.close(bakwdfd)
@@ -1,2857 +1,2858
1 # util.py - Mercurial utility functions and platform specific implementations
1 # util.py - Mercurial utility functions and platform specific implementations
2 #
2 #
3 # Copyright 2005 K. Thananchayan <thananck@yahoo.com>
3 # Copyright 2005 K. Thananchayan <thananck@yahoo.com>
4 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
5 # Copyright 2006 Vadim Gelfer <vadim.gelfer@gmail.com>
5 # Copyright 2006 Vadim Gelfer <vadim.gelfer@gmail.com>
6 #
6 #
7 # This software may be used and distributed according to the terms of the
7 # This software may be used and distributed according to the terms of the
8 # GNU General Public License version 2 or any later version.
8 # GNU General Public License version 2 or any later version.
9
9
10 """Mercurial utility functions and platform specific implementations.
10 """Mercurial utility functions and platform specific implementations.
11
11
12 This contains helper routines that are independent of the SCM core and
12 This contains helper routines that are independent of the SCM core and
13 hide platform-specific details from the core.
13 hide platform-specific details from the core.
14 """
14 """
15
15
16 from __future__ import absolute_import
16 from __future__ import absolute_import
17
17
18 import bz2
18 import bz2
19 import calendar
19 import calendar
20 import collections
20 import collections
21 import datetime
21 import datetime
22 import errno
22 import errno
23 import gc
23 import gc
24 import hashlib
24 import hashlib
25 import imp
25 import imp
26 import os
26 import os
27 import re as remod
27 import re as remod
28 import shutil
28 import shutil
29 import signal
29 import signal
30 import socket
30 import socket
31 import subprocess
31 import subprocess
32 import sys
32 import sys
33 import tempfile
33 import tempfile
34 import textwrap
34 import textwrap
35 import time
35 import time
36 import traceback
36 import traceback
37 import zlib
37 import zlib
38
38
39 from . import (
39 from . import (
40 encoding,
40 encoding,
41 error,
41 error,
42 i18n,
42 i18n,
43 osutil,
43 osutil,
44 parsers,
44 parsers,
45 pycompat,
45 pycompat,
46 )
46 )
47
47
# Re-export selected Python 2/3 compatibility names from pycompat at module
# level, so callers can simply use util.<name>.
for attr in (
    'empty',
    'httplib',
    'pickle',
    'queue',
    'urlerr',
    'urlparse',
    # we do import urlreq, but we do it outside the loop
    #'urlreq',
    'stringio',
    'socketserver',
    'xmlrpclib',
):
    globals()[attr] = getattr(pycompat, attr)

# This line is to make pyflakes happy:
urlreq = pycompat.urlreq

# pick the platform implementation module once at import time; the alias
# table below re-exports its functions under short util names
if os.name == 'nt':
    from . import windows as platform
else:
    from . import posix as platform

_ = i18n._
72
72
# Platform-specific implementations, re-exported so callers write
# util.<name> regardless of whether posix.py or windows.py provides it.
bindunixsocket = platform.bindunixsocket
cachestat = platform.cachestat
checkexec = platform.checkexec
checklink = platform.checklink
copymode = platform.copymode
executablepath = platform.executablepath
expandglobs = platform.expandglobs
explainexit = platform.explainexit
findexe = platform.findexe
gethgcmd = platform.gethgcmd
getuser = platform.getuser
getpid = os.getpid
groupmembers = platform.groupmembers
groupname = platform.groupname
hidewindow = platform.hidewindow
isexec = platform.isexec
isowner = platform.isowner
localpath = platform.localpath
lookupreg = platform.lookupreg
makedir = platform.makedir
nlinks = platform.nlinks
normpath = platform.normpath
normcase = platform.normcase
normcasespec = platform.normcasespec
normcasefallback = platform.normcasefallback
openhardlinks = platform.openhardlinks
oslink = platform.oslink
parsepatchoutput = platform.parsepatchoutput
pconvert = platform.pconvert
poll = platform.poll
popen = platform.popen
posixfile = platform.posixfile
quotecommand = platform.quotecommand
readpipe = platform.readpipe
rename = platform.rename
removedirs = platform.removedirs
samedevice = platform.samedevice
samefile = platform.samefile
samestat = platform.samestat
setbinary = platform.setbinary
setflags = platform.setflags
setsignalhandler = platform.setsignalhandler
shellquote = platform.shellquote
spawndetached = platform.spawndetached
split = platform.split
sshargs = platform.sshargs
# prefer the C implementation from osutil when it exists
statfiles = getattr(osutil, 'statfiles', platform.statfiles)
statisexec = platform.statisexec
statislink = platform.statislink
termwidth = platform.termwidth
testpid = platform.testpid
umask = platform.umask
unlink = platform.unlink
unlinkpath = platform.unlinkpath
username = platform.username
127
128
# Python compatibility

# unique sentinel object used to detect "attribute absent"
_notset = object()

# disable Python's problematic floating point timestamps (issue4836)
# (Python hypocritically says you shouldn't change this behavior in
# libraries, and sure enough Mercurial is not a library.)
os.stat_float_times(False)
136
137
def safehasattr(thing, attr):
    """hasattr-like test that does not swallow exceptions from properties.

    getattr with a unique sentinel avoids the broad exception swallowing
    of Python 2's hasattr().
    """
    return getattr(thing, attr, _notset) is not _notset
139
140
# digest name -> hashlib constructor; keep in sync with DIGESTS_BY_STRENGTH
DIGESTS = {
    'md5': hashlib.md5,
    'sha1': hashlib.sha1,
    'sha512': hashlib.sha512,
}
# List of digest types from strongest to weakest
DIGESTS_BY_STRENGTH = ['sha512', 'sha1', 'md5']

# sanity check: every advertised strength must have a constructor
for k in DIGESTS_BY_STRENGTH:
    assert k in DIGESTS

class digester(object):
    """helper to compute digests.

    This helper can be used to compute one or more digests given their name.

    >>> d = digester(['md5', 'sha1'])
    >>> d.update('foo')
    >>> [k for k in sorted(d)]
    ['md5', 'sha1']
    >>> d['md5']
    'acbd18db4cc2f85cedef654fccc4a4d8'
    >>> d['sha1']
    '0beec7b5ea3f0fdbc95d0dd47f3c5bc275da8a33'
    >>> digester.preferred(['md5', 'sha1'])
    'sha1'
    """

    def __init__(self, digests, s=''):
        """Create one hash object per name in *digests*, optionally seeded
        with the data *s*; raises Abort for unknown digest names."""
        self._hashes = {}
        for k in digests:
            if k not in DIGESTS:
                raise Abort(_('unknown digest type: %s') % k)
            self._hashes[k] = DIGESTS[k]()
        if s:
            self.update(s)

    def update(self, data):
        """Feed *data* to every underlying hash object."""
        for h in self._hashes.values():
            h.update(data)

    def __getitem__(self, key):
        """Return the hex digest computed for *key*; Abort if unknown."""
        if key not in DIGESTS:
            # report 'key', not 'k': the old code interpolated the stale
            # module-level loop variable into the error message
            raise Abort(_('unknown digest type: %s') % key)
        return self._hashes[key].hexdigest()

    def __iter__(self):
        return iter(self._hashes)

    @staticmethod
    def preferred(supported):
        """returns the strongest digest type in both supported and DIGESTS."""

        for k in DIGESTS_BY_STRENGTH:
            if k in supported:
                return k
        return None

class digestchecker(object):
    """file handle wrapper that additionally checks content against a given
    size and digests.

    d = digestchecker(fh, size, {'md5': '...'})

    When multiple digests are given, all of them are validated.
    """

    def __init__(self, fh, size, digests):
        self._fh = fh
        self._size = size
        self._got = 0
        self._digests = dict(digests)
        self._digester = digester(self._digests.keys())

    def read(self, length=-1):
        """Read from the wrapped handle, updating digests and byte count."""
        content = self._fh.read(length)
        self._digester.update(content)
        self._got += len(content)
        return content

    def validate(self):
        """Abort unless exactly *size* bytes were read and every expected
        digest matches the data actually seen."""
        if self._size != self._got:
            raise Abort(_('size mismatch: expected %d, got %d') %
                        (self._size, self._got))
        for k, v in self._digests.items():
            if v != self._digester[k]:
                # i18n: first parameter is a digest name
                raise Abort(_('%s mismatch: expected %s, got %s') %
                            (k, v, self._digester[k]))
230
# 'buffer' exists as a builtin only on Python 2; provide a replacement
# with the same (sliceable, offset) calling convention elsewhere.
try:
    buffer = buffer
except NameError:
    if sys.version_info[0] < 3:
        def buffer(sliceable, offset=0):
            """Return a copy of *sliceable* starting at *offset*."""
            return sliceable[offset:]
    else:
        def buffer(sliceable, offset=0):
            """Return a zero-copy memoryview of *sliceable* from *offset*."""
            return memoryview(sliceable)[offset:]
240
# close inherited file descriptors in child processes on POSIX only
closefds = os.name == 'posix'

# bytes read from a pipe per os.read() call in bufferedinputpipe
_chunksize = 4096
243
244
class bufferedinputpipe(object):
    """a manually buffered input pipe

    Python will not let us use buffered IO and lazy reading with 'polling' at
    the same time. We cannot probe the buffer state and select will not detect
    that data are ready to read if they are already buffered.

    This class let us work around that by implementing its own buffering
    (allowing efficient readline) while offering a way to know if the buffer is
    empty from the output (allowing collaboration of the buffer with polling).

    This class lives in the 'util' module because it makes use of the 'os'
    module from the python stdlib.
    """

    def __init__(self, input):
        self._input = input
        self._buffer = []    # pending chunks, newest last
        self._eof = False    # set once os.read() returns no data
        self._lenbuf = 0     # total number of buffered bytes

    @property
    def hasbuffer(self):
        """True is any data is currently buffered

        This will be used externally a pre-step for polling IO. If there is
        already data then no polling should be set in place."""
        return bool(self._buffer)

    @property
    def closed(self):
        return self._input.closed

    def fileno(self):
        return self._input.fileno()

    def close(self):
        return self._input.close()

    def read(self, size):
        # fill until enough data is buffered or the pipe hits EOF
        while (not self._eof) and (self._lenbuf < size):
            self._fillbuffer()
        return self._frombuffer(size)

    def readline(self, *args, **kwargs):
        if 1 < len(self._buffer):
            # this should not happen because both read and readline end with a
            # _frombuffer call that collapse it.
            self._buffer = [''.join(self._buffer)]
            self._lenbuf = len(self._buffer[0])
        # lfi: index of the first newline in the newest chunk, -1 if none
        lfi = -1
        if self._buffer:
            lfi = self._buffer[-1].find('\n')
        while (not self._eof) and lfi < 0:
            self._fillbuffer()
            if self._buffer:
                lfi = self._buffer[-1].find('\n')
        size = lfi + 1
        if lfi < 0: # end of file
            size = self._lenbuf
        elif 1 < len(self._buffer):
            # we need to take previous chunks into account
            size += self._lenbuf - len(self._buffer[-1])
        return self._frombuffer(size)

    def _frombuffer(self, size):
        """return at most 'size' data from the buffer

        The data are removed from the buffer."""
        if size == 0 or not self._buffer:
            return ''
        buf = self._buffer[0]
        if 1 < len(self._buffer):
            # collapse all chunks into one before slicing
            buf = ''.join(self._buffer)

        data = buf[:size]
        buf = buf[len(data):]
        if buf:
            self._buffer = [buf]
            self._lenbuf = len(buf)
        else:
            self._buffer = []
            self._lenbuf = 0
        return data

    def _fillbuffer(self):
        """read data to the buffer"""
        # read at the fd level to bypass any stdlib-level buffering
        data = os.read(self._input.fileno(), _chunksize)
        if not data:
            self._eof = True
        else:
            self._lenbuf += len(data)
            self._buffer.append(data)
337
338
def popen2(cmd, env=None, newlines=False):
    """Spawn ``cmd`` through the shell and return its (stdin, stdout) pipes.

    bufsize=-1 lets the system pick the buffer size; the unbuffered
    default (0) performs poorly on Mac OS X (Python issue4194).
    """
    proc = subprocess.Popen(cmd, shell=True, bufsize=-1,
                            close_fds=closefds,
                            stdin=subprocess.PIPE,
                            stdout=subprocess.PIPE,
                            universal_newlines=newlines,
                            env=env)
    return proc.stdin, proc.stdout
348
349
def popen3(cmd, env=None, newlines=False):
    """Like popen4(), but return only the (stdin, stdout, stderr) pipes."""
    stdin, stdout, stderr, _proc = popen4(cmd, env, newlines)
    return stdin, stdout, stderr
352
353
def popen4(cmd, env=None, newlines=False, bufsize=-1):
    """Spawn ``cmd`` through the shell with all three stdio streams piped.

    Returns (stdin, stdout, stderr, process).
    """
    proc = subprocess.Popen(cmd, shell=True, bufsize=bufsize,
                            close_fds=closefds,
                            stdin=subprocess.PIPE,
                            stdout=subprocess.PIPE,
                            stderr=subprocess.PIPE,
                            universal_newlines=newlines,
                            env=env)
    return proc.stdin, proc.stdout, proc.stderr, proc
361
362
def version():
    """Return version information if available, 'unknown' otherwise."""
    try:
        from . import __version__
    except ImportError:
        return 'unknown'
    return __version__.version
369
370
def versiontuple(v=None, n=4):
    """Parses a Mercurial version string into an N-tuple.

    The version string to be parsed is specified with the ``v`` argument.
    If it isn't defined, the current Mercurial version string will be parsed.

    ``n`` can be 2, 3, or 4. Here is how some version strings map to
    returned values:

    >>> v = '3.6.1+190-df9b73d2d444'
    >>> versiontuple(v, 2)
    (3, 6)
    >>> versiontuple(v, 3)
    (3, 6, 1)
    >>> versiontuple(v, 4)
    (3, 6, 1, '190-df9b73d2d444')

    >>> versiontuple('3.6.1+190-df9b73d2d444+20151118')
    (3, 6, 1, '190-df9b73d2d444+20151118')

    >>> v = '3.6'
    >>> versiontuple(v, 2)
    (3, 6)
    >>> versiontuple(v, 3)
    (3, 6, None)
    >>> versiontuple(v, 4)
    (3, 6, None, None)
    """
    if not v:
        v = version()
    # everything after the first '+' is local/extra build information
    vparts, sep, extra = v.partition('+')
    if not sep:
        extra = None

    vints = []
    for piece in vparts.split('.'):
        try:
            vints.append(int(piece))
        except ValueError:
            # stop at the first non-numeric component
            break
    # (3, 6) -> (3, 6, None)
    while len(vints) < 3:
        vints.append(None)

    if n == 2:
        return (vints[0], vints[1])
    if n == 3:
        return (vints[0], vints[1], vints[2])
    if n == 4:
        return (vints[0], vints[1], vints[2], extra)
422
423
# used by parsedate
# strptime-style patterns for absolute dates; presumably tried in order
# by the parser until one matches -- confirm against parsedate()
defaultdateformats = (
    '%Y-%m-%d %H:%M:%S',
    '%Y-%m-%d %I:%M:%S%p',
    '%Y-%m-%d %H:%M',
    '%Y-%m-%d %I:%M%p',
    '%Y-%m-%d',
    '%m-%d',
    '%m/%d',
    '%m/%d/%y',
    '%m/%d/%Y',
    '%a %b %d %H:%M:%S %Y',
    '%a %b %d %I:%M:%S%p %Y',
    '%a, %d %b %Y %H:%M:%S', # GNU coreutils "/bin/date --rfc-2822"
    '%b %d %H:%M:%S %Y',
    '%b %d %I:%M:%S%p %Y',
    '%b %d %H:%M:%S',
    '%b %d %I:%M:%S%p',
    '%b %d %H:%M',
    '%b %d %I:%M%p',
    '%b %d %Y',
    '%b %d',
    '%H:%M:%S',
    '%I:%M:%S%p',
    '%H:%M',
    '%I:%M%p',
    )
450
451
# coarser patterns (year/month only) accepted in addition to the defaults
# when extended date parsing is requested
extendeddateformats = defaultdateformats + (
    "%Y",
    "%Y-%m",
    "%b",
    "%b %Y",
    )
457
458
def cachefunc(func):
    '''cache the result of function calls'''
    # XXX doesn't handle keywords args
    argcount = func.__code__.co_argcount
    if argcount == 0:
        # zero-argument case: memoize the single result in a list cell
        results = []
        def f():
            if not results:
                results.append(func())
            return results[0]
        return f
    cache = {}
    if argcount == 1:
        # fast path: avoid tuple packing/unpacking for unary functions
        def f(arg):
            if arg not in cache:
                cache[arg] = func(arg)
            return cache[arg]
    else:
        def f(*args):
            if args not in cache:
                cache[args] = func(*args)
            return cache[args]

    return f
483
484
class sortdict(dict):
    '''a simple sorted dictionary

    Iteration follows insertion order; setting an existing key moves it
    to the end of that order.
    '''
    def __init__(self, data=None):
        self._list = []
        if data:
            self.update(data)
    def copy(self):
        return sortdict(self)
    def __setitem__(self, key, val):
        # re-inserting an existing key moves it to the end
        if key in self:
            self._list.remove(key)
        self._list.append(key)
        dict.__setitem__(self, key, val)
    def __iter__(self):
        return self._list.__iter__()
    def update(self, src):
        # accept either a mapping or an iterable of (key, value) pairs;
        # items() instead of iteritems() keeps this working on Python 3 too
        if isinstance(src, dict):
            src = src.items()
        for k, v in src:
            self[k] = v
    def clear(self):
        dict.clear(self)
        self._list = []
    def items(self):
        return [(k, self[k]) for k in self._list]
    def __delitem__(self, key):
        dict.__delitem__(self, key)
        self._list.remove(key)
    def pop(self, key, *args, **kwargs):
        # fix: return the popped value (it was previously discarded),
        # honoring the dict.pop() contract
        value = dict.pop(self, key, *args, **kwargs)
        try:
            self._list.remove(key)
        except ValueError:
            # key was absent and a default was returned
            pass
        return value
    def keys(self):
        return self._list
    def iterkeys(self):
        return self._list.__iter__()
    def iteritems(self):
        for k in self._list:
            yield k, self[k]
    def insert(self, index, key, val):
        # place ``key`` at ``index`` in the ordering without the
        # move-to-end behavior of __setitem__
        self._list.insert(index, key)
        dict.__setitem__(self, key, val)
528
529
class _lrucachenode(object):
    """A node in a doubly linked list.

    Holds a reference to nodes on either side as well as a key-value
    pair for the dictionary entry.
    """
    # __slots__ keeps per-node overhead small; caches may hold many nodes
    __slots__ = ('next', 'prev', 'key', 'value')

    def __init__(self):
        # neighbors are linked up by the owning lrucachedict
        self.next = None
        self.prev = None

        # the module-level ``_notset`` sentinel marks a node without an entry
        self.key = _notset
        self.value = None

    def markempty(self):
        """Mark the node as emptied."""
        self.key = _notset
547
548
class lrucachedict(object):
    """Dict that caches most recent accesses and sets.

    The dict consists of an actual backing dict - indexed by original
    key - and a doubly linked circular list defining the order of entries in
    the cache.

    The head node is the newest entry in the cache. If the cache is full,
    we recycle head.prev and make it the new head. Cache accesses result in
    the node being moved to before the existing head and being marked as the
    new head node.
    """
    def __init__(self, max):
        self._cache = {}

        # start with a single self-linked node; nodes are added lazily
        # (via _addcapacity) until ``max`` nodes exist
        self._head = head = _lrucachenode()
        head.prev = head
        head.next = head
        self._size = 1
        self._capacity = max

    def __len__(self):
        return len(self._cache)

    def __contains__(self, k):
        return k in self._cache

    def __iter__(self):
        # We don't have to iterate in cache order, but why not.
        n = self._head
        for i in range(len(self._cache)):
            yield n.key
            n = n.next

    def __getitem__(self, k):
        node = self._cache[k]
        self._movetohead(node)
        return node.value

    def __setitem__(self, k, v):
        node = self._cache.get(k)
        # Replace existing value and mark as newest.
        if node is not None:
            node.value = v
            self._movetohead(node)
            return

        if self._size < self._capacity:
            node = self._addcapacity()
        else:
            # Grab the last/oldest item.
            node = self._head.prev

            # At capacity. Kill the old entry.
            if node.key is not _notset:
                del self._cache[node.key]

        node.key = k
        node.value = v
        self._cache[k] = node
        # And mark it as newest entry. No need to adjust order since it
        # is already self._head.prev.
        self._head = node

    def __delitem__(self, k):
        node = self._cache.pop(k)
        node.markempty()

        # Temporarily mark as newest item before re-adjusting head to make
        # this node the oldest item.
        self._movetohead(node)
        self._head = node.next

    # Additional dict methods.

    def get(self, k, default=None):
        """Return the cached value for ``k``, or ``default`` when absent.

        Fix: this previously returned the internal ``_lrucachenode``
        wrapper instead of the stored value. NOTE: unlike __getitem__,
        a hit through get() does not refresh the entry's recency.
        """
        try:
            return self._cache[k].value
        except KeyError:
            return default

    def clear(self):
        # empty every linked node, then drop the backing dict; the node
        # list itself (and thus capacity) is retained for reuse
        n = self._head
        while n.key is not _notset:
            n.markempty()
            n = n.next

        self._cache.clear()

    def copy(self):
        result = lrucachedict(self._capacity)
        n = self._head.prev
        # Iterate in oldest-to-newest order, so the copy has the right ordering
        for i in range(len(self._cache)):
            result[n.key] = n.value
            n = n.prev
        return result

    def _movetohead(self, node):
        """Mark a node as the newest, making it the new head.

        When a node is accessed, it becomes the freshest entry in the LRU
        list, which is denoted by self._head.

        Visually, let's make ``N`` the new head node (* denotes head):

            previous/oldest <-> head <-> next/next newest

            ----<->--- A* ---<->-----
            |                       |
            E <-> D <-> N <-> C <-> B

        To:

            ----<->--- N* ---<->-----
            |                       |
            E <-> D <-> C <-> B <-> A

        This requires the following moves:

           C.next = D  (node.prev.next = node.next)
           D.prev = C  (node.next.prev = node.prev)
           E.next = N  (head.prev.next = node)
           N.prev = E  (node.prev = head.prev)
           N.next = A  (node.next = head)
           A.prev = N  (head.prev = node)
        """
        head = self._head
        # C.next = D
        node.prev.next = node.next
        # D.prev = C
        node.next.prev = node.prev
        # N.prev = E
        node.prev = head.prev
        # N.next = A
        # It is tempting to do just "head" here, however if node is
        # adjacent to head, this will do bad things.
        node.next = head.prev.next
        # E.next = N
        node.next.prev = node
        # A.prev = N
        node.prev.next = node

        self._head = node

    def _addcapacity(self):
        """Add a node to the circular linked list.

        The new node is inserted before the head node.
        """
        head = self._head
        node = _lrucachenode()
        head.prev.next = node
        node.prev = head.prev
        node.next = head
        head.prev = node
        self._size += 1
        return node
706
707
def lrucachefunc(func):
    '''cache most recent results of function calls'''
    cache = {}
    order = collections.deque()
    # evict the least-recently-used entry once more than 20 are cached
    if func.__code__.co_argcount == 1:
        def f(arg):
            if arg in cache:
                order.remove(arg)
            else:
                if len(cache) > 20:
                    del cache[order.popleft()]
                cache[arg] = func(arg)
            order.append(arg)
            return cache[arg]
    else:
        def f(*args):
            if args in cache:
                order.remove(args)
            else:
                if len(cache) > 20:
                    del cache[order.popleft()]
                cache[args] = func(*args)
            order.append(args)
            return cache[args]

    return f
733
734
class propertycache(object):
    """Descriptor caching the wrapped function's result on the instance.

    The first attribute access runs ``func`` and stores the result in the
    instance ``__dict__`` under the same name, so later accesses bypass
    this descriptor entirely.
    """
    def __init__(self, func):
        self.func = func
        self.name = func.__name__

    def __get__(self, obj, type=None):
        value = self.func(obj)
        self.cachevalue(obj, value)
        return value

    def cachevalue(self, obj, value):
        # __dict__ assignment required to bypass __setattr__ (eg: repoview)
        obj.__dict__[self.name] = value
746
747
def pipefilter(s, cmd):
    '''filter string S through command CMD, returning its output'''
    proc = subprocess.Popen(cmd, shell=True, close_fds=closefds,
                            stdin=subprocess.PIPE,
                            stdout=subprocess.PIPE)
    out, _err = proc.communicate(s)
    return out
753
754
def tempfilter(s, cmd):
    '''filter string S through a pair of temporary files with CMD.
    CMD is used as a template to create the real command to be run,
    with the strings INFILE and OUTFILE replaced by the real names of
    the temporary files generated.'''
    inname, outname = None, None
    try:
        infd, inname = tempfile.mkstemp(prefix='hg-filter-in-')
        fp = os.fdopen(infd, 'wb')
        fp.write(s)
        fp.close()
        outfd, outname = tempfile.mkstemp(prefix='hg-filter-out-')
        # only the name is needed; the command writes the file itself
        os.close(outfd)
        cmd = cmd.replace('INFILE', inname)
        cmd = cmd.replace('OUTFILE', outname)
        code = os.system(cmd)
        if sys.platform == 'OpenVMS' and code & 1:
            # on OpenVMS an odd status means success
            code = 0
        if code:
            raise Abort(_("command '%s' failed: %s") %
                        (cmd, explainexit(code)))
        return readfile(outname)
    finally:
        # best-effort cleanup of both temp files, whether or not the
        # command succeeded; ignore already-removed/missing files
        try:
            if inname:
                os.unlink(inname)
        except OSError:
            pass
        try:
            if outname:
                os.unlink(outname)
        except OSError:
            pass
787
788
# maps a command-string prefix to the filter implementation handling it;
# commands with no known prefix fall through to pipefilter (see filter())
filtertable = {
    'tempfile:': tempfilter,
    'pipe:': pipefilter,
    }
792
793
def filter(s, cmd):
    "filter a string through a command that transforms its input to its output"
    # dispatch on a recognized prefix; default to a plain pipe filter
    for prefix, fn in filtertable.iteritems():
        if cmd.startswith(prefix):
            rest = cmd[len(prefix):].lstrip()
            return fn(s, rest)
    return pipefilter(s, cmd)
799
800
def binary(s):
    """return true if a string is binary data"""
    if not s:
        return False
    return '\0' in s
803
804
def increasingchunks(source, min=1024, max=65536):
    '''return no less than min bytes per chunk while data remains,
    doubling min after each chunk until it reaches max'''
    def log2(x):
        # floor(log2(x)); returns 0 for x == 0
        if not x:
            return 0
        bits = 0
        while x:
            x >>= 1
            bits += 1
        return bits - 1

    pending = []
    pendinglen = 0
    for chunk in source:
        pending.append(chunk)
        pendinglen += len(chunk)
        if pendinglen < min:
            continue
        if min < max:
            # grow the threshold: at least double it, and jump straight
            # to the largest power of two not above the emitted size
            min = min << 1
            nmin = 1 << log2(pendinglen)
            if nmin > min:
                min = nmin
            if min > max:
                min = max
        yield ''.join(pending)
        pending = []
        pendinglen = 0
    if pending:
        yield ''.join(pending)
834
835
# re-export error.Abort under the name used throughout this module
Abort = error.Abort
836
837
def always(fn):
    """Return True regardless of *fn*."""
    return True
839
840
def never(fn):
    """Return False regardless of *fn*."""
    return False
842
843
def nogc(func):
    """disable garbage collector

    Python's garbage collector triggers a GC each time a certain number of
    container objects (the number being defined by gc.get_threshold()) are
    allocated even when marked not to be tracked by the collector. Tracking has
    no effect on when GCs are triggered, only on what objects the GC looks
    into. As a workaround, disable GC while building complex (huge)
    containers.

    This garbage collector issue have been fixed in 2.7.
    """
    def wrapper(*args, **kwargs):
        wasenabled = gc.isenabled()
        gc.disable()
        try:
            return func(*args, **kwargs)
        finally:
            # only re-enable if it was on before, which supports nesting
            if wasenabled:
                gc.enable()
    return wrapper
864
865
def pathto(root, n1, n2):
    '''return the relative path from one place to another.
    root should use os.sep to separate directories
    n1 should use os.sep to separate directories
    n2 should use "/" to separate directories
    returns an os.sep-separated path.

    If n1 is a relative path, it's assumed it's
    relative to root.
    n2 should always be relative to root.
    '''
    if not n1:
        return localpath(n2)
    if os.path.isabs(n1):
        # paths on different drives (Windows) cannot be expressed
        # relative to one another; fall back to an absolute join
        if os.path.splitdrive(root)[0] != os.path.splitdrive(n1)[0]:
            return os.path.join(root, localpath(n2))
        n2 = '/'.join((pconvert(root), n2))
    a, b = splitpath(n1), n2.split('/')
    a.reverse()
    b.reverse()
    # drop the common leading components of both paths
    while a and b and a[-1] == b[-1]:
        a.pop()
        b.pop()
    b.reverse()
    # climb out of what remains of n1, then descend into n2
    return os.sep.join((['..'] * len(a)) + b) or '.'
890
891
def mainfrozen():
    """return True if we are a frozen executable.

    The code supports py2exe (most common, Windows only) and tools/freeze
    (portable, not much used).
    """
    if safehasattr(sys, "frozen"):     # new py2exe
        return True
    if safehasattr(sys, "importers"):  # old py2exe
        return True
    return imp.is_frozen("__main__")   # tools/freeze
900
901
# the location of data files matching the source code
if mainfrozen() and getattr(sys, 'frozen', None) != 'macosx_app':
    # executable version (py2exe) doesn't support __file__
    datapath = os.path.dirname(sys.executable)
else:
    datapath = os.path.dirname(__file__)

# point the i18n machinery at the same location
i18n.setdatapath(datapath)
909
910
# cached path of the 'hg' executable; computed lazily by hgexecutable()
_hgexecutable = None
911
912
def hgexecutable():
    """return location of the 'hg' executable.

    Defaults to $HG or 'hg' in the search path.
    """
    if _hgexecutable is None:
        hg = os.environ.get('HG')
        mainmod = sys.modules['__main__']
        if hg:
            _sethgexecutable(hg)
        elif mainfrozen():
            if getattr(sys, 'frozen', None) == 'macosx_app':
                # Env variable set by py2app
                _sethgexecutable(os.environ['EXECUTABLEPATH'])
            else:
                _sethgexecutable(sys.executable)
        elif os.path.basename(getattr(mainmod, '__file__', '')) == 'hg':
            # __main__ is the 'hg' script itself (e.g. a source checkout)
            _sethgexecutable(mainmod.__file__)
        else:
            # last resorts: search PATH, then fall back to argv[0]
            exe = findexe('hg') or os.path.basename(sys.argv[0])
            _sethgexecutable(exe)
    return _hgexecutable
934
935
def _sethgexecutable(path):
    """set location of the 'hg' executable"""
    # stored in a module global so hgexecutable() computes it only once
    global _hgexecutable
    _hgexecutable = path
939
940
940 def _isstdout(f):
941 def _isstdout(f):
941 fileno = getattr(f, 'fileno', None)
942 fileno = getattr(f, 'fileno', None)
942 return fileno and fileno() == sys.__stdout__.fileno()
943 return fileno and fileno() == sys.__stdout__.fileno()
943
944
def system(cmd, environ=None, cwd=None, onerr=None, errprefix=None, out=None):
    '''enhanced shell command execution.
    run with environment maybe modified, maybe in different dir.

    if command fails and onerr is None, return status, else raise onerr
    object as exception.

    if out is specified, it is assumed to be a file-like object that has a
    write() method. stdout and stderr will be redirected to out.'''
    if environ is None:
        environ = {}
    # best-effort flush of our own pending output before spawning the child
    try:
        sys.stdout.flush()
    except Exception:
        pass
    def py2shell(val):
        'convert python object into string that is useful to shell'
        if val is None or val is False:
            return '0'
        if val is True:
            return '1'
        return str(val)
    origcmd = cmd  # unquoted form kept for the error message below
    cmd = quotecommand(cmd)
    if sys.platform == 'plan9' and (sys.version_info[0] == 2
                                    and sys.version_info[1] < 7):
        # subprocess kludge to work around issues in half-baked Python
        # ports, notably bichued/python:
        if not cwd is None:
            os.chdir(cwd)
        rc = os.system(cmd)
    else:
        env = dict(os.environ)
        env.update((k, py2shell(v)) for k, v in environ.iteritems())
        env['HG'] = hgexecutable()
        if out is None or _isstdout(out):
            # child writes straight to our stdout/stderr; nothing to capture
            rc = subprocess.call(cmd, shell=True, close_fds=closefds,
                                 env=env, cwd=cwd)
        else:
            # capture combined stdout+stderr and forward it line by line
            # to the caller-supplied 'out' object
            proc = subprocess.Popen(cmd, shell=True, close_fds=closefds,
                                    env=env, cwd=cwd, stdout=subprocess.PIPE,
                                    stderr=subprocess.STDOUT)
            while True:
                line = proc.stdout.readline()
                if not line:
                    break
                out.write(line)
            proc.wait()
            rc = proc.returncode
    if sys.platform == 'OpenVMS' and rc & 1:
        # NOTE: on OpenVMS odd status values indicate success, hence the
        # normalization to 0 here
        rc = 0
    if rc and onerr:
        errmsg = '%s %s' % (os.path.basename(origcmd.split(None, 1)[0]),
                            explainexit(rc)[0])
        if errprefix:
            errmsg = '%s: %s' % (errprefix, errmsg)
        raise onerr(errmsg)
    return rc
1002
1003
def checksignature(func):
    '''wrap a function with code to check for calling errors'''
    def check(*args, **kwargs):
        try:
            return func(*args, **kwargs)
        except TypeError:
            # a traceback exactly one frame deep means the TypeError came
            # from the call itself (bad argument list), not from inside func
            tb = sys.exc_info()[2]
            if len(traceback.extract_tb(tb)) == 1:
                raise error.SignatureError
            raise

    return check
1014
1015
def copyfile(src, dest, hardlink=False, copystat=False, checkambig=False):
    '''copy a file, preserving mode and optionally other stat info like
    atime/mtime

    checkambig argument is used with filestat, and is useful only if
    destination file is guarded by any lock (e.g. repo.lock or
    repo.wlock).

    copystat and checkambig should be exclusive.
    '''
    assert not (copystat and checkambig)
    oldstat = None
    if os.path.lexists(dest):
        # remember the old stat (for ambiguity detection) before removing
        # the existing destination
        if checkambig:
            oldstat = checkambig and filestat(dest)
        unlink(dest)
    # hardlinks are problematic on CIFS, quietly ignore this flag
    # until we find a way to work around it cleanly (issue4546)
    if False and hardlink:
        try:
            oslink(src, dest)
            return
        except (IOError, OSError):
            pass # fall back to normal copy
    if os.path.islink(src):
        # recreate the symlink rather than copying its target's content
        os.symlink(os.readlink(src), dest)
        # copytime is ignored for symlinks, but in general copytime isn't
        # needed for them anyway
    else:
        try:
            shutil.copyfile(src, dest)
            if copystat:
                # copystat also copies mode
                shutil.copystat(src, dest)
            else:
                shutil.copymode(src, dest)
                if oldstat and oldstat.stat:
                    newstat = filestat(dest)
                    if newstat.isambig(oldstat):
                        # stat of copied file is ambiguous to original one:
                        # nudge mtime forward so callers relying on stat
                        # comparison can see the change
                        advanced = (oldstat.stat.st_mtime + 1) & 0x7fffffff
                        os.utime(dest, (advanced, advanced))
        except shutil.Error as inst:
            raise Abort(str(inst))
1059
1060
def copyfiles(src, dst, hardlink=None, progress=lambda t, pos: None):
    """Copy a directory tree using hardlinks if possible.

    Returns (hardlink, num): whether hardlinking was still in effect at
    the end, and how many files were processed.
    """
    num = 0

    if hardlink is None:
        # only attempt hardlinks when src and dst live on the same device
        hardlink = (os.stat(src).st_dev ==
                    os.stat(os.path.dirname(dst)).st_dev)
    if hardlink:
        topic = _('linking')
    else:
        topic = _('copying')

    if os.path.isdir(src):
        os.mkdir(dst)
        for name, kind in osutil.listdir(src):
            srcname = os.path.join(src, name)
            dstname = os.path.join(dst, name)
            def nprog(t, pos):
                # 'num' is read late-bound from the enclosing scope, so
                # nested calls report a tree-wide position
                if pos is not None:
                    return progress(t, pos + num)
            hardlink, n = copyfiles(srcname, dstname, hardlink, progress=nprog)
            num += n
    else:
        if hardlink:
            try:
                oslink(src, dst)
            except (IOError, OSError):
                # hardlink failed once: give up on it for the whole tree
                hardlink = False
                shutil.copy(src, dst)
        else:
            shutil.copy(src, dst)
        num += 1
        progress(topic, num)
    progress(topic, None)

    return hardlink, num
1096
1097
_winreservednames = '''con prn aux nul
com1 com2 com3 com4 com5 com6 com7 com8 com9
lpt1 lpt2 lpt3 lpt4 lpt5 lpt6 lpt7 lpt8 lpt9'''.split()
_winreservedchars = ':*?"<>|'
def checkwinfilename(path):
    r'''Check that the base-relative path is a valid filename on Windows.
    Returns None if the path is ok, or a UI string describing the problem.

    >>> checkwinfilename("just/a/normal/path")
    >>> checkwinfilename("foo/bar/con.xml")
    "filename contains 'con', which is reserved on Windows"
    >>> checkwinfilename("foo/con.xml/bar")
    "filename contains 'con', which is reserved on Windows"
    >>> checkwinfilename("foo/bar/xml.con")
    >>> checkwinfilename("foo/bar/AUX/bla.txt")
    "filename contains 'AUX', which is reserved on Windows"
    >>> checkwinfilename("foo/bar/bla:.txt")
    "filename contains ':', which is reserved on Windows"
    >>> checkwinfilename("foo/bar/b\07la.txt")
    "filename contains '\\x07', which is invalid on Windows"
    >>> checkwinfilename("foo/bar/bla ")
    "filename ends with ' ', which is not allowed on Windows"
    >>> checkwinfilename("../bar")
    >>> checkwinfilename("foo\\")
    "filename ends with '\\', which is invalid on Windows"
    >>> checkwinfilename("foo\\/bar")
    "directory name ends with '\\', which is invalid on Windows"
    '''
    # whole-path checks first: a trailing backslash is rejected outright
    if path.endswith('\\'):
        return _("filename ends with '\\', which is invalid on Windows")
    if '\\/' in path:
        return _("directory name ends with '\\', which is invalid on Windows")
    for component in path.replace('\\', '/').split('/'):
        if not component:
            continue
        for ch in component:
            if ch in _winreservedchars:
                return _("filename contains '%s', which is reserved "
                         "on Windows") % ch
            if ord(ch) <= 31:
                return _("filename contains %r, which is invalid "
                         "on Windows") % ch
        # a reserved device name is reserved with any extension too
        stem = component.split('.')[0]
        if stem and stem.lower() in _winreservednames:
            return _("filename contains '%s', which is reserved "
                     "on Windows") % stem
        tail = component[-1]
        # substring test against '..' deliberately lets '.' and '..' pass
        if tail in '. ' and component not in '..':
            return _("filename ends with '%s', which is not allowed "
                     "on Windows") % tail
1147
1148
# pick the filename validator for the current OS: the pure-Python Windows
# check above on NT, otherwise the platform module's implementation
if os.name == 'nt':
    checkosfilename = checkwinfilename
else:
    checkosfilename = platform.checkosfilename
1152
1153
def makelock(info, pathname):
    """Create a lock file at pathname whose content is info.

    A symlink pointing at info is preferred when available.  If symlink
    creation fails with EEXIST the lock is already held and the error is
    re-raised; on other OSError values (or when os has no symlink) we
    fall back to exclusively creating a regular file containing info.
    """
    try:
        return os.symlink(info, pathname)
    except OSError as why:
        if why.errno == errno.EEXIST:
            raise
        # other symlink failures: fall through to the plain-file scheme
    except AttributeError: # no symlink in os
        pass

    ld = os.open(pathname, os.O_CREAT | os.O_WRONLY | os.O_EXCL)
    try:
        os.write(ld, info)
    finally:
        # previously the descriptor leaked if os.write raised; always close
        os.close(ld)
1165
1166
def readlock(pathname):
    """Return the content of the lock file at pathname.

    Reads the symlink target when the lock is a symlink; when the path is
    not a symlink (EINVAL), symlinks are unsupported (ENOSYS), or os has
    no readlink at all, falls back to reading a regular file.
    """
    try:
        return os.readlink(pathname)
    except OSError as why:
        if why.errno not in (errno.EINVAL, errno.ENOSYS):
            raise
    except AttributeError: # no symlink in os
        pass
    fp = posixfile(pathname)
    r = fp.read()
    fp.close()
    return r
1178
1179
def fstat(fp):
    '''stat file object that may not have fileno method.'''
    try:
        fd = fp.fileno()
    except AttributeError:
        # no fileno(): fall back to stat'ing by name
        return os.stat(fp.name)
    return os.fstat(fd)
1185
1186
1186 # File system features
1187 # File system features
1187
1188
def checkcase(path):
    """
    Return true if the given path is on a case-sensitive filesystem

    Requires a path (like /foo/.hg) ending with a foldable final
    directory component.
    """
    st1 = os.lstat(path)
    dirpath, base = os.path.split(path)
    folded = base.upper()
    if folded == base:
        folded = base.lower()
    if folded == base:
        # name has no case to fold: no evidence against case sensitivity
        return True
    variant = os.path.join(dirpath, folded)
    try:
        st2 = os.lstat(variant)
    except OSError:
        # folded variant doesn't exist: filesystem distinguishes case
        return True
    # identical stat means both spellings hit the same file
    return st2 != st1
1210
1211
# _re2 is a tri-state flag: False when the re2 module cannot be imported,
# None when it imported but has not yet been validated, and True/False
# after _re._checkre2() probes whether matching actually works.
try:
    import re2
    _re2 = None
except ImportError:
    _re2 = False
1216
1217
class _re(object):
    """Wrapper around the stdlib 're' module that transparently uses the
    re2 binding when it is available and the requested flags allow it."""
    def _checkre2(self):
        # probe re2 once and cache the verdict in the module-level _re2
        global _re2
        try:
            # check if match works, see issue3964
            _re2 = bool(re2.match(r'\[([^\[]+)\]', '[ui]'))
        except ImportError:
            _re2 = False

    def compile(self, pat, flags=0):
        '''Compile a regular expression, using re2 if possible

        For best performance, use only re2-compatible regexp features. The
        only flags from the re module that are re2-compatible are
        IGNORECASE and MULTILINE.'''
        if _re2 is None:
            self._checkre2()
        if _re2 and (flags & ~(remod.IGNORECASE | remod.MULTILINE)) == 0:
            # re2 takes no flags argument; encode them inline instead
            if flags & remod.IGNORECASE:
                pat = '(?i)' + pat
            if flags & remod.MULTILINE:
                pat = '(?m)' + pat
            try:
                return re2.compile(pat)
            except re2.error:
                # pattern uses features re2 doesn't support; fall back
                pass
        return remod.compile(pat, flags)

    @propertycache
    def escape(self):
        '''Return the version of escape corresponding to self.compile.

        This is imperfect because whether re2 or re is used for a particular
        function depends on the flags, etc, but it's the best we can do.
        '''
        global _re2
        if _re2 is None:
            self._checkre2()
        if _re2:
            return re2.escape
        else:
            return remod.escape
1259
1260
# shared instance of the re2/re wrapper defined above; note it shadows the
# name 're' within this module
re = _re()
1261
1262
# per-directory cache of {normcased name: on-disk name}, shared across calls
_fspathcache = {}
def fspath(name, root):
    '''Get name in the case stored in the filesystem

    The name should be relative to root, and be normcase-ed for efficiency.

    Note that this function is unnecessary, and should not be
    called, for case-sensitive filesystems (simply because it's expensive).

    The root should be normcase-ed, too.
    '''
    def _makefspathcacheentry(dir):
        # map normcased name -> on-disk name for one directory listing
        return dict((normcase(n), n) for n in os.listdir(dir))

    seps = os.sep
    if os.altsep:
        seps = seps + os.altsep
    # Protect backslashes. This gets silly very quickly.
    # str.replace returns a new string, so its result must be kept:
    # otherwise '\' stays unescaped inside the character classes below
    # and the backslash separator is silently dropped on Windows.
    seps = seps.replace('\\', '\\\\')
    pattern = remod.compile(r'([^%s]+)|([%s]+)' % (seps, seps))
    dir = os.path.normpath(root)
    result = []
    for part, sep in pattern.findall(name):
        if sep:
            # separator runs are passed through unchanged
            result.append(sep)
            continue

        if dir not in _fspathcache:
            _fspathcache[dir] = _makefspathcacheentry(dir)
        contents = _fspathcache[dir]

        found = contents.get(part)
        if not found:
            # retry "once per directory" per "dirstate.walk" which
            # may take place for each patches of "hg qpush", for example
            _fspathcache[dir] = contents = _makefspathcacheentry(dir)
            found = contents.get(part)

        # fall back to the normcased spelling if the entry vanished
        result.append(found or part)
        dir = os.path.join(dir, part)

    return ''.join(result)
1304
1305
def checknlink(testfile):
    '''check whether hardlink count reporting works properly

    Creates two scratch files next to testfile (".hgtmp1"/".hgtmp2"
    suffixes), hardlinks one to the other, and verifies nlinks() reports
    a count above 1.  Returns False on any failure; the scratch files are
    always removed.
    '''

    # testfile may be open, so we need a separate file for checking to
    # work around issue2543 (or testfile may get lost on Samba shares)
    f1 = testfile + ".hgtmp1"
    if os.path.lexists(f1):
        return False
    try:
        posixfile(f1, 'w').close()
    except IOError:
        return False

    f2 = testfile + ".hgtmp2"
    fd = None
    try:
        oslink(f1, f2)
        # nlinks() may behave differently for files on Windows shares if
        # the file is open.
        fd = posixfile(f2)
        return nlinks(f2) > 1
    except OSError:
        return False
    finally:
        if fd is not None:
            fd.close()
        for f in (f1, f2):
            try:
                os.unlink(f)
            except OSError:
                pass
1336
1337
def endswithsep(path):
    '''Check path ends with os.sep or os.altsep.'''
    if path.endswith(os.sep):
        return True
    # mirrors the original short-circuit: yields os.altsep itself (None on
    # platforms without an altsep) when no separator matches
    return os.altsep and path.endswith(os.altsep)
1340
1341
def splitpath(path):
    """Break *path* apart on os.sep (and only os.sep).

    os.altsep is intentionally ignored: this is a drop-in spelling of
    "path.split(os.sep)".  Run os.path.normpath() on the input first if
    separators may be mixed.
    """
    return path.split(os.sep)
1348
1349
def gui():
    '''Are we running in a GUI?'''
    if sys.platform != 'darwin':
        return os.name == "nt" or os.environ.get("DISPLAY")
    if 'SSH_CONNECTION' in os.environ:
        # handle SSH access to a box where the user is logged in
        return False
    elif getattr(osutil, 'isgui', None):
        # check if a CoreGraphics session is available
        return osutil.isgui()
    else:
        # pure build; use a safe default
        return True
1363
1364
def mktempcopy(name, emptyok=False, createmode=None):
    """Create a temporary file with the same contents from name

    The permission bits are copied from the original file.

    If the temporary file is going to be truncated immediately, you
    can use emptyok=True as an optimization.

    Returns the name of the temporary file.
    """
    d, fn = os.path.split(name)
    fd, temp = tempfile.mkstemp(prefix='.%s-' % fn, dir=d)
    os.close(fd)
    # Temporary files are created with mode 0600, which is usually not
    # what we want. If the original file already exists, just copy
    # its mode. Otherwise, manually obey umask.
    copymode(name, temp, createmode)
    if emptyok:
        return temp
    try:
        try:
            ifp = posixfile(name, "rb")
        except IOError as inst:
            if inst.errno == errno.ENOENT:
                # missing source: the (empty) temp file is the right result
                return temp
            if not getattr(inst, 'filename', None):
                inst.filename = name
            raise
        ofp = posixfile(temp, "wb")
        for chunk in filechunkiter(ifp):
            ofp.write(chunk)
        ifp.close()
        ofp.close()
    except: # re-raises
        # never leave a half-written temp file behind
        try: os.unlink(temp)
        except OSError: pass
        raise
    return temp
1402
1403
class filestat(object):
    """help to exactly detect change of a file

    'stat' attribute is result of 'os.stat()' if specified 'path'
    exists. Otherwise, it is None. This can avoid preparative
    'exists()' examination on client side of this class.
    """
    def __init__(self, path):
        try:
            self.stat = os.stat(path)
        except OSError as err:
            if err.errno != errno.ENOENT:
                raise
            # missing file: record None rather than propagating ENOENT
            self.stat = None

    __hash__ = object.__hash__

    def __eq__(self, old):
        try:
            # if ambiguity between stat of new and old file is
            # avoided, comparision of size, ctime and mtime is enough
            # to exactly detect change of a file regardless of platform
            new, prev = self.stat, old.stat
            return ((new.st_size, new.st_ctime, new.st_mtime) ==
                    (prev.st_size, prev.st_ctime, prev.st_mtime))
        except AttributeError:
            # one side has stat None (file missing): never equal
            return False

    def isambig(self, old):
        """Examine whether new (= self) stat is ambiguous against old one

        "S[N]" below means stat of a file at N-th change:

        - S[n-1].ctime < S[n].ctime: can detect change of a file
        - S[n-1].ctime == S[n].ctime
          - S[n-1].ctime < S[n].mtime: means natural advancing (*1)
          - S[n-1].ctime == S[n].mtime: is ambiguous (*2)
          - S[n-1].ctime > S[n].mtime: never occurs naturally (don't care)
        - S[n-1].ctime > S[n].ctime: never occurs naturally (don't care)

        Case (*2) above means that a file was changed twice or more at
        same time in sec (= S[n-1].ctime), and comparison of timestamp
        is ambiguous.

        Base idea to avoid such ambiguity is "advance mtime 1 sec, if
        timestamp is ambiguous".

        But advancing mtime only in case (*2) doesn't work as
        expected, because naturally advanced S[n].mtime in case (*1)
        might be equal to manually advanced S[n-1 or earlier].mtime.

        Therefore, all "S[n-1].ctime == S[n].ctime" cases should be
        treated as ambiguous regardless of mtime, to avoid overlooking
        by confliction between such mtime.

        Advancing mtime "if isambig(oldstat)" ensures "S[n-1].mtime !=
        S[n].mtime", even if size of a file isn't changed.
        """
        try:
            return self.stat.st_ctime == old.stat.st_ctime
        except AttributeError:
            return False

    def __ne__(self, other):
        return not self == other
1468
1469
class atomictempfile(object):
    '''writable file object that atomically updates a file

    All writes go to a temporary copy of the original file; close()
    renames that copy over the original name, making the changes
    visible in one step.  If the object is destroyed without being
    closed, every write is discarded.

    The checkambig constructor argument cooperates with filestat and is
    only useful while the target file is guarded by a lock
    (e.g. repo.lock or repo.wlock).
    '''
    def __init__(self, name, mode='w+b', createmode=None, checkambig=False):
        self.__name = name # permanent name
        self._tempname = mktempcopy(name, emptyok=('w' in mode),
                                    createmode=createmode)
        self._fp = posixfile(self._tempname, mode)
        self._checkambig = checkambig

        # delegate the file API straight to the temporary file
        self.read = self._fp.read
        self.write = self._fp.write
        self.seek = self._fp.seek
        self.tell = self._fp.tell
        self.fileno = self._fp.fileno

    def close(self):
        """Flush writes and atomically move the temp copy into place."""
        if self._fp.closed:
            return
        self._fp.close()
        target = localpath(self.__name)
        before = self._checkambig and filestat(target)
        if before and before.stat:
            rename(self._tempname, target)
            after = filestat(target)
            if after.isambig(before):
                # new stat is indistinguishable from the old one:
                # nudge mtime forward so readers can see the change
                advanced = (before.stat.st_mtime + 1) & 0x7fffffff
                os.utime(target, (advanced, advanced))
        else:
            rename(self._tempname, target)

    def discard(self):
        """Throw away all writes and remove the temporary file."""
        if not self._fp.closed:
            try:
                os.unlink(self._tempname)
            except OSError:
                pass
            self._fp.close()

    def __del__(self):
        if safehasattr(self, '_fp'): # constructor actually did something
            self.discard()

    def __enter__(self):
        return self

    def __exit__(self, exctype, excvalue, traceback):
        if exctype is not None:
            self.discard()
        else:
            self.close()
1531
1532
def makedirs(name, mode=None, notindexed=False):
    """recursive directory creation with parent mode inheritance

    Newly created directories are marked as "not to be indexed by
    the content indexing service", if ``notindexed`` is specified
    for "write" mode access.
    """
    try:
        makedir(name, notindexed)
    except OSError as exc:
        if exc.errno == errno.EEXIST:
            # directory already exists; nothing left to do
            return
        if exc.errno != errno.ENOENT or not name:
            raise
        # a parent is missing: create it recursively, then retry
        parent = os.path.dirname(os.path.abspath(name))
        if parent == name:
            raise
        makedirs(parent, mode, notindexed)
        try:
            makedir(name, notindexed)
        except OSError as exc:
            # catch EEXIST to handle creation races
            if exc.errno == errno.EEXIST:
                return
            raise
    if mode is not None:
        os.chmod(name, mode)
1559
1560
def readfile(path):
    """Return the entire content of the file at path as bytes."""
    fp = open(path, 'rb')
    try:
        return fp.read()
    finally:
        fp.close()
1563
1564
def writefile(path, text):
    """Create or overwrite the file at path with the given content."""
    fp = open(path, 'wb')
    try:
        fp.write(text)
    finally:
        fp.close()
1567
1568
def appendfile(path, text):
    """Append the given content to path, creating the file if needed."""
    fp = open(path, 'ab')
    try:
        fp.write(text)
    finally:
        fp.close()
1571
1572
class chunkbuffer(object):
    """Allow arbitrary sized chunks of data to be efficiently read from an
    iterator over chunks of arbitrary size."""

    def __init__(self, in_iter):
        """in_iter is the iterator that's iterating over the input chunks."""
        def splitbig(chunks):
            # cap chunk size: anything over 1MB is re-yielded as 256kB slices
            for chunk in chunks:
                if len(chunk) > 2**20:
                    for pos in range(0, len(chunk), 2**18):
                        yield chunk[pos:pos + 2**18]
                else:
                    yield chunk
        self.iter = splitbig(in_iter)
        self._queue = collections.deque()
        self._chunkoffset = 0  # consumed prefix of self._queue[0]

    def read(self, l=None):
        """Read L bytes of data from the iterator of chunks of data.
        Returns less than L bytes if the iterator runs dry.

        If size parameter is omitted, read everything"""
        if l is None:
            return ''.join(self.iter)

        want = l
        out = []
        pending = self._queue
        while want > 0:
            # top up the queue with roughly 256kB of chunks
            if not pending:
                budget = 2**18
                for chunk in self.iter:
                    pending.append(chunk)
                    budget -= len(chunk)
                    if budget <= 0:
                        break
                if not pending:
                    break  # iterator ran dry

            # Peek rather than popleft()/appendleft(): for partial reads
            # that avoids two deque mutations plus a throwaway string for
            # the remainder of the chunk.
            head = pending[0]
            headlen = len(head)
            off = self._chunkoffset

            # consume the whole chunk
            if off == 0 and want >= headlen:
                want -= headlen
                pending.popleft()
                out.append(head)
                # self._chunkoffset stays at 0
                continue

            avail = headlen - off

            # consume the rest of a partially-read chunk
            if want >= avail:
                want -= avail
                pending.popleft()
                # off != 0 here (handled above), so this is not merely
                # a full copy via head[0:]
                out.append(head[off:])
                self._chunkoffset = 0

            # take a slice and remember how far we got
            else:
                out.append(head[off:off + want])
                self._chunkoffset += want
                want -= avail  # goes negative; terminates the loop

        return ''.join(out)
1652
1653
def filechunkiter(f, size=65536, limit=None):
    """Create a generator that produces the data in the file size
    (default 65536) bytes at a time, up to optional limit (default is
    to read all data).  Chunks may be less than size bytes if the
    chunk is the last chunk in the file, or the file is a socket or
    some other type of file that sometimes reads less data than is
    requested."""
    assert size >= 0
    assert limit is None or limit >= 0
    while True:
        nbytes = size if limit is None else min(limit, size)
        # short-circuit on nbytes == 0: never issue a zero-length read
        s = nbytes and f.read(nbytes)
        if not s:
            break
        if limit:
            limit -= len(s)
        yield s
1673
1674
def makedate(timestamp=None):
    '''Return a unix timestamp (or the current time) as a (unixtime,
    offset) tuple based off the local timezone.'''
    when = time.time() if timestamp is None else timestamp
    if when < 0:
        hint = _("check your clock")
        raise Abort(_("negative timestamp: %d") % when, hint=hint)
    # local-vs-UTC difference of the same instant gives the zone offset
    skew = (datetime.datetime.utcfromtimestamp(when) -
            datetime.datetime.fromtimestamp(when))
    return when, skew.days * 86400 + skew.seconds
1686
1687
def datestr(date=None, format='%a %b %d %H:%M:%S %Y %1%2'):
    """represent a (unixtime, offset) tuple as a localized time.
    unixtime is seconds since the epoch, and offset is the time zone's
    number of seconds away from UTC.

    >>> datestr((0, 0))
    'Thu Jan 01 00:00:00 1970 +0000'
    >>> datestr((42, 0))
    'Thu Jan 01 00:00:42 1970 +0000'
    >>> datestr((-42, 0))
    'Wed Dec 31 23:59:18 1969 +0000'
    >>> datestr((0x7fffffff, 0))
    'Tue Jan 19 03:14:07 2038 +0000'
    >>> datestr((-0x80000000, 0))
    'Fri Dec 13 20:45:52 1901 +0000'
    """
    t, tz = date or makedate()
    if "%1" in format or "%2" in format or "%z" in format:
        # expand the Mercurial-specific %z/%1/%2 timezone specifiers
        sign = "-" if tz > 0 else "+"
        q, r = divmod(abs(tz) // 60, 60)
        format = format.replace("%z", "%1%2")
        format = format.replace("%1", "%c%02d" % (sign, q))
        format = format.replace("%2", "%02d" % r)
    # clamp the zone-shifted time to signed 32-bit seconds
    d = min(max(t - tz, -0x80000000), 0x7fffffff)
    # Never use time.gmtime() and datetime.datetime.fromtimestamp()
    # because they use the gmtime() system call which is buggy on Windows
    # for negative values.
    shifted = datetime.datetime(1970, 1, 1) + datetime.timedelta(seconds=d)
    return shifted.strftime(format)
1722
1723
def shortdate(date=None):
    """turn (timestamp, tzoff) tuple into iso 8631 date."""
    # delegate to datestr with a fixed ISO-style day format
    fmt = '%Y-%m-%d'
    return datestr(date, format=fmt)
1726
1727
def parsetimezone(tz):
    """parse a timezone string and return an offset integer

    Accepts numeric offsets like "+0200"/"-0530" and the literal names
    "GMT" and "UTC".  Returns the offset in seconds (negative for zones
    east of UTC, matching the (unixtime, offset) convention used by
    strdate/datestr), or None if the string is not a recognized zone.
    """
    # Check the length before indexing: the original tested tz[0]
    # first, which raised IndexError for an empty string instead of
    # reporting "not a timezone" by returning None.
    if len(tz) == 5 and tz[0] in "+-" and tz[1:].isdigit():
        sign = 1 if tz[0] == "+" else -1
        hours = int(tz[1:3])
        minutes = int(tz[3:5])
        return -sign * (hours * 60 + minutes) * 60
    if tz == "GMT" or tz == "UTC":
        return 0
    return None
1737
1738
def strdate(string, format, defaults=[]):
    """parse a localized time string and return a (unixtime, offset) tuple.
    if the string cannot be parsed, ValueError is raised."""
    # NOTE: unixtime = localunixtime + offset
    offset, date = parsetimezone(string.split()[-1]), string
    if offset is not None:
        # the last word was a timezone; parse the remainder without it
        date = " ".join(string.split()[:-1])

    # append defaults for any elements missing from the format string
    # (defaults maps field group -> (biased, today) pair; it is only
    # indexed, never mutated)
    usenow = False # default to using biased defaults
    for part in ("S", "M", "HI", "d", "mb", "yY"): # decreasing specificity
        if not any(("%" + p) in format for p in part):
            date += "@" + defaults[part][usenow]
            format += "@%" + part[0]
        else:
            # We've found a specific time element, less specific time
            # elements are relative to today
            usenow = True

    timetuple = time.strptime(date, format)
    localunixtime = int(calendar.timegm(timetuple))
    if offset is None:
        # no explicit zone: derive the offset from the local timezone
        unixtime = int(time.mktime(timetuple))
        offset = unixtime - localunixtime
    else:
        unixtime = localunixtime + offset
    return unixtime, offset
1767
1768
def parsedate(date, formats=None, bias=None):
    """parse a localized date/time and return a (unixtime, offset) tuple.

    The date may be a "unixtime offset" string or in one of the specified
    formats. If the date already is a (unixtime, offset) tuple, it is returned.

    >>> parsedate(' today ') == parsedate(\
                                  datetime.date.today().strftime('%b %d'))
    True
    >>> parsedate( 'yesterday ') == parsedate((datetime.date.today() -\
                                               datetime.timedelta(days=1)\
                                              ).strftime('%b %d'))
    True
    >>> now, tz = makedate()
    >>> strnow, strtz = parsedate('now')
    >>> (strnow - now) < 1
    True
    >>> tz == strtz
    True
    """
    bias = bias or {}
    if not date:
        return 0, 0
    if isinstance(date, tuple) and len(date) == 2:
        return date
    formats = formats or defaultdateformats
    date = date.strip()

    if date == 'now' or date == _('now'):
        return makedate()
    if date == 'today' or date == _('today'):
        date = datetime.date.today().strftime('%b %d')
    elif date == 'yesterday' or date == _('yesterday'):
        date = (datetime.date.today() -
                datetime.timedelta(days=1)).strftime('%b %d')

    try:
        # fast path: a raw "unixtime offset" pair
        when, offset = map(int, date.split(' '))
    except ValueError:
        # slow path: try every known format, filling fields the format
        # does not mention from per-field defaults
        now = makedate()
        defaults = {}
        for part in ("d", "mb", "yY", "HI", "M", "S"):
            # the bias rounds the specific end of unknown fields...
            rounded = bias.get(part)
            if rounded is None:
                rounded = "00" if part[0] in "HMS" else "0"
            # ...while the generic end is pinned to today's date
            today = datestr(now, "%" + part[0])
            defaults[part] = (rounded, today)

        for fmt in formats:
            try:
                when, offset = strdate(date, fmt, defaults)
            except (ValueError, OverflowError):
                pass
            else:
                break
        else:
            raise Abort(_('invalid date: %r') % date)
    # validate explicit (probably user-specified) date and
    # time zone offset. values must fit in signed 32 bits for
    # current 32-bit linux runtimes. timezones go from UTC-12
    # to UTC+14
    if when < -0x80000000 or when > 0x7fffffff:
        raise Abort(_('date exceeds 32 bits: %d') % when)
    if offset < -50400 or offset > 43200:
        raise Abort(_('impossible time zone offset: %d') % offset)
    return when, offset
1844
1845
def matchdate(date):
    """Return a function that matches a given date match specifier

    Formats include:

    '{date}' match a given date to the accuracy provided

    '<{date}' on or before a given date

    '>{date}' on or after a given date

    >>> p1 = parsedate("10:29:59")
    >>> p2 = parsedate("10:30:00")
    >>> p3 = parsedate("10:30:59")
    >>> p4 = parsedate("10:31:00")
    >>> p5 = parsedate("Sep 15 10:30:00 1999")
    >>> f = matchdate("10:30")
    >>> f(p1[0])
    False
    >>> f(p2[0])
    True
    >>> f(p3[0])
    True
    >>> f(p4[0])
    False
    >>> f(p5[0])
    False
    """

    def lower(date):
        # round unspecified fields down (January the 1st, midnight)
        d = {'mb': "1", 'd': "1"}
        return parsedate(date, extendeddateformats, d)[0]

    def upper(date):
        # round unspecified fields up (December, 23:59:59); probe
        # shrinking month lengths until one is valid for the month
        d = {'mb': "12", 'HI': "23", 'M': "59", 'S': "59"}
        for days in ("31", "30", "29"):
            try:
                d["d"] = days
                return parsedate(date, extendeddateformats, d)[0]
            except Abort:
                pass
        d["d"] = "28"
        return parsedate(date, extendeddateformats, d)[0]

    date = date.strip()

    if not date:
        raise Abort(_("dates cannot consist entirely of whitespace"))
    if date[0] == "<":
        if not date[1:]:
            raise Abort(_("invalid day spec, use '<DATE'"))
        when = upper(date[1:])
        return lambda x: x <= when
    if date[0] == ">":
        if not date[1:]:
            raise Abort(_("invalid day spec, use '>DATE'"))
        when = lower(date[1:])
        return lambda x: x >= when
    if date[0] == "-":
        # "-N": everything within the last N days
        try:
            days = int(date[1:])
        except ValueError:
            raise Abort(_("invalid day spec: %s") % date[1:])
        if days < 0:
            raise Abort(_('%s must be nonnegative (see "hg help dates")')
                        % date[1:])
        when = makedate()[0] - days * 3600 * 24
        return lambda x: x >= when
    if " to " in date:
        a, b = date.split(" to ")
        start, stop = lower(a), upper(b)
        return lambda x: x >= start and x <= stop
    # a bare date matches the whole interval it describes
    start, stop = lower(date), upper(date)
    return lambda x: x >= start and x <= stop
1920
1921
def stringmatcher(pattern):
    """
    accepts a string, possibly starting with 're:' or 'literal:' prefix.
    returns the matcher name, pattern, and matcher function.
    missing or unknown prefixes are treated as literal matches.

    helper for tests:
    >>> def test(pattern, *tests):
    ...     kind, pattern, matcher = stringmatcher(pattern)
    ...     return (kind, pattern, [bool(matcher(t)) for t in tests])

    exact matching (no prefix):
    >>> test('abcdefg', 'abc', 'def', 'abcdefg')
    ('literal', 'abcdefg', [False, False, True])

    regex matching ('re:' prefix)
    >>> test('re:a.+b', 'nomatch', 'fooadef', 'fooadefbar')
    ('re', 'a.+b', [False, False, True])

    force exact matches ('literal:' prefix)
    >>> test('literal:re:foobar', 'foobar', 're:foobar')
    ('literal', 're:foobar', [False, True])

    unknown prefixes are ignored and treated as literals
    >>> test('foo:bar', 'foo', 'bar', 'foo:bar')
    ('literal', 'foo:bar', [False, False, True])
    """
    if pattern.startswith('re:'):
        body = pattern[3:]
        try:
            regex = remod.compile(body)
        except remod.error as e:
            raise error.ParseError(_('invalid regular expression: %s')
                                   % e)
        return 're', body, regex.search
    if pattern.startswith('literal:'):
        pattern = pattern[8:]
    # no prefix (or an unknown one): exact string comparison
    return 'literal', pattern, pattern.__eq__
1959
1960
def shortuser(user):
    """Return a short representation of a user name or email address."""
    # strip, in order: the mail domain, a leading 'Real Name <' part,
    # then anything after the first space or the first dot
    user = user.split('@', 1)[0]
    user = user.split('<', 1)[-1]
    user = user.split(' ', 1)[0]
    user = user.split('.', 1)[0]
    return user
1975
1976
def emailuser(user):
    """Return the user portion of an email address."""
    user = user.split('@', 1)[0]   # drop the mail domain
    user = user.split('<', 1)[-1]  # drop a leading "Real Name <" part
    return user
1985
1986
def email(author):
    '''get email of author.'''
    # take everything between the first '<' and the first '>'; if either
    # bracket is missing, fall back gracefully (no '<' -> start at 0,
    # no '>' -> run to end of string)
    start = author.find('<') + 1
    end = author.find('>')
    if end == -1:
        return author[start:]
    return author[start:end]
1992
1993
def ellipsis(text, maxlength=400):
    """Trim string to at most maxlength (default: 400) columns in display."""
    trimmed = encoding.trim(text, maxlength, ellipsis='...')
    return trimmed
1996
1997
def unitcountfn(*unittable):
    '''return a function that renders a readable count of some quantity'''

    def go(count):
        # pick the first row whose threshold (multiplier * divisor) the
        # count reaches; rows are expected largest-unit first
        for multiplier, divisor, fmt in unittable:
            threshold = divisor * multiplier
            if count < threshold:
                continue
            return fmt % (count / float(divisor))
        # nothing reached its threshold: fall back to the last (smallest) unit
        return unittable[-1][2] % count

    return go
2007
2008
# Render a byte count using the coarsest unit that keeps roughly three
# significant digits, e.g. "1.23 GB", "12.3 MB", "123 KB"; counts below
# 100 bytes fall through to the plain "bytes" row.
bytecount = unitcountfn(
    (100, 1 << 30, _('%.0f GB')),
    (10, 1 << 30, _('%.1f GB')),
    (1, 1 << 30, _('%.2f GB')),
    (100, 1 << 20, _('%.0f MB')),
    (10, 1 << 20, _('%.1f MB')),
    (1, 1 << 20, _('%.2f MB')),
    (100, 1 << 10, _('%.0f KB')),
    (10, 1 << 10, _('%.1f KB')),
    (1, 1 << 10, _('%.2f KB')),
    (1, 1, _('%.0f bytes')),
    )
2020
2021
def uirepr(s):
    # repr() escapes every backslash; collapse the doubled ones so that
    # Windows paths stay readable in ui output
    escaped = repr(s)
    return escaped.replace('\\\\', '\\')
2024
2025
# delay import of textwrap; the first call replaces this function with the
# generated class (see the 'global' assignment at the bottom)
def MBTextWrapper(**kwargs):
    """Return a width-aware TextWrapper configured with **kwargs."""
    class tw(textwrap.TextWrapper):
        """
        Extend TextWrapper for width-awareness.

        Neither number of 'bytes' in any encoding nor 'characters' is
        appropriate to calculate terminal columns for specified string.

        Original TextWrapper implementation uses built-in 'len()' directly,
        so overriding is needed to use width information of each characters.

        In addition, characters classified into 'ambiguous' width are
        treated as wide in East Asian area, but as narrow in other.

        This requires use decision to determine width of such characters.
        """
        def _cutdown(self, ucstr, space_left):
            # split ucstr into (head, rest) where head occupies at most
            # space_left display columns
            l = 0
            colwidth = encoding.ucolwidth
            for i in xrange(len(ucstr)):
                l += colwidth(ucstr[i])
                if space_left < l:
                    return (ucstr[:i], ucstr[i:])
            return ucstr, ''

        # overriding of base class
        def _handle_long_word(self, reversed_chunks, cur_line, cur_len, width):
            # a chunk too wide for any line: either break it at the column
            # boundary or, if breaking is disallowed and the line is empty,
            # emit it whole
            space_left = max(width - cur_len, 1)

            if self.break_long_words:
                cut, res = self._cutdown(reversed_chunks[-1], space_left)
                cur_line.append(cut)
                reversed_chunks[-1] = res
            elif not cur_line:
                cur_line.append(reversed_chunks.pop())

        # this overriding code is imported from TextWrapper of Python 2.6
        # to calculate columns of string by 'encoding.ucolwidth()'
        def _wrap_chunks(self, chunks):
            colwidth = encoding.ucolwidth

            lines = []
            if self.width <= 0:
                raise ValueError("invalid width %r (must be > 0)" % self.width)

            # Arrange in reverse order so items can be efficiently popped
            # from a stack of chunks.
            chunks.reverse()

            while chunks:

                # Start the list of chunks that will make up the current line.
                # cur_len is just the length of all the chunks in cur_line.
                cur_line = []
                cur_len = 0

                # Figure out which static string will prefix this line.
                if lines:
                    indent = self.subsequent_indent
                else:
                    indent = self.initial_indent

                # Maximum width for this line.
                width = self.width - len(indent)

                # First chunk on line is whitespace -- drop it, unless this
                # is the very beginning of the text (i.e. no lines started yet).
                if self.drop_whitespace and chunks[-1].strip() == '' and lines:
                    del chunks[-1]

                while chunks:
                    # width in display columns, not in characters
                    l = colwidth(chunks[-1])

                    # Can at least squeeze this chunk onto the current line.
                    if cur_len + l <= width:
                        cur_line.append(chunks.pop())
                        cur_len += l

                    # Nope, this line is full.
                    else:
                        break

                # The current line is full, and the next chunk is too big to
                # fit on *any* line (not just this one).
                if chunks and colwidth(chunks[-1]) > width:
                    self._handle_long_word(chunks, cur_line, cur_len, width)

                # If the last chunk on this line is all whitespace, drop it.
                if (self.drop_whitespace and
                    cur_line and cur_line[-1].strip() == ''):
                    del cur_line[-1]

                # Convert current line back to a string and store it in list
                # of all lines (return value).
                if cur_line:
                    lines.append(indent + ''.join(cur_line))

            return lines

    # memoize: rebind the module-level name to the class itself so later
    # calls construct tw directly without redefining it
    global MBTextWrapper
    MBTextWrapper = tw
    return tw(**kwargs)
2128
2129
def wrap(line, width, initindent='', hangindent=''):
    """Wrap a byte string to 'width' display columns.

    initindent prefixes the first output line, hangindent every
    subsequent one. The result is re-encoded in the local encoding.
    """
    widest = max(len(hangindent), len(initindent))
    if width <= widest:
        # adjust for weird terminal size
        width = max(78, widest + 1)
    decoded = line.decode(encoding.encoding, encoding.encodingmode)
    first = initindent.decode(encoding.encoding, encoding.encodingmode)
    hang = hangindent.decode(encoding.encoding, encoding.encodingmode)
    wrapper = MBTextWrapper(width=width,
                            initial_indent=first,
                            subsequent_indent=hang)
    return wrapper.fill(decoded).encode(encoding.encoding)
2141
2142
def iterlines(iterator):
    """Yield every line of every chunk produced by iterator."""
    return (line for chunk in iterator for line in chunk.splitlines())
2146
2147
def expandpath(path):
    """Expand environment variables, then a leading ~user, in path."""
    withvars = os.path.expandvars(path)
    return os.path.expanduser(withvars)
2149
2150
def hgcmd():
    """Return the command used to execute current hg

    This is different from hgexecutable() because on Windows we want
    to avoid things opening new shell windows like batch files, so we
    get either the python call or current executable.
    """
    if not mainfrozen():
        return gethgcmd()
    # frozen binary: a py2app bundle or a generically frozen executable
    if getattr(sys, 'frozen', None) == 'macosx_app':
        # Env variable set by py2app
        return [os.environ['EXECUTABLEPATH']]
    return [sys.executable]
2164
2165
def rundetached(args, condfn):
    """Execute the argument list in a detached process.

    condfn is a callable which is called repeatedly and should return
    True once the child process is known to have started successfully.
    At this point, the child process PID is returned. If the child
    process fails to start or finishes before condfn() evaluates to
    True, return -1.
    """
    # Windows case is easier because the child process is either
    # successfully starting and validating the condition or exiting
    # on failure. We just poll on its PID. On Unix, if the child
    # process fails to start, it will be left in a zombie state until
    # the parent wait on it, which we cannot do since we expect a long
    # running process on success. Instead we listen for SIGCHLD telling
    # us our child process terminated.
    terminated = set()
    def handler(signum, frame):
        # reap the child; os.wait() returns a (pid, status) tuple
        terminated.add(os.wait())
    prevhandler = None
    # SIGCHLD is absent on Windows, hence the getattr guard
    SIGCHLD = getattr(signal, 'SIGCHLD', None)
    if SIGCHLD is not None:
        prevhandler = signal.signal(SIGCHLD, handler)
    try:
        pid = spawndetached(args)
        while not condfn():
            # NOTE(review): 'terminated' holds (pid, status) tuples, so
            # 'pid in terminated' can never match a bare int; early exit is
            # effectively detected by testpid() once the handler reaped the
            # child -- confirm before relying on the first test.
            # condfn() is re-checked to avoid racing a child that signalled
            # success just before exiting.
            if ((pid in terminated or not testpid(pid))
                and not condfn()):
                return -1
            time.sleep(0.1)
        return pid
    finally:
        # always restore the previous SIGCHLD handler
        if prevhandler is not None:
            signal.signal(signal.SIGCHLD, prevhandler)
2199
2200
def interpolate(prefix, mapping, s, fn=None, escape_prefix=False):
    """Return the result of interpolating items in the mapping into string s.

    prefix is a single character string, or a two character string with
    a backslash as the first character if the prefix needs to be escaped in
    a regular expression.

    fn is an optional function that will be applied to the replacement text
    just before replacement.

    escape_prefix is an optional flag that allows using doubled prefix for
    its escaping.
    """
    if not fn:
        fn = lambda t: t
    patterns = '|'.join(mapping.keys())
    if escape_prefix:
        # also recognize the doubled prefix and have it stand for itself
        patterns = '%s|%s' % (patterns, prefix)
        prefix_char = prefix[1:] if len(prefix) > 1 else prefix
        mapping[prefix_char] = prefix_char
    matcher = remod.compile(r'%s(%s)' % (prefix, patterns))
    return matcher.sub(lambda m: fn(mapping[m.group()[1:]]), s)
2224
2225
def getport(port):
    """Return the port for a given network service.

    If port is an integer, it's returned as is. If it's a string, it's
    looked up using socket.getservbyname(). If there's no matching
    service, error.Abort is raised.
    """
    try:
        # handles real ints as well as numeric strings
        return int(port)
    except ValueError:
        # not numeric: treat it as a service name
        try:
            return socket.getservbyname(port)
        except socket.error:
            raise Abort(_("no port number associated with service '%s'")
                        % port)
2241
2242
# recognized spellings of True and False (lower-cased before lookup)
_booleans = dict.fromkeys('1 yes true on always'.split(), True)
_booleans.update(dict.fromkeys('0 no false off never'.split(), False))

def parsebool(s):
    """Parse s into a boolean.

    If s is not a valid boolean, returns None.
    """
    return _booleans.get(s.lower())
2252
2253
2253 _hexdig = '0123456789ABCDEFabcdef'
2254 _hexdig = '0123456789ABCDEFabcdef'
2254 _hextochr = dict((a + b, chr(int(a + b, 16)))
2255 _hextochr = dict((a + b, chr(int(a + b, 16)))
2255 for a in _hexdig for b in _hexdig)
2256 for a in _hexdig for b in _hexdig)
2256
2257
2257 def _urlunquote(s):
2258 def _urlunquote(s):
2258 """Decode HTTP/HTML % encoding.
2259 """Decode HTTP/HTML % encoding.
2259
2260
2260 >>> _urlunquote('abc%20def')
2261 >>> _urlunquote('abc%20def')
2261 'abc def'
2262 'abc def'
2262 """
2263 """
2263 res = s.split('%')
2264 res = s.split('%')
2264 # fastpath
2265 # fastpath
2265 if len(res) == 1:
2266 if len(res) == 1:
2266 return s
2267 return s
2267 s = res[0]
2268 s = res[0]
2268 for item in res[1:]:
2269 for item in res[1:]:
2269 try:
2270 try:
2270 s += _hextochr[item[:2]] + item[2:]
2271 s += _hextochr[item[:2]] + item[2:]
2271 except KeyError:
2272 except KeyError:
2272 s += '%' + item
2273 s += '%' + item
2273 except UnicodeDecodeError:
2274 except UnicodeDecodeError:
2274 s += unichr(int(item[:2], 16)) + item[2:]
2275 s += unichr(int(item[:2], 16)) + item[2:]
2275 return s
2276 return s
2276
2277
2277 class url(object):
2278 class url(object):
2278 r"""Reliable URL parser.
2279 r"""Reliable URL parser.
2279
2280
2280 This parses URLs and provides attributes for the following
2281 This parses URLs and provides attributes for the following
2281 components:
2282 components:
2282
2283
2283 <scheme>://<user>:<passwd>@<host>:<port>/<path>?<query>#<fragment>
2284 <scheme>://<user>:<passwd>@<host>:<port>/<path>?<query>#<fragment>
2284
2285
2285 Missing components are set to None. The only exception is
2286 Missing components are set to None. The only exception is
2286 fragment, which is set to '' if present but empty.
2287 fragment, which is set to '' if present but empty.
2287
2288
2288 If parsefragment is False, fragment is included in query. If
2289 If parsefragment is False, fragment is included in query. If
2289 parsequery is False, query is included in path. If both are
2290 parsequery is False, query is included in path. If both are
2290 False, both fragment and query are included in path.
2291 False, both fragment and query are included in path.
2291
2292
2292 See http://www.ietf.org/rfc/rfc2396.txt for more information.
2293 See http://www.ietf.org/rfc/rfc2396.txt for more information.
2293
2294
2294 Note that for backward compatibility reasons, bundle URLs do not
2295 Note that for backward compatibility reasons, bundle URLs do not
2295 take host names. That means 'bundle://../' has a path of '../'.
2296 take host names. That means 'bundle://../' has a path of '../'.
2296
2297
2297 Examples:
2298 Examples:
2298
2299
2299 >>> url('http://www.ietf.org/rfc/rfc2396.txt')
2300 >>> url('http://www.ietf.org/rfc/rfc2396.txt')
2300 <url scheme: 'http', host: 'www.ietf.org', path: 'rfc/rfc2396.txt'>
2301 <url scheme: 'http', host: 'www.ietf.org', path: 'rfc/rfc2396.txt'>
2301 >>> url('ssh://[::1]:2200//home/joe/repo')
2302 >>> url('ssh://[::1]:2200//home/joe/repo')
2302 <url scheme: 'ssh', host: '[::1]', port: '2200', path: '/home/joe/repo'>
2303 <url scheme: 'ssh', host: '[::1]', port: '2200', path: '/home/joe/repo'>
2303 >>> url('file:///home/joe/repo')
2304 >>> url('file:///home/joe/repo')
2304 <url scheme: 'file', path: '/home/joe/repo'>
2305 <url scheme: 'file', path: '/home/joe/repo'>
2305 >>> url('file:///c:/temp/foo/')
2306 >>> url('file:///c:/temp/foo/')
2306 <url scheme: 'file', path: 'c:/temp/foo/'>
2307 <url scheme: 'file', path: 'c:/temp/foo/'>
2307 >>> url('bundle:foo')
2308 >>> url('bundle:foo')
2308 <url scheme: 'bundle', path: 'foo'>
2309 <url scheme: 'bundle', path: 'foo'>
2309 >>> url('bundle://../foo')
2310 >>> url('bundle://../foo')
2310 <url scheme: 'bundle', path: '../foo'>
2311 <url scheme: 'bundle', path: '../foo'>
2311 >>> url(r'c:\foo\bar')
2312 >>> url(r'c:\foo\bar')
2312 <url path: 'c:\\foo\\bar'>
2313 <url path: 'c:\\foo\\bar'>
2313 >>> url(r'\\blah\blah\blah')
2314 >>> url(r'\\blah\blah\blah')
2314 <url path: '\\\\blah\\blah\\blah'>
2315 <url path: '\\\\blah\\blah\\blah'>
2315 >>> url(r'\\blah\blah\blah#baz')
2316 >>> url(r'\\blah\blah\blah#baz')
2316 <url path: '\\\\blah\\blah\\blah', fragment: 'baz'>
2317 <url path: '\\\\blah\\blah\\blah', fragment: 'baz'>
2317 >>> url(r'file:///C:\users\me')
2318 >>> url(r'file:///C:\users\me')
2318 <url scheme: 'file', path: 'C:\\users\\me'>
2319 <url scheme: 'file', path: 'C:\\users\\me'>
2319
2320
2320 Authentication credentials:
2321 Authentication credentials:
2321
2322
2322 >>> url('ssh://joe:xyz@x/repo')
2323 >>> url('ssh://joe:xyz@x/repo')
2323 <url scheme: 'ssh', user: 'joe', passwd: 'xyz', host: 'x', path: 'repo'>
2324 <url scheme: 'ssh', user: 'joe', passwd: 'xyz', host: 'x', path: 'repo'>
2324 >>> url('ssh://joe@x/repo')
2325 >>> url('ssh://joe@x/repo')
2325 <url scheme: 'ssh', user: 'joe', host: 'x', path: 'repo'>
2326 <url scheme: 'ssh', user: 'joe', host: 'x', path: 'repo'>
2326
2327
2327 Query strings and fragments:
2328 Query strings and fragments:
2328
2329
2329 >>> url('http://host/a?b#c')
2330 >>> url('http://host/a?b#c')
2330 <url scheme: 'http', host: 'host', path: 'a', query: 'b', fragment: 'c'>
2331 <url scheme: 'http', host: 'host', path: 'a', query: 'b', fragment: 'c'>
2331 >>> url('http://host/a?b#c', parsequery=False, parsefragment=False)
2332 >>> url('http://host/a?b#c', parsequery=False, parsefragment=False)
2332 <url scheme: 'http', host: 'host', path: 'a?b#c'>
2333 <url scheme: 'http', host: 'host', path: 'a?b#c'>
2333 """
2334 """
2334
2335
2335 _safechars = "!~*'()+"
2336 _safechars = "!~*'()+"
2336 _safepchars = "/!~*'()+:\\"
2337 _safepchars = "/!~*'()+:\\"
2337 _matchscheme = remod.compile(r'^[a-zA-Z0-9+.\-]+:').match
2338 _matchscheme = remod.compile(r'^[a-zA-Z0-9+.\-]+:').match
2338
2339
2339 def __init__(self, path, parsequery=True, parsefragment=True):
2340 def __init__(self, path, parsequery=True, parsefragment=True):
2340 # We slowly chomp away at path until we have only the path left
2341 # We slowly chomp away at path until we have only the path left
2341 self.scheme = self.user = self.passwd = self.host = None
2342 self.scheme = self.user = self.passwd = self.host = None
2342 self.port = self.path = self.query = self.fragment = None
2343 self.port = self.path = self.query = self.fragment = None
2343 self._localpath = True
2344 self._localpath = True
2344 self._hostport = ''
2345 self._hostport = ''
2345 self._origpath = path
2346 self._origpath = path
2346
2347
2347 if parsefragment and '#' in path:
2348 if parsefragment and '#' in path:
2348 path, self.fragment = path.split('#', 1)
2349 path, self.fragment = path.split('#', 1)
2349 if not path:
2350 if not path:
2350 path = None
2351 path = None
2351
2352
2352 # special case for Windows drive letters and UNC paths
2353 # special case for Windows drive letters and UNC paths
2353 if hasdriveletter(path) or path.startswith(r'\\'):
2354 if hasdriveletter(path) or path.startswith(r'\\'):
2354 self.path = path
2355 self.path = path
2355 return
2356 return
2356
2357
2357 # For compatibility reasons, we can't handle bundle paths as
2358 # For compatibility reasons, we can't handle bundle paths as
2358 # normal URLS
2359 # normal URLS
2359 if path.startswith('bundle:'):
2360 if path.startswith('bundle:'):
2360 self.scheme = 'bundle'
2361 self.scheme = 'bundle'
2361 path = path[7:]
2362 path = path[7:]
2362 if path.startswith('//'):
2363 if path.startswith('//'):
2363 path = path[2:]
2364 path = path[2:]
2364 self.path = path
2365 self.path = path
2365 return
2366 return
2366
2367
2367 if self._matchscheme(path):
2368 if self._matchscheme(path):
2368 parts = path.split(':', 1)
2369 parts = path.split(':', 1)
2369 if parts[0]:
2370 if parts[0]:
2370 self.scheme, path = parts
2371 self.scheme, path = parts
2371 self._localpath = False
2372 self._localpath = False
2372
2373
2373 if not path:
2374 if not path:
2374 path = None
2375 path = None
2375 if self._localpath:
2376 if self._localpath:
2376 self.path = ''
2377 self.path = ''
2377 return
2378 return
2378 else:
2379 else:
2379 if self._localpath:
2380 if self._localpath:
2380 self.path = path
2381 self.path = path
2381 return
2382 return
2382
2383
2383 if parsequery and '?' in path:
2384 if parsequery and '?' in path:
2384 path, self.query = path.split('?', 1)
2385 path, self.query = path.split('?', 1)
2385 if not path:
2386 if not path:
2386 path = None
2387 path = None
2387 if not self.query:
2388 if not self.query:
2388 self.query = None
2389 self.query = None
2389
2390
2390 # // is required to specify a host/authority
2391 # // is required to specify a host/authority
2391 if path and path.startswith('//'):
2392 if path and path.startswith('//'):
2392 parts = path[2:].split('/', 1)
2393 parts = path[2:].split('/', 1)
2393 if len(parts) > 1:
2394 if len(parts) > 1:
2394 self.host, path = parts
2395 self.host, path = parts
2395 else:
2396 else:
2396 self.host = parts[0]
2397 self.host = parts[0]
2397 path = None
2398 path = None
2398 if not self.host:
2399 if not self.host:
2399 self.host = None
2400 self.host = None
2400 # path of file:///d is /d
2401 # path of file:///d is /d
2401 # path of file:///d:/ is d:/, not /d:/
2402 # path of file:///d:/ is d:/, not /d:/
2402 if path and not hasdriveletter(path):
2403 if path and not hasdriveletter(path):
2403 path = '/' + path
2404 path = '/' + path
2404
2405
2405 if self.host and '@' in self.host:
2406 if self.host and '@' in self.host:
2406 self.user, self.host = self.host.rsplit('@', 1)
2407 self.user, self.host = self.host.rsplit('@', 1)
2407 if ':' in self.user:
2408 if ':' in self.user:
2408 self.user, self.passwd = self.user.split(':', 1)
2409 self.user, self.passwd = self.user.split(':', 1)
2409 if not self.host:
2410 if not self.host:
2410 self.host = None
2411 self.host = None
2411
2412
2412 # Don't split on colons in IPv6 addresses without ports
2413 # Don't split on colons in IPv6 addresses without ports
2413 if (self.host and ':' in self.host and
2414 if (self.host and ':' in self.host and
2414 not (self.host.startswith('[') and self.host.endswith(']'))):
2415 not (self.host.startswith('[') and self.host.endswith(']'))):
2415 self._hostport = self.host
2416 self._hostport = self.host
2416 self.host, self.port = self.host.rsplit(':', 1)
2417 self.host, self.port = self.host.rsplit(':', 1)
2417 if not self.host:
2418 if not self.host:
2418 self.host = None
2419 self.host = None
2419
2420
2420 if (self.host and self.scheme == 'file' and
2421 if (self.host and self.scheme == 'file' and
2421 self.host not in ('localhost', '127.0.0.1', '[::1]')):
2422 self.host not in ('localhost', '127.0.0.1', '[::1]')):
2422 raise Abort(_('file:// URLs can only refer to localhost'))
2423 raise Abort(_('file:// URLs can only refer to localhost'))
2423
2424
2424 self.path = path
2425 self.path = path
2425
2426
2426 # leave the query string escaped
2427 # leave the query string escaped
2427 for a in ('user', 'passwd', 'host', 'port',
2428 for a in ('user', 'passwd', 'host', 'port',
2428 'path', 'fragment'):
2429 'path', 'fragment'):
2429 v = getattr(self, a)
2430 v = getattr(self, a)
2430 if v is not None:
2431 if v is not None:
2431 setattr(self, a, _urlunquote(v))
2432 setattr(self, a, _urlunquote(v))
2432
2433
2433 def __repr__(self):
2434 def __repr__(self):
2434 attrs = []
2435 attrs = []
2435 for a in ('scheme', 'user', 'passwd', 'host', 'port', 'path',
2436 for a in ('scheme', 'user', 'passwd', 'host', 'port', 'path',
2436 'query', 'fragment'):
2437 'query', 'fragment'):
2437 v = getattr(self, a)
2438 v = getattr(self, a)
2438 if v is not None:
2439 if v is not None:
2439 attrs.append('%s: %r' % (a, v))
2440 attrs.append('%s: %r' % (a, v))
2440 return '<url %s>' % ', '.join(attrs)
2441 return '<url %s>' % ', '.join(attrs)
2441
2442
2442 def __str__(self):
2443 def __str__(self):
2443 r"""Join the URL's components back into a URL string.
2444 r"""Join the URL's components back into a URL string.
2444
2445
2445 Examples:
2446 Examples:
2446
2447
2447 >>> str(url('http://user:pw@host:80/c:/bob?fo:oo#ba:ar'))
2448 >>> str(url('http://user:pw@host:80/c:/bob?fo:oo#ba:ar'))
2448 'http://user:pw@host:80/c:/bob?fo:oo#ba:ar'
2449 'http://user:pw@host:80/c:/bob?fo:oo#ba:ar'
2449 >>> str(url('http://user:pw@host:80/?foo=bar&baz=42'))
2450 >>> str(url('http://user:pw@host:80/?foo=bar&baz=42'))
2450 'http://user:pw@host:80/?foo=bar&baz=42'
2451 'http://user:pw@host:80/?foo=bar&baz=42'
2451 >>> str(url('http://user:pw@host:80/?foo=bar%3dbaz'))
2452 >>> str(url('http://user:pw@host:80/?foo=bar%3dbaz'))
2452 'http://user:pw@host:80/?foo=bar%3dbaz'
2453 'http://user:pw@host:80/?foo=bar%3dbaz'
2453 >>> str(url('ssh://user:pw@[::1]:2200//home/joe#'))
2454 >>> str(url('ssh://user:pw@[::1]:2200//home/joe#'))
2454 'ssh://user:pw@[::1]:2200//home/joe#'
2455 'ssh://user:pw@[::1]:2200//home/joe#'
2455 >>> str(url('http://localhost:80//'))
2456 >>> str(url('http://localhost:80//'))
2456 'http://localhost:80//'
2457 'http://localhost:80//'
2457 >>> str(url('http://localhost:80/'))
2458 >>> str(url('http://localhost:80/'))
2458 'http://localhost:80/'
2459 'http://localhost:80/'
2459 >>> str(url('http://localhost:80'))
2460 >>> str(url('http://localhost:80'))
2460 'http://localhost:80/'
2461 'http://localhost:80/'
2461 >>> str(url('bundle:foo'))
2462 >>> str(url('bundle:foo'))
2462 'bundle:foo'
2463 'bundle:foo'
2463 >>> str(url('bundle://../foo'))
2464 >>> str(url('bundle://../foo'))
2464 'bundle:../foo'
2465 'bundle:../foo'
2465 >>> str(url('path'))
2466 >>> str(url('path'))
2466 'path'
2467 'path'
2467 >>> str(url('file:///tmp/foo/bar'))
2468 >>> str(url('file:///tmp/foo/bar'))
2468 'file:///tmp/foo/bar'
2469 'file:///tmp/foo/bar'
2469 >>> str(url('file:///c:/tmp/foo/bar'))
2470 >>> str(url('file:///c:/tmp/foo/bar'))
2470 'file:///c:/tmp/foo/bar'
2471 'file:///c:/tmp/foo/bar'
2471 >>> print url(r'bundle:foo\bar')
2472 >>> print url(r'bundle:foo\bar')
2472 bundle:foo\bar
2473 bundle:foo\bar
2473 >>> print url(r'file:///D:\data\hg')
2474 >>> print url(r'file:///D:\data\hg')
2474 file:///D:\data\hg
2475 file:///D:\data\hg
2475 """
2476 """
2476 if self._localpath:
2477 if self._localpath:
2477 s = self.path
2478 s = self.path
2478 if self.scheme == 'bundle':
2479 if self.scheme == 'bundle':
2479 s = 'bundle:' + s
2480 s = 'bundle:' + s
2480 if self.fragment:
2481 if self.fragment:
2481 s += '#' + self.fragment
2482 s += '#' + self.fragment
2482 return s
2483 return s
2483
2484
2484 s = self.scheme + ':'
2485 s = self.scheme + ':'
2485 if self.user or self.passwd or self.host:
2486 if self.user or self.passwd or self.host:
2486 s += '//'
2487 s += '//'
2487 elif self.scheme and (not self.path or self.path.startswith('/')
2488 elif self.scheme and (not self.path or self.path.startswith('/')
2488 or hasdriveletter(self.path)):
2489 or hasdriveletter(self.path)):
2489 s += '//'
2490 s += '//'
2490 if hasdriveletter(self.path):
2491 if hasdriveletter(self.path):
2491 s += '/'
2492 s += '/'
2492 if self.user:
2493 if self.user:
2493 s += urlreq.quote(self.user, safe=self._safechars)
2494 s += urlreq.quote(self.user, safe=self._safechars)
2494 if self.passwd:
2495 if self.passwd:
2495 s += ':' + urlreq.quote(self.passwd, safe=self._safechars)
2496 s += ':' + urlreq.quote(self.passwd, safe=self._safechars)
2496 if self.user or self.passwd:
2497 if self.user or self.passwd:
2497 s += '@'
2498 s += '@'
2498 if self.host:
2499 if self.host:
2499 if not (self.host.startswith('[') and self.host.endswith(']')):
2500 if not (self.host.startswith('[') and self.host.endswith(']')):
2500 s += urlreq.quote(self.host)
2501 s += urlreq.quote(self.host)
2501 else:
2502 else:
2502 s += self.host
2503 s += self.host
2503 if self.port:
2504 if self.port:
2504 s += ':' + urlreq.quote(self.port)
2505 s += ':' + urlreq.quote(self.port)
2505 if self.host:
2506 if self.host:
2506 s += '/'
2507 s += '/'
2507 if self.path:
2508 if self.path:
2508 # TODO: similar to the query string, we should not unescape the
2509 # TODO: similar to the query string, we should not unescape the
2509 # path when we store it, the path might contain '%2f' = '/',
2510 # path when we store it, the path might contain '%2f' = '/',
2510 # which we should *not* escape.
2511 # which we should *not* escape.
2511 s += urlreq.quote(self.path, safe=self._safepchars)
2512 s += urlreq.quote(self.path, safe=self._safepchars)
2512 if self.query:
2513 if self.query:
2513 # we store the query in escaped form.
2514 # we store the query in escaped form.
2514 s += '?' + self.query
2515 s += '?' + self.query
2515 if self.fragment is not None:
2516 if self.fragment is not None:
2516 s += '#' + urlreq.quote(self.fragment, safe=self._safepchars)
2517 s += '#' + urlreq.quote(self.fragment, safe=self._safepchars)
2517 return s
2518 return s
2518
2519
2519 def authinfo(self):
2520 def authinfo(self):
2520 user, passwd = self.user, self.passwd
2521 user, passwd = self.user, self.passwd
2521 try:
2522 try:
2522 self.user, self.passwd = None, None
2523 self.user, self.passwd = None, None
2523 s = str(self)
2524 s = str(self)
2524 finally:
2525 finally:
2525 self.user, self.passwd = user, passwd
2526 self.user, self.passwd = user, passwd
2526 if not self.user:
2527 if not self.user:
2527 return (s, None)
2528 return (s, None)
2528 # authinfo[1] is passed to urllib2 password manager, and its
2529 # authinfo[1] is passed to urllib2 password manager, and its
2529 # URIs must not contain credentials. The host is passed in the
2530 # URIs must not contain credentials. The host is passed in the
2530 # URIs list because Python < 2.4.3 uses only that to search for
2531 # URIs list because Python < 2.4.3 uses only that to search for
2531 # a password.
2532 # a password.
2532 return (s, (None, (s, self.host),
2533 return (s, (None, (s, self.host),
2533 self.user, self.passwd or ''))
2534 self.user, self.passwd or ''))
2534
2535
2535 def isabs(self):
2536 def isabs(self):
2536 if self.scheme and self.scheme != 'file':
2537 if self.scheme and self.scheme != 'file':
2537 return True # remote URL
2538 return True # remote URL
2538 if hasdriveletter(self.path):
2539 if hasdriveletter(self.path):
2539 return True # absolute for our purposes - can't be joined()
2540 return True # absolute for our purposes - can't be joined()
2540 if self.path.startswith(r'\\'):
2541 if self.path.startswith(r'\\'):
2541 return True # Windows UNC path
2542 return True # Windows UNC path
2542 if self.path.startswith('/'):
2543 if self.path.startswith('/'):
2543 return True # POSIX-style
2544 return True # POSIX-style
2544 return False
2545 return False
2545
2546
2546 def localpath(self):
2547 def localpath(self):
2547 if self.scheme == 'file' or self.scheme == 'bundle':
2548 if self.scheme == 'file' or self.scheme == 'bundle':
2548 path = self.path or '/'
2549 path = self.path or '/'
2549 # For Windows, we need to promote hosts containing drive
2550 # For Windows, we need to promote hosts containing drive
2550 # letters to paths with drive letters.
2551 # letters to paths with drive letters.
2551 if hasdriveletter(self._hostport):
2552 if hasdriveletter(self._hostport):
2552 path = self._hostport + '/' + self.path
2553 path = self._hostport + '/' + self.path
2553 elif (self.host is not None and self.path
2554 elif (self.host is not None and self.path
2554 and not hasdriveletter(path)):
2555 and not hasdriveletter(path)):
2555 path = '/' + path
2556 path = '/' + path
2556 return path
2557 return path
2557 return self._origpath
2558 return self._origpath
2558
2559
2559 def islocal(self):
2560 def islocal(self):
2560 '''whether localpath will return something that posixfile can open'''
2561 '''whether localpath will return something that posixfile can open'''
2561 return (not self.scheme or self.scheme == 'file'
2562 return (not self.scheme or self.scheme == 'file'
2562 or self.scheme == 'bundle')
2563 or self.scheme == 'bundle')
2563
2564
2564 def hasscheme(path):
2565 def hasscheme(path):
2565 return bool(url(path).scheme)
2566 return bool(url(path).scheme)
2566
2567
2567 def hasdriveletter(path):
2568 def hasdriveletter(path):
2568 return path and path[1:2] == ':' and path[0:1].isalpha()
2569 return path and path[1:2] == ':' and path[0:1].isalpha()
2569
2570
2570 def urllocalpath(path):
2571 def urllocalpath(path):
2571 return url(path, parsequery=False, parsefragment=False).localpath()
2572 return url(path, parsequery=False, parsefragment=False).localpath()
2572
2573
2573 def hidepassword(u):
2574 def hidepassword(u):
2574 '''hide user credential in a url string'''
2575 '''hide user credential in a url string'''
2575 u = url(u)
2576 u = url(u)
2576 if u.passwd:
2577 if u.passwd:
2577 u.passwd = '***'
2578 u.passwd = '***'
2578 return str(u)
2579 return str(u)
2579
2580
2580 def removeauth(u):
2581 def removeauth(u):
2581 '''remove all authentication information from a url string'''
2582 '''remove all authentication information from a url string'''
2582 u = url(u)
2583 u = url(u)
2583 u.user = u.passwd = None
2584 u.user = u.passwd = None
2584 return str(u)
2585 return str(u)
2585
2586
2586 def isatty(fp):
2587 def isatty(fp):
2587 try:
2588 try:
2588 return fp.isatty()
2589 return fp.isatty()
2589 except AttributeError:
2590 except AttributeError:
2590 return False
2591 return False
2591
2592
2592 timecount = unitcountfn(
2593 timecount = unitcountfn(
2593 (1, 1e3, _('%.0f s')),
2594 (1, 1e3, _('%.0f s')),
2594 (100, 1, _('%.1f s')),
2595 (100, 1, _('%.1f s')),
2595 (10, 1, _('%.2f s')),
2596 (10, 1, _('%.2f s')),
2596 (1, 1, _('%.3f s')),
2597 (1, 1, _('%.3f s')),
2597 (100, 0.001, _('%.1f ms')),
2598 (100, 0.001, _('%.1f ms')),
2598 (10, 0.001, _('%.2f ms')),
2599 (10, 0.001, _('%.2f ms')),
2599 (1, 0.001, _('%.3f ms')),
2600 (1, 0.001, _('%.3f ms')),
2600 (100, 0.000001, _('%.1f us')),
2601 (100, 0.000001, _('%.1f us')),
2601 (10, 0.000001, _('%.2f us')),
2602 (10, 0.000001, _('%.2f us')),
2602 (1, 0.000001, _('%.3f us')),
2603 (1, 0.000001, _('%.3f us')),
2603 (100, 0.000000001, _('%.1f ns')),
2604 (100, 0.000000001, _('%.1f ns')),
2604 (10, 0.000000001, _('%.2f ns')),
2605 (10, 0.000000001, _('%.2f ns')),
2605 (1, 0.000000001, _('%.3f ns')),
2606 (1, 0.000000001, _('%.3f ns')),
2606 )
2607 )
2607
2608
2608 _timenesting = [0]
2609 _timenesting = [0]
2609
2610
2610 def timed(func):
2611 def timed(func):
2611 '''Report the execution time of a function call to stderr.
2612 '''Report the execution time of a function call to stderr.
2612
2613
2613 During development, use as a decorator when you need to measure
2614 During development, use as a decorator when you need to measure
2614 the cost of a function, e.g. as follows:
2615 the cost of a function, e.g. as follows:
2615
2616
2616 @util.timed
2617 @util.timed
2617 def foo(a, b, c):
2618 def foo(a, b, c):
2618 pass
2619 pass
2619 '''
2620 '''
2620
2621
2621 def wrapper(*args, **kwargs):
2622 def wrapper(*args, **kwargs):
2622 start = time.time()
2623 start = time.time()
2623 indent = 2
2624 indent = 2
2624 _timenesting[0] += indent
2625 _timenesting[0] += indent
2625 try:
2626 try:
2626 return func(*args, **kwargs)
2627 return func(*args, **kwargs)
2627 finally:
2628 finally:
2628 elapsed = time.time() - start
2629 elapsed = time.time() - start
2629 _timenesting[0] -= indent
2630 _timenesting[0] -= indent
2630 sys.stderr.write('%s%s: %s\n' %
2631 sys.stderr.write('%s%s: %s\n' %
2631 (' ' * _timenesting[0], func.__name__,
2632 (' ' * _timenesting[0], func.__name__,
2632 timecount(elapsed)))
2633 timecount(elapsed)))
2633 return wrapper
2634 return wrapper
2634
2635
2635 _sizeunits = (('m', 2**20), ('k', 2**10), ('g', 2**30),
2636 _sizeunits = (('m', 2**20), ('k', 2**10), ('g', 2**30),
2636 ('kb', 2**10), ('mb', 2**20), ('gb', 2**30), ('b', 1))
2637 ('kb', 2**10), ('mb', 2**20), ('gb', 2**30), ('b', 1))
2637
2638
2638 def sizetoint(s):
2639 def sizetoint(s):
2639 '''Convert a space specifier to a byte count.
2640 '''Convert a space specifier to a byte count.
2640
2641
2641 >>> sizetoint('30')
2642 >>> sizetoint('30')
2642 30
2643 30
2643 >>> sizetoint('2.2kb')
2644 >>> sizetoint('2.2kb')
2644 2252
2645 2252
2645 >>> sizetoint('6M')
2646 >>> sizetoint('6M')
2646 6291456
2647 6291456
2647 '''
2648 '''
2648 t = s.strip().lower()
2649 t = s.strip().lower()
2649 try:
2650 try:
2650 for k, u in _sizeunits:
2651 for k, u in _sizeunits:
2651 if t.endswith(k):
2652 if t.endswith(k):
2652 return int(float(t[:-len(k)]) * u)
2653 return int(float(t[:-len(k)]) * u)
2653 return int(t)
2654 return int(t)
2654 except ValueError:
2655 except ValueError:
2655 raise error.ParseError(_("couldn't parse size: %s") % s)
2656 raise error.ParseError(_("couldn't parse size: %s") % s)
2656
2657
2657 class hooks(object):
2658 class hooks(object):
2658 '''A collection of hook functions that can be used to extend a
2659 '''A collection of hook functions that can be used to extend a
2659 function's behavior. Hooks are called in lexicographic order,
2660 function's behavior. Hooks are called in lexicographic order,
2660 based on the names of their sources.'''
2661 based on the names of their sources.'''
2661
2662
2662 def __init__(self):
2663 def __init__(self):
2663 self._hooks = []
2664 self._hooks = []
2664
2665
2665 def add(self, source, hook):
2666 def add(self, source, hook):
2666 self._hooks.append((source, hook))
2667 self._hooks.append((source, hook))
2667
2668
2668 def __call__(self, *args):
2669 def __call__(self, *args):
2669 self._hooks.sort(key=lambda x: x[0])
2670 self._hooks.sort(key=lambda x: x[0])
2670 results = []
2671 results = []
2671 for source, hook in self._hooks:
2672 for source, hook in self._hooks:
2672 results.append(hook(*args))
2673 results.append(hook(*args))
2673 return results
2674 return results
2674
2675
2675 def getstackframes(skip=0, line=' %-*s in %s\n', fileline='%s:%s'):
2676 def getstackframes(skip=0, line=' %-*s in %s\n', fileline='%s:%s'):
2676 '''Yields lines for a nicely formatted stacktrace.
2677 '''Yields lines for a nicely formatted stacktrace.
2677 Skips the 'skip' last entries.
2678 Skips the 'skip' last entries.
2678 Each file+linenumber is formatted according to fileline.
2679 Each file+linenumber is formatted according to fileline.
2679 Each line is formatted according to line.
2680 Each line is formatted according to line.
2680 If line is None, it yields:
2681 If line is None, it yields:
2681 length of longest filepath+line number,
2682 length of longest filepath+line number,
2682 filepath+linenumber,
2683 filepath+linenumber,
2683 function
2684 function
2684
2685
2685 Not be used in production code but very convenient while developing.
2686 Not be used in production code but very convenient while developing.
2686 '''
2687 '''
2687 entries = [(fileline % (fn, ln), func)
2688 entries = [(fileline % (fn, ln), func)
2688 for fn, ln, func, _text in traceback.extract_stack()[:-skip - 1]]
2689 for fn, ln, func, _text in traceback.extract_stack()[:-skip - 1]]
2689 if entries:
2690 if entries:
2690 fnmax = max(len(entry[0]) for entry in entries)
2691 fnmax = max(len(entry[0]) for entry in entries)
2691 for fnln, func in entries:
2692 for fnln, func in entries:
2692 if line is None:
2693 if line is None:
2693 yield (fnmax, fnln, func)
2694 yield (fnmax, fnln, func)
2694 else:
2695 else:
2695 yield line % (fnmax, fnln, func)
2696 yield line % (fnmax, fnln, func)
2696
2697
2697 def debugstacktrace(msg='stacktrace', skip=0, f=sys.stderr, otherf=sys.stdout):
2698 def debugstacktrace(msg='stacktrace', skip=0, f=sys.stderr, otherf=sys.stdout):
2698 '''Writes a message to f (stderr) with a nicely formatted stacktrace.
2699 '''Writes a message to f (stderr) with a nicely formatted stacktrace.
2699 Skips the 'skip' last entries. By default it will flush stdout first.
2700 Skips the 'skip' last entries. By default it will flush stdout first.
2700 It can be used everywhere and intentionally does not require an ui object.
2701 It can be used everywhere and intentionally does not require an ui object.
2701 Not be used in production code but very convenient while developing.
2702 Not be used in production code but very convenient while developing.
2702 '''
2703 '''
2703 if otherf:
2704 if otherf:
2704 otherf.flush()
2705 otherf.flush()
2705 f.write('%s at:\n' % msg)
2706 f.write('%s at:\n' % msg)
2706 for line in getstackframes(skip + 1):
2707 for line in getstackframes(skip + 1):
2707 f.write(line)
2708 f.write(line)
2708 f.flush()
2709 f.flush()
2709
2710
2710 class dirs(object):
2711 class dirs(object):
2711 '''a multiset of directory names from a dirstate or manifest'''
2712 '''a multiset of directory names from a dirstate or manifest'''
2712
2713
2713 def __init__(self, map, skip=None):
2714 def __init__(self, map, skip=None):
2714 self._dirs = {}
2715 self._dirs = {}
2715 addpath = self.addpath
2716 addpath = self.addpath
2716 if safehasattr(map, 'iteritems') and skip is not None:
2717 if safehasattr(map, 'iteritems') and skip is not None:
2717 for f, s in map.iteritems():
2718 for f, s in map.iteritems():
2718 if s[0] != skip:
2719 if s[0] != skip:
2719 addpath(f)
2720 addpath(f)
2720 else:
2721 else:
2721 for f in map:
2722 for f in map:
2722 addpath(f)
2723 addpath(f)
2723
2724
2724 def addpath(self, path):
2725 def addpath(self, path):
2725 dirs = self._dirs
2726 dirs = self._dirs
2726 for base in finddirs(path):
2727 for base in finddirs(path):
2727 if base in dirs:
2728 if base in dirs:
2728 dirs[base] += 1
2729 dirs[base] += 1
2729 return
2730 return
2730 dirs[base] = 1
2731 dirs[base] = 1
2731
2732
2732 def delpath(self, path):
2733 def delpath(self, path):
2733 dirs = self._dirs
2734 dirs = self._dirs
2734 for base in finddirs(path):
2735 for base in finddirs(path):
2735 if dirs[base] > 1:
2736 if dirs[base] > 1:
2736 dirs[base] -= 1
2737 dirs[base] -= 1
2737 return
2738 return
2738 del dirs[base]
2739 del dirs[base]
2739
2740
2740 def __iter__(self):
2741 def __iter__(self):
2741 return self._dirs.iterkeys()
2742 return self._dirs.iterkeys()
2742
2743
2743 def __contains__(self, d):
2744 def __contains__(self, d):
2744 return d in self._dirs
2745 return d in self._dirs
2745
2746
2746 if safehasattr(parsers, 'dirs'):
2747 if safehasattr(parsers, 'dirs'):
2747 dirs = parsers.dirs
2748 dirs = parsers.dirs
2748
2749
2749 def finddirs(path):
2750 def finddirs(path):
2750 pos = path.rfind('/')
2751 pos = path.rfind('/')
2751 while pos != -1:
2752 while pos != -1:
2752 yield path[:pos]
2753 yield path[:pos]
2753 pos = path.rfind('/', 0, pos)
2754 pos = path.rfind('/', 0, pos)
2754
2755
2755 # compression utility
2756 # compression utility
2756
2757
2757 class nocompress(object):
2758 class nocompress(object):
2758 def compress(self, x):
2759 def compress(self, x):
2759 return x
2760 return x
2760 def flush(self):
2761 def flush(self):
2761 return ""
2762 return ""
2762
2763
2763 compressors = {
2764 compressors = {
2764 None: nocompress,
2765 None: nocompress,
2765 # lambda to prevent early import
2766 # lambda to prevent early import
2766 'BZ': lambda: bz2.BZ2Compressor(),
2767 'BZ': lambda: bz2.BZ2Compressor(),
2767 'GZ': lambda: zlib.compressobj(),
2768 'GZ': lambda: zlib.compressobj(),
2768 }
2769 }
2769 # also support the old form by courtesies
2770 # also support the old form by courtesies
2770 compressors['UN'] = compressors[None]
2771 compressors['UN'] = compressors[None]
2771
2772
2772 def _makedecompressor(decompcls):
2773 def _makedecompressor(decompcls):
2773 def generator(f):
2774 def generator(f):
2774 d = decompcls()
2775 d = decompcls()
2775 for chunk in filechunkiter(f):
2776 for chunk in filechunkiter(f):
2776 yield d.decompress(chunk)
2777 yield d.decompress(chunk)
2777 def func(fh):
2778 def func(fh):
2778 return chunkbuffer(generator(fh))
2779 return chunkbuffer(generator(fh))
2779 return func
2780 return func
2780
2781
2781 class ctxmanager(object):
2782 class ctxmanager(object):
2782 '''A context manager for use in 'with' blocks to allow multiple
2783 '''A context manager for use in 'with' blocks to allow multiple
2783 contexts to be entered at once. This is both safer and more
2784 contexts to be entered at once. This is both safer and more
2784 flexible than contextlib.nested.
2785 flexible than contextlib.nested.
2785
2786
2786 Once Mercurial supports Python 2.7+, this will become mostly
2787 Once Mercurial supports Python 2.7+, this will become mostly
2787 unnecessary.
2788 unnecessary.
2788 '''
2789 '''
2789
2790
2790 def __init__(self, *args):
2791 def __init__(self, *args):
2791 '''Accepts a list of no-argument functions that return context
2792 '''Accepts a list of no-argument functions that return context
2792 managers. These will be invoked at __call__ time.'''
2793 managers. These will be invoked at __call__ time.'''
2793 self._pending = args
2794 self._pending = args
2794 self._atexit = []
2795 self._atexit = []
2795
2796
2796 def __enter__(self):
2797 def __enter__(self):
2797 return self
2798 return self
2798
2799
2799 def enter(self):
2800 def enter(self):
2800 '''Create and enter context managers in the order in which they were
2801 '''Create and enter context managers in the order in which they were
2801 passed to the constructor.'''
2802 passed to the constructor.'''
2802 values = []
2803 values = []
2803 for func in self._pending:
2804 for func in self._pending:
2804 obj = func()
2805 obj = func()
2805 values.append(obj.__enter__())
2806 values.append(obj.__enter__())
2806 self._atexit.append(obj.__exit__)
2807 self._atexit.append(obj.__exit__)
2807 del self._pending
2808 del self._pending
2808 return values
2809 return values
2809
2810
2810 def atexit(self, func, *args, **kwargs):
2811 def atexit(self, func, *args, **kwargs):
2811 '''Add a function to call when this context manager exits. The
2812 '''Add a function to call when this context manager exits. The
2812 ordering of multiple atexit calls is unspecified, save that
2813 ordering of multiple atexit calls is unspecified, save that
2813 they will happen before any __exit__ functions.'''
2814 they will happen before any __exit__ functions.'''
2814 def wrapper(exc_type, exc_val, exc_tb):
2815 def wrapper(exc_type, exc_val, exc_tb):
2815 func(*args, **kwargs)
2816 func(*args, **kwargs)
2816 self._atexit.append(wrapper)
2817 self._atexit.append(wrapper)
2817 return func
2818 return func
2818
2819
2819 def __exit__(self, exc_type, exc_val, exc_tb):
2820 def __exit__(self, exc_type, exc_val, exc_tb):
2820 '''Context managers are exited in the reverse order from which
2821 '''Context managers are exited in the reverse order from which
2821 they were created.'''
2822 they were created.'''
2822 received = exc_type is not None
2823 received = exc_type is not None
2823 suppressed = False
2824 suppressed = False
2824 pending = None
2825 pending = None
2825 self._atexit.reverse()
2826 self._atexit.reverse()
2826 for exitfunc in self._atexit:
2827 for exitfunc in self._atexit:
2827 try:
2828 try:
2828 if exitfunc(exc_type, exc_val, exc_tb):
2829 if exitfunc(exc_type, exc_val, exc_tb):
2829 suppressed = True
2830 suppressed = True
2830 exc_type = None
2831 exc_type = None
2831 exc_val = None
2832 exc_val = None
2832 exc_tb = None
2833 exc_tb = None
2833 except BaseException:
2834 except BaseException:
2834 pending = sys.exc_info()
2835 pending = sys.exc_info()
2835 exc_type, exc_val, exc_tb = pending = sys.exc_info()
2836 exc_type, exc_val, exc_tb = pending = sys.exc_info()
2836 del self._atexit
2837 del self._atexit
2837 if pending:
2838 if pending:
2838 raise exc_val
2839 raise exc_val
2839 return received and suppressed
2840 return received and suppressed
2840
2841
2841 def _bz2():
2842 def _bz2():
2842 d = bz2.BZ2Decompressor()
2843 d = bz2.BZ2Decompressor()
2843 # Bzip2 stream start with BZ, but we stripped it.
2844 # Bzip2 stream start with BZ, but we stripped it.
2844 # we put it back for good measure.
2845 # we put it back for good measure.
2845 d.decompress('BZ')
2846 d.decompress('BZ')
2846 return d
2847 return d
2847
2848
2848 decompressors = {None: lambda fh: fh,
2849 decompressors = {None: lambda fh: fh,
2849 '_truncatedBZ': _makedecompressor(_bz2),
2850 '_truncatedBZ': _makedecompressor(_bz2),
2850 'BZ': _makedecompressor(lambda: bz2.BZ2Decompressor()),
2851 'BZ': _makedecompressor(lambda: bz2.BZ2Decompressor()),
2851 'GZ': _makedecompressor(lambda: zlib.decompressobj()),
2852 'GZ': _makedecompressor(lambda: zlib.decompressobj()),
2852 }
2853 }
2853 # also support the old form by courtesies
2854 # also support the old form by courtesies
2854 decompressors['UN'] = decompressors[None]
2855 decompressors['UN'] = decompressors[None]
2855
2856
2856 # convenient shortcut
2857 # convenient shortcut
2857 dst = debugstacktrace
2858 dst = debugstacktrace
@@ -1,473 +1,476
1 # windows.py - Windows utility function implementations for Mercurial
1 # windows.py - Windows utility function implementations for Mercurial
2 #
2 #
3 # Copyright 2005-2009 Matt Mackall <mpm@selenic.com> and others
3 # Copyright 2005-2009 Matt Mackall <mpm@selenic.com> and others
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 from __future__ import absolute_import
8 from __future__ import absolute_import
9
9
10 import _winreg
10 import _winreg
11 import errno
11 import errno
12 import msvcrt
12 import msvcrt
13 import os
13 import os
14 import re
14 import re
15 import stat
15 import stat
16 import sys
16 import sys
17
17
18 from .i18n import _
18 from .i18n import _
19 from . import (
19 from . import (
20 encoding,
20 encoding,
21 osutil,
21 osutil,
22 win32,
22 win32,
23 )
23 )
24
24
25 executablepath = win32.executablepath
25 executablepath = win32.executablepath
26 getuser = win32.getuser
26 getuser = win32.getuser
27 hidewindow = win32.hidewindow
27 hidewindow = win32.hidewindow
28 makedir = win32.makedir
28 makedir = win32.makedir
29 nlinks = win32.nlinks
29 nlinks = win32.nlinks
30 oslink = win32.oslink
30 oslink = win32.oslink
31 samedevice = win32.samedevice
31 samedevice = win32.samedevice
32 samefile = win32.samefile
32 samefile = win32.samefile
33 setsignalhandler = win32.setsignalhandler
33 setsignalhandler = win32.setsignalhandler
34 spawndetached = win32.spawndetached
34 spawndetached = win32.spawndetached
35 split = os.path.split
35 split = os.path.split
36 termwidth = win32.termwidth
36 termwidth = win32.termwidth
37 testpid = win32.testpid
37 testpid = win32.testpid
38 unlink = win32.unlink
38 unlink = win32.unlink
39
39
40 umask = 0o022
40 umask = 0o022
41
41
42 class mixedfilemodewrapper(object):
42 class mixedfilemodewrapper(object):
43 """Wraps a file handle when it is opened in read/write mode.
43 """Wraps a file handle when it is opened in read/write mode.
44
44
45 fopen() and fdopen() on Windows have a specific-to-Windows requirement
45 fopen() and fdopen() on Windows have a specific-to-Windows requirement
46 that files opened with mode r+, w+, or a+ make a call to a file positioning
46 that files opened with mode r+, w+, or a+ make a call to a file positioning
47 function when switching between reads and writes. Without this extra call,
47 function when switching between reads and writes. Without this extra call,
48 Python will raise a not very intuitive "IOError: [Errno 0] Error."
48 Python will raise a not very intuitive "IOError: [Errno 0] Error."
49
49
50 This class wraps posixfile instances when the file is opened in read/write
50 This class wraps posixfile instances when the file is opened in read/write
51 mode and automatically adds checks or inserts appropriate file positioning
51 mode and automatically adds checks or inserts appropriate file positioning
52 calls when necessary.
52 calls when necessary.
53 """
53 """
54 OPNONE = 0
54 OPNONE = 0
55 OPREAD = 1
55 OPREAD = 1
56 OPWRITE = 2
56 OPWRITE = 2
57
57
58 def __init__(self, fp):
58 def __init__(self, fp):
59 object.__setattr__(self, '_fp', fp)
59 object.__setattr__(self, '_fp', fp)
60 object.__setattr__(self, '_lastop', 0)
60 object.__setattr__(self, '_lastop', 0)
61
61
62 def __getattr__(self, name):
62 def __getattr__(self, name):
63 return getattr(self._fp, name)
63 return getattr(self._fp, name)
64
64
65 def __setattr__(self, name, value):
65 def __setattr__(self, name, value):
66 return self._fp.__setattr__(name, value)
66 return self._fp.__setattr__(name, value)
67
67
68 def _noopseek(self):
68 def _noopseek(self):
69 self._fp.seek(0, os.SEEK_CUR)
69 self._fp.seek(0, os.SEEK_CUR)
70
70
71 def seek(self, *args, **kwargs):
71 def seek(self, *args, **kwargs):
72 object.__setattr__(self, '_lastop', self.OPNONE)
72 object.__setattr__(self, '_lastop', self.OPNONE)
73 return self._fp.seek(*args, **kwargs)
73 return self._fp.seek(*args, **kwargs)
74
74
75 def write(self, d):
75 def write(self, d):
76 if self._lastop == self.OPREAD:
76 if self._lastop == self.OPREAD:
77 self._noopseek()
77 self._noopseek()
78
78
79 object.__setattr__(self, '_lastop', self.OPWRITE)
79 object.__setattr__(self, '_lastop', self.OPWRITE)
80 return self._fp.write(d)
80 return self._fp.write(d)
81
81
82 def writelines(self, *args, **kwargs):
82 def writelines(self, *args, **kwargs):
83 if self._lastop == self.OPREAD:
83 if self._lastop == self.OPREAD:
84 self._noopeseek()
84 self._noopeseek()
85
85
86 object.__setattr__(self, '_lastop', self.OPWRITE)
86 object.__setattr__(self, '_lastop', self.OPWRITE)
87 return self._fp.writelines(*args, **kwargs)
87 return self._fp.writelines(*args, **kwargs)
88
88
89 def read(self, *args, **kwargs):
89 def read(self, *args, **kwargs):
90 if self._lastop == self.OPWRITE:
90 if self._lastop == self.OPWRITE:
91 self._noopseek()
91 self._noopseek()
92
92
93 object.__setattr__(self, '_lastop', self.OPREAD)
93 object.__setattr__(self, '_lastop', self.OPREAD)
94 return self._fp.read(*args, **kwargs)
94 return self._fp.read(*args, **kwargs)
95
95
96 def readline(self, *args, **kwargs):
96 def readline(self, *args, **kwargs):
97 if self._lastop == self.OPWRITE:
97 if self._lastop == self.OPWRITE:
98 self._noopseek()
98 self._noopseek()
99
99
100 object.__setattr__(self, '_lastop', self.OPREAD)
100 object.__setattr__(self, '_lastop', self.OPREAD)
101 return self._fp.readline(*args, **kwargs)
101 return self._fp.readline(*args, **kwargs)
102
102
103 def readlines(self, *args, **kwargs):
103 def readlines(self, *args, **kwargs):
104 if self._lastop == self.OPWRITE:
104 if self._lastop == self.OPWRITE:
105 self._noopseek()
105 self._noopseek()
106
106
107 object.__setattr__(self, '_lastop', self.OPREAD)
107 object.__setattr__(self, '_lastop', self.OPREAD)
108 return self._fp.readlines(*args, **kwargs)
108 return self._fp.readlines(*args, **kwargs)
109
109
110 def posixfile(name, mode='r', buffering=-1):
110 def posixfile(name, mode='r', buffering=-1):
111 '''Open a file with even more POSIX-like semantics'''
111 '''Open a file with even more POSIX-like semantics'''
112 try:
112 try:
113 fp = osutil.posixfile(name, mode, buffering) # may raise WindowsError
113 fp = osutil.posixfile(name, mode, buffering) # may raise WindowsError
114
114
115 # The position when opening in append mode is implementation defined, so
115 # The position when opening in append mode is implementation defined, so
116 # make it consistent with other platforms, which position at EOF.
116 # make it consistent with other platforms, which position at EOF.
117 if 'a' in mode:
117 if 'a' in mode:
118 fp.seek(0, os.SEEK_END)
118 fp.seek(0, os.SEEK_END)
119
119
120 if '+' in mode:
120 if '+' in mode:
121 return mixedfilemodewrapper(fp)
121 return mixedfilemodewrapper(fp)
122
122
123 return fp
123 return fp
124 except WindowsError as err:
124 except WindowsError as err:
125 # convert to a friendlier exception
125 # convert to a friendlier exception
126 raise IOError(err.errno, '%s: %s' % (name, err.strerror))
126 raise IOError(err.errno, '%s: %s' % (name, err.strerror))
127
127
class winstdout(object):
    '''stdout on windows misbehaves if sent through a pipe'''

    def __init__(self, fp):
        # keep the wrapped stream; everything not overridden is delegated
        self.fp = fp

    def __getattr__(self, key):
        # delegate unknown attributes to the underlying stream
        return getattr(self.fp, key)

    def close(self):
        try:
            self.fp.close()
        except IOError:
            pass

    def write(self, s):
        try:
            # Writing a large buffer to a console can fail with
            # "Not enough space", so emit the data in bounded chunks.
            self.softspace = 0
            pos, total, step = 0, len(s), 16000
            while pos < total:
                self.fp.write(s[pos:pos + step])
                pos += step
        except IOError as inst:
            if inst.errno != 0:
                raise
            self.close()
            raise IOError(errno.EPIPE, 'Broken pipe')

    def flush(self):
        try:
            return self.fp.flush()
        except IOError as inst:
            if inst.errno != errno.EINVAL:
                raise
            self.close()
            raise IOError(errno.EPIPE, 'Broken pipe')
169
169
# Replace the process's stdout so every write goes through the chunking
# workaround implemented by winstdout above.
sys.__stdout__ = sys.stdout = winstdout(sys.stdout)
171
171
172 def _is_win_9x():
172 def _is_win_9x():
173 '''return true if run on windows 95, 98 or me.'''
173 '''return true if run on windows 95, 98 or me.'''
174 try:
174 try:
175 return sys.getwindowsversion()[3] == 1
175 return sys.getwindowsversion()[3] == 1
176 except AttributeError:
176 except AttributeError:
177 return 'command' in os.environ.get('comspec', '')
177 return 'command' in os.environ.get('comspec', '')
178
178
def openhardlinks():
    """Return whether hardlinked files can safely be kept open."""
    # everything newer than the Windows 9x family handles this fine
    return not _is_win_9x()
181
181
def parsepatchoutput(output_line):
    """parses the output produced by patch and returns the filename"""
    fname = output_line[14:]  # skip the "patching file " prefix
    if fname[0] == '`':
        # patch quotes names containing special characters: `like this'
        fname = fname[1:-1]
    return fname
188
188
def sshargs(sshcmd, host, user, port):
    '''Build argument list for ssh or Plink'''
    if 'plink' in sshcmd.lower():
        pflag = '-P'  # plink spells the port option with a capital P
    else:
        pflag = '-p'
    args = '%s@%s' % (user, host) if user else host
    if port:
        return '%s %s %s' % (args, pflag, port)
    return args
194
194
def setflags(f, l, x):
    """No-op: symlink/exec flags cannot be set on this platform."""
    pass
197
197
def copymode(src, dst, mode=None):
    """No-op: permission bits are not tracked on this platform."""
    pass
200
200
def checkexec(path):
    """Return whether the filesystem at path supports the exec bit
    (never, on this platform)."""
    return False
203
203
def checklink(path):
    """Return whether the filesystem at path supports symlinks
    (never, on this platform)."""
    return False
206
206
def setbinary(fd):
    """Put the given file object into binary mode."""
    # When run without console, pipes may expose invalid
    # fileno(), usually set to -1; skip those.
    fno = getattr(fd, 'fileno', None)
    if fno is None:
        return
    n = fno()
    if n >= 0:
        msvcrt.setmode(n, os.O_BINARY)
213
213
def pconvert(path):
    """Return path with local separators replaced by forward slashes."""
    return path.replace(os.sep, '/')
216
216
def localpath(path):
    """Return path with forward slashes replaced by backslashes."""
    return path.replace('/', '\\')
219
219
def normpath(path):
    """os.path.normpath, but always returning forward slashes."""
    return pconvert(os.path.normpath(path))
222
222
def normcase(path):
    """Fold the case of path for comparison purposes."""
    # NTFS compares via upper()
    return encoding.upper(path)
225
225
# see posix.py for definitions
# NTFS is case-insensitive via upper-casing, so advertise the matching
# spec and fallback folder from the encoding module.
normcasespec = encoding.normcasespecs.upper
normcasefallback = encoding.upperfallback
229
229
def samestat(s1, s2):
    """Stat results cannot be reliably compared on this platform."""
    return False
232
232
233 # A sequence of backslashes is special iff it precedes a double quote:
233 # A sequence of backslashes is special iff it precedes a double quote:
234 # - if there's an even number of backslashes, the double quote is not
234 # - if there's an even number of backslashes, the double quote is not
235 # quoted (i.e. it ends the quoted region)
235 # quoted (i.e. it ends the quoted region)
236 # - if there's an odd number of backslashes, the double quote is quoted
236 # - if there's an odd number of backslashes, the double quote is quoted
237 # - in both cases, every pair of backslashes is unquoted into a single
237 # - in both cases, every pair of backslashes is unquoted into a single
238 # backslash
238 # backslash
239 # (See http://msdn2.microsoft.com/en-us/library/a1y7w461.aspx )
239 # (See http://msdn2.microsoft.com/en-us/library/a1y7w461.aspx )
240 # So, to quote a string, we must surround it in double quotes, double
240 # So, to quote a string, we must surround it in double quotes, double
241 # the number of backslashes that precede double quotes and add another
241 # the number of backslashes that precede double quotes and add another
242 # backslash before every double quote (being careful with the double
242 # backslash before every double quote (being careful with the double
243 # quote we've appended to the end)
243 # quote we've appended to the end)
244 _quotere = None
244 _quotere = None
245 _needsshellquote = None
245 _needsshellquote = None
246 def shellquote(s):
246 def shellquote(s):
247 r"""
247 r"""
248 >>> shellquote(r'C:\Users\xyz')
248 >>> shellquote(r'C:\Users\xyz')
249 '"C:\\Users\\xyz"'
249 '"C:\\Users\\xyz"'
250 >>> shellquote(r'C:\Users\xyz/mixed')
250 >>> shellquote(r'C:\Users\xyz/mixed')
251 '"C:\\Users\\xyz/mixed"'
251 '"C:\\Users\\xyz/mixed"'
252 >>> # Would be safe not to quote too, since it is all double backslashes
252 >>> # Would be safe not to quote too, since it is all double backslashes
253 >>> shellquote(r'C:\\Users\\xyz')
253 >>> shellquote(r'C:\\Users\\xyz')
254 '"C:\\\\Users\\\\xyz"'
254 '"C:\\\\Users\\\\xyz"'
255 >>> # But this must be quoted
255 >>> # But this must be quoted
256 >>> shellquote(r'C:\\Users\\xyz/abc')
256 >>> shellquote(r'C:\\Users\\xyz/abc')
257 '"C:\\\\Users\\\\xyz/abc"'
257 '"C:\\\\Users\\\\xyz/abc"'
258 """
258 """
259 global _quotere
259 global _quotere
260 if _quotere is None:
260 if _quotere is None:
261 _quotere = re.compile(r'(\\*)("|\\$)')
261 _quotere = re.compile(r'(\\*)("|\\$)')
262 global _needsshellquote
262 global _needsshellquote
263 if _needsshellquote is None:
263 if _needsshellquote is None:
264 # ":" is also treated as "safe character", because it is used as a part
264 # ":" is also treated as "safe character", because it is used as a part
265 # of path name on Windows. "\" is also part of a path name, but isn't
265 # of path name on Windows. "\" is also part of a path name, but isn't
266 # safe because shlex.split() (kind of) treats it as an escape char and
266 # safe because shlex.split() (kind of) treats it as an escape char and
267 # drops it. It will leave the next character, even if it is another
267 # drops it. It will leave the next character, even if it is another
268 # "\".
268 # "\".
269 _needsshellquote = re.compile(r'[^a-zA-Z0-9._:/-]').search
269 _needsshellquote = re.compile(r'[^a-zA-Z0-9._:/-]').search
270 if s and not _needsshellquote(s) and not _quotere.search(s):
270 if s and not _needsshellquote(s) and not _quotere.search(s):
271 # "s" shouldn't have to be quoted
271 # "s" shouldn't have to be quoted
272 return s
272 return s
273 return '"%s"' % _quotere.sub(r'\1\1\\\2', s)
273 return '"%s"' % _quotere.sub(r'\1\1\\\2', s)
274
274
def quotecommand(cmd):
    """Build a command string suitable for os.popen* calls."""
    if sys.version_info >= (2, 7, 1):
        # Python versions since 2.7.1 do this extra quoting themselves
        return cmd
    return '"' + cmd + '"'
281
281
def popen(command, mode='r'):
    """os.popen with stderr redirected to the bit bucket.

    Works around "popen spawned process may not write to stdout under
    windows" (http://bugs.python.org/issue1366).
    """
    redirected = '%s 2> %s' % (command, os.devnull)
    return os.popen(quotecommand(redirected), mode)
288
288
def explainexit(code):
    """Return a (message, exitcode) pair describing a process exit status."""
    return _("exited with status %d") % code, code
291
291
292 # if you change this stub into a real check, please try to implement the
292 # if you change this stub into a real check, please try to implement the
293 # username and groupname functions above, too.
293 # username and groupname functions above, too.
def isowner(st):
    """Pretend the current user owns the file described by st."""
    # ownership checks are not implemented on this platform
    return True
296
296
def findexe(command):
    '''Find executable for command searching like cmd.exe does.
    If command is a basename then PATH is searched for command.
    PATH isn't searched if command is an absolute or relative path.
    An extension from PATHEXT is found and added if not present.
    If command isn't found None is returned.'''
    pathext = os.environ.get('PATHEXT', '.COM;.EXE;.BAT;.CMD')
    exts = pathext.lower().split(os.pathsep)
    if os.path.splitext(command)[1].lower() in exts:
        # command already carries a recognized extension; try it verbatim
        exts = ['']

    def existing(prefix):
        'Return the first existing file among prefix + each extension.'
        for ext in exts:
            candidate = prefix + ext
            if os.path.exists(candidate):
                return candidate
        return None

    if os.sep in command:
        # absolute or relative path: do not consult PATH
        return existing(command)

    for d in os.environ.get('PATH', '').split(os.pathsep):
        found = existing(os.path.join(d, command))
        if found is not None:
            return found
    # last resort: expand ~ and environment variables in the name itself
    return existing(os.path.expanduser(os.path.expandvars(command)))
324
324
# the only file types statfiles() reports on: regular files and symlinks
_wantedkinds = set([stat.S_IFREG, stat.S_IFLNK])
326
326
def statfiles(files):
    '''Stat each file in files. Yield each stat, or None if a file
    does not exist or has a type we don't care about.

    Cluster and cache stat per directory to minimize number of OS stat calls.'''
    dircache = {} # dirname -> filename -> status | None if file does not exist
    getkind = stat.S_IFMT
    for nf in files:
        nf = normcase(nf)
        dir, base = os.path.split(nf)
        if not dir:
            dir = '.'
        cache = dircache.get(dir, None)
        if cache is None:
            # first file seen in this directory: list it once and keep
            # only entries whose type we care about (see _wantedkinds)
            try:
                dmap = dict([(normcase(n), s)
                             for n, k, s in osutil.listdir(dir, True)
                             if getkind(s.st_mode) in _wantedkinds])
            except OSError as err:
                # Python >= 2.5 returns ENOENT and adds winerror field
                # EINVAL is raised if dir is not a directory.
                if err.errno not in (errno.ENOENT, errno.EINVAL,
                                     errno.ENOTDIR):
                    raise
                # unreadable/missing directory: every lookup below misses
                dmap = {}
            cache = dircache.setdefault(dir, dmap)
        # miss means the file is absent or of an uninteresting type
        yield cache.get(base, None)
354
354
def username(uid=None):
    """Return the name of the user with the given uid.

    If uid is None, return the name of the current user."""
    # user name lookup is not implemented on this platform
    return None
360
360
def groupname(gid=None):
    """Return the name of the group with the given gid.

    If gid is None, return the name of the current group."""
    # group name lookup is not implemented on this platform
    return None
366
366
def removedirs(name):
    """special version of os.removedirs that does not remove symlinked
    directories or junction points if they actually contain files"""
    # a non-empty listing means the directory (or whatever it points to)
    # still holds files; leave it alone
    if osutil.listdir(name):
        return
    os.rmdir(name)
    head, tail = os.path.split(name)
    if not tail:
        # name ended with a separator: split again to get the real parent
        head, tail = os.path.split(head)
    # walk up the tree, pruning each ancestor that turns out to be empty
    while head and tail:
        try:
            if osutil.listdir(head):
                return
            os.rmdir(head)
        except (ValueError, OSError):
            # stop at the first ancestor that cannot be listed or removed
            break
        head, tail = os.path.split(head)
384
384
def unlinkpath(f, ignoremissing=False):
    """unlink and remove the directory if it is empty"""
    try:
        unlink(f)
    except OSError as e:
        # a missing file is only tolerated when the caller asked for it
        if e.errno != errno.ENOENT or not ignoremissing:
            raise
    # try removing directories that might now be empty
    try:
        removedirs(os.path.dirname(f))
    except OSError:
        pass
397
397
def rename(src, dst):
    '''atomically rename file src to dst, replacing dst if it exists'''
    try:
        os.rename(src, dst)
    except OSError as e:
        if e.errno != errno.EEXIST:
            raise
        # the destination already exists and this platform refuses to
        # overwrite it: remove it first, then retry
        unlink(dst)
        os.rename(src, dst)
407
407
def gethgcmd():
    """Return the command list used to launch this hg process."""
    # the interpreter plus the script path (at most the first argv entry)
    return [sys.executable] + sys.argv[:1]
410
410
def groupmembers(name):
    """Always raise KeyError."""
    # Don't support groups on Windows for now
    raise KeyError
414
414
def isexec(f):
    """The executable bit does not exist on this platform."""
    return False
417
417
class cachestat(object):
    """Stub stat-cache entry; validation caching is unsupported here."""

    def __init__(self, path):
        # nothing worth recording on this platform
        pass

    def cacheable(self):
        """Whether this stat result can be used for cache validation."""
        return False
424
424
def lookupreg(key, valname=None, scope=None):
    ''' Look up a key/value name in the Windows registry.

    valname: value name. If unspecified, the default value for the key
    is used.
    scope: optionally specify scope for registry lookup, this can be
    a sequence of scopes to look up in order. Default (CURRENT_USER,
    LOCAL_MACHINE).

    Returns None when the value is found in no scope.
    '''
    if scope is None:
        scope = (_winreg.HKEY_CURRENT_USER, _winreg.HKEY_LOCAL_MACHINE)
    elif not isinstance(scope, (list, tuple)):
        # a single scope was passed; normalize it to a sequence
        scope = (scope,)
    for s in scope:
        try:
            val = _winreg.QueryValueEx(_winreg.OpenKey(s, key), valname)[0]
            # never let a Unicode string escape into the wild
            return encoding.tolocal(val.encode('UTF-8'))
        except EnvironmentError:
            # missing key/value in this scope: try the next one
            pass
445
445
# NOTE(review): presumably signals callers to expand glob patterns on
# behalf of the shell (the posix.py counterpart sets the opposite) —
# confirm against the dispatch code that reads this flag.
expandglobs = True
447
447
def statislink(st):
    '''check whether a stat result is a symlink'''
    # symlinks are never reported on this platform
    return False
451
451
def statisexec(st):
    '''check whether a stat result is an executable file'''
    # the exec bit is never reported on this platform
    return False
455
455
def poll(fds):
    """Block until data is available on any of fds (see posix.py).

    Not implemented on this platform.
    """
    raise NotImplementedError()
459
459
def readpipe(pipe):
    """Read all available data from a pipe."""
    pieces = []
    # peekpipe reports how much can be read without blocking; stop once
    # the pipe has nothing pending or read() comes up empty
    avail = win32.peekpipe(pipe)
    while avail:
        buf = pipe.read(avail)
        if not buf:
            break
        pieces.append(buf)
        avail = win32.peekpipe(pipe)
    return ''.join(pieces)
474
def bindunixsocket(sock, path):
    """Bind a unix domain socket to path (see posix.py).

    Unix domain sockets are not available on this platform.
    """
    raise NotImplementedError('unsupported platform')
General Comments 0
You need to be logged in to leave comments. Login now