chgserver: extract utility to bind unix domain socket to long path...
Yuya Nishihara
r29530:3239e2fd default
@@ -1,675 +1,664 @@
1 1 # chgserver.py - command server extension for cHg
2 2 #
3 3 # Copyright 2011 Yuya Nishihara <yuya@tcha.org>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 """command server extension for cHg (EXPERIMENTAL)
9 9
10 10 'S' channel (read/write)
11 11 propagate ui.system() request to client
12 12
13 13 'attachio' command
14 14 attach client's stdio passed by sendmsg()
15 15
16 16 'chdir' command
17 17 change current directory
18 18
19 19 'getpager' command
20 20 checks if pager is enabled and which pager should be executed
21 21
22 22 'setenv' command
23 23 replace os.environ completely
24 24
25 25 'setumask' command
26 26 set umask
27 27
28 28 'validate' command
29 29 reload the config and check if the server is up to date
30 30
31 31 Config
32 32 ------
33 33
34 34 ::
35 35
36 36 [chgserver]
37 37 idletimeout = 3600 # seconds, after which an idle server will exit
38 38 skiphash = False # whether to skip config or env change checks
39 39 """
40 40
41 41 from __future__ import absolute_import
42 42
43 43 import errno
44 44 import hashlib
45 45 import inspect
46 46 import os
47 47 import re
48 48 import signal
49 49 import struct
50 50 import sys
51 51 import threading
52 52 import time
53 53
54 54 from mercurial.i18n import _
55 55
56 56 from mercurial import (
57 57 cmdutil,
58 58 commands,
59 59 commandserver,
60 60 dispatch,
61 61 error,
62 62 extensions,
63 63 osutil,
64 64 util,
65 65 )
66 66
67 67 socketserver = util.socketserver
68 68
69 69 # Note for extension authors: ONLY specify testedwith = 'internal' for
70 70 # extensions which SHIP WITH MERCURIAL. Non-mainline extensions should
71 71 # be specifying the version(s) of Mercurial they are tested with, or
72 72 # leave the attribute unspecified.
73 73 testedwith = 'internal'
74 74
75 75 _log = commandserver.log
76 76
77 77 def _hashlist(items):
78 78 """return sha1 hexdigest for a list"""
79 79 return hashlib.sha1(str(items)).hexdigest()
80 80
81 81 # sensitive config sections affecting confighash
82 82 _configsections = [
83 83 'alias', # affects global state commands.table
84 84 'extdiff', # uisetup will register new commands
85 85 'extensions',
86 86 ]
87 87
88 88 # sensitive environment variables affecting confighash
89 89 _envre = re.compile(r'''\A(?:
90 90 CHGHG
91 91 |HG.*
92 92 |LANG(?:UAGE)?
93 93 |LC_.*
94 94 |LD_.*
95 95 |PATH
96 96 |PYTHON.*
97 97 |TERM(?:INFO)?
98 98 |TZ
99 99 )\Z''', re.X)
100 100
101 101 def _confighash(ui):
102 102 """return a quick hash for detecting config/env changes
103 103
104 104 confighash is the hash of sensitive config items and environment variables.
105 105
106 106 for chgserver, it is designed that once confighash changes, the server is
107 107 not qualified to serve its client and should redirect the client to a new
108 108 server. different from mtimehash, confighash change will not mark the
109 109 server outdated and exit since the user can have different configs at the
110 110 same time.
111 111 """
112 112 sectionitems = []
113 113 for section in _configsections:
114 114 sectionitems.append(ui.configitems(section))
115 115 sectionhash = _hashlist(sectionitems)
116 116 envitems = [(k, v) for k, v in os.environ.iteritems() if _envre.match(k)]
117 117 envhash = _hashlist(sorted(envitems))
118 118 return sectionhash[:6] + envhash[:6]
119 119
120 120 def _getmtimepaths(ui):
121 121 """get a list of paths that should be checked to detect change
122 122
123 123 The list will include:
124 124 - extensions (will not cover all files for complex extensions)
125 125 - mercurial/__version__.py
126 126 - python binary
127 127 """
128 128 modules = [m for n, m in extensions.extensions(ui)]
129 129 try:
130 130 from mercurial import __version__
131 131 modules.append(__version__)
132 132 except ImportError:
133 133 pass
134 134 files = [sys.executable]
135 135 for m in modules:
136 136 try:
137 137 files.append(inspect.getabsfile(m))
138 138 except TypeError:
139 139 pass
140 140 return sorted(set(files))
141 141
142 142 def _mtimehash(paths):
143 143 """return a quick hash for detecting file changes
144 144
145 145 mtimehash calls stat on given paths and calculate a hash based on size and
146 146 mtime of each file. mtimehash does not read file content because reading is
147 147 expensive. therefore it's not 100% reliable for detecting content changes.
148 148 it's possible to return different hashes for same file contents.
149 149 it's also possible to return a same hash for different file contents for
150 150 some carefully crafted situation.
151 151
152 152 for chgserver, it is designed that once mtimehash changes, the server is
153 153 considered outdated immediately and should no longer provide service.
154 154
155 155 mtimehash is not included in confighash because we only know the paths of
156 156 extensions after importing them (there is imp.find_module but that faces
157 157 race conditions). We need to calculate confighash without importing.
158 158 """
159 159 def trystat(path):
160 160 try:
161 161 st = os.stat(path)
162 162 return (st.st_mtime, st.st_size)
163 163 except OSError:
164 164 # could be ENOENT, EPERM etc. not fatal in any case
165 165 pass
166 166 return _hashlist(map(trystat, paths))[:12]
167 167
168 168 class hashstate(object):
169 169 """a structure storing confighash, mtimehash, paths used for mtimehash"""
170 170 def __init__(self, confighash, mtimehash, mtimepaths):
171 171 self.confighash = confighash
172 172 self.mtimehash = mtimehash
173 173 self.mtimepaths = mtimepaths
174 174
175 175 @staticmethod
176 176 def fromui(ui, mtimepaths=None):
177 177 if mtimepaths is None:
178 178 mtimepaths = _getmtimepaths(ui)
179 179 confighash = _confighash(ui)
180 180 mtimehash = _mtimehash(mtimepaths)
181 181 _log('confighash = %s mtimehash = %s\n' % (confighash, mtimehash))
182 182 return hashstate(confighash, mtimehash, mtimepaths)
183 183
184 184 # copied from hgext/pager.py:uisetup()
185 185 def _setuppagercmd(ui, options, cmd):
186 186 if not ui.formatted():
187 187 return
188 188
189 189 p = ui.config("pager", "pager", os.environ.get("PAGER"))
190 190 usepager = False
191 191 always = util.parsebool(options['pager'])
192 192 auto = options['pager'] == 'auto'
193 193
194 194 if not p:
195 195 pass
196 196 elif always:
197 197 usepager = True
198 198 elif not auto:
199 199 usepager = False
200 200 else:
201 201 attended = ['annotate', 'cat', 'diff', 'export', 'glog', 'log', 'qdiff']
202 202 attend = ui.configlist('pager', 'attend', attended)
203 203 ignore = ui.configlist('pager', 'ignore')
204 204 cmds, _ = cmdutil.findcmd(cmd, commands.table)
205 205
206 206 for cmd in cmds:
207 207 var = 'attend-%s' % cmd
208 208 if ui.config('pager', var):
209 209 usepager = ui.configbool('pager', var)
210 210 break
211 211 if (cmd in attend or
212 212 (cmd not in ignore and not attend)):
213 213 usepager = True
214 214 break
215 215
216 216 if usepager:
217 217 ui.setconfig('ui', 'formatted', ui.formatted(), 'pager')
218 218 ui.setconfig('ui', 'interactive', False, 'pager')
219 219 return p
220 220
221 221 def _newchgui(srcui, csystem):
222 222 class chgui(srcui.__class__):
223 223 def __init__(self, src=None):
224 224 super(chgui, self).__init__(src)
225 225 if src:
226 226 self._csystem = getattr(src, '_csystem', csystem)
227 227 else:
228 228 self._csystem = csystem
229 229
230 230 def system(self, cmd, environ=None, cwd=None, onerr=None,
231 231 errprefix=None):
232 232 # fallback to the original system method if the output needs to be
233 233 # captured (to self._buffers), or the output stream is not stdout
234 234 # (e.g. stderr, cStringIO), because the chg client is not aware of
235 235 # these situations and will behave differently (write to stdout).
236 236 if (any(s[1] for s in self._bufferstates)
237 237 or not util.safehasattr(self.fout, 'fileno')
238 238 or self.fout.fileno() != sys.stdout.fileno()):
239 239 return super(chgui, self).system(cmd, environ, cwd, onerr,
240 240 errprefix)
241 241 # copied from mercurial/util.py:system()
242 242 self.flush()
243 243 def py2shell(val):
244 244 if val is None or val is False:
245 245 return '0'
246 246 if val is True:
247 247 return '1'
248 248 return str(val)
249 249 env = os.environ.copy()
250 250 if environ:
251 251 env.update((k, py2shell(v)) for k, v in environ.iteritems())
252 252 env['HG'] = util.hgexecutable()
253 253 rc = self._csystem(cmd, env, cwd)
254 254 if rc and onerr:
255 255 errmsg = '%s %s' % (os.path.basename(cmd.split(None, 1)[0]),
256 256 util.explainexit(rc)[0])
257 257 if errprefix:
258 258 errmsg = '%s: %s' % (errprefix, errmsg)
259 259 raise onerr(errmsg)
260 260 return rc
261 261
262 262 return chgui(srcui)
263 263
264 264 def _loadnewui(srcui, args):
265 265 newui = srcui.__class__()
266 266 for a in ['fin', 'fout', 'ferr', 'environ']:
267 267 setattr(newui, a, getattr(srcui, a))
268 268 if util.safehasattr(srcui, '_csystem'):
269 269 newui._csystem = srcui._csystem
270 270
271 271 # internal config: extensions.chgserver
272 272 newui.setconfig('extensions', 'chgserver',
273 273 srcui.config('extensions', 'chgserver'), '--config')
274 274
275 275 # command line args
276 276 args = args[:]
277 277 dispatch._parseconfig(newui, dispatch._earlygetopt(['--config'], args))
278 278
279 279 # stolen from tortoisehg.util.copydynamicconfig()
280 280 for section, name, value in srcui.walkconfig():
281 281 source = srcui.configsource(section, name)
282 282 if ':' in source or source == '--config':
283 283 # path:line or command line
284 284 continue
285 285 if source == 'none':
286 286 # ui.configsource returns 'none' by default
287 287 source = ''
288 288 newui.setconfig(section, name, value, source)
289 289
290 290 # load wd and repo config, copied from dispatch.py
291 291 cwds = dispatch._earlygetopt(['--cwd'], args)
292 292 cwd = cwds and os.path.realpath(cwds[-1]) or None
293 293 rpath = dispatch._earlygetopt(["-R", "--repository", "--repo"], args)
294 294 path, newlui = dispatch._getlocal(newui, rpath, wd=cwd)
295 295
296 296 return (newui, newlui)
297 297
298 298 class channeledsystem(object):
299 299 """Propagate ui.system() request in the following format:
300 300
301 301 payload length (unsigned int),
302 302 cmd, '\0',
303 303 cwd, '\0',
304 304 envkey, '=', val, '\0',
305 305 ...
306 306 envkey, '=', val
307 307
308 308 and waits:
309 309
310 310 exitcode length (unsigned int),
311 311 exitcode (int)
312 312 """
313 313 def __init__(self, in_, out, channel):
314 314 self.in_ = in_
315 315 self.out = out
316 316 self.channel = channel
317 317
318 318 def __call__(self, cmd, environ, cwd):
319 319 args = [util.quotecommand(cmd), os.path.abspath(cwd or '.')]
320 320 args.extend('%s=%s' % (k, v) for k, v in environ.iteritems())
321 321 data = '\0'.join(args)
322 322 self.out.write(struct.pack('>cI', self.channel, len(data)))
323 323 self.out.write(data)
324 324 self.out.flush()
325 325
326 326 length = self.in_.read(4)
327 327 length, = struct.unpack('>I', length)
328 328 if length != 4:
329 329 raise error.Abort(_('invalid response'))
330 330 rc, = struct.unpack('>i', self.in_.read(4))
331 331 return rc
332 332
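The 'S' channel frame documented in the docstring above is easy to handle on the receiving side: split the payload on '\0', run the command, and answer with the two fields that the struct.unpack() calls above expect. A minimal sketch in Python (the real chg client is written in C, and handlesystemrequest() is a made-up name)::

    import struct
    import subprocess

    def handlesystemrequest(to_server, data):
        # "data" is the payload that followed the '>cI' header on the 'S'
        # channel: cmd '\0' cwd '\0' key=val '\0' ... key=val
        args = data.split('\0')
        cmd, cwd = args[0], args[1]
        env = dict(item.split('=', 1) for item in args[2:])
        rc = subprocess.call(cmd, shell=True, cwd=cwd, env=env)
        # reply with a 4-byte length (always 4) followed by the exit code
        to_server.write(struct.pack('>I', 4))
        to_server.write(struct.pack('>i', rc))
        to_server.flush()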
333 333 _iochannels = [
334 334 # server.ch, ui.fp, mode
335 335 ('cin', 'fin', 'rb'),
336 336 ('cout', 'fout', 'wb'),
337 337 ('cerr', 'ferr', 'wb'),
338 338 ]
339 339
340 340 class chgcmdserver(commandserver.server):
341 341 def __init__(self, ui, repo, fin, fout, sock, hashstate, baseaddress):
342 342 super(chgcmdserver, self).__init__(
343 343 _newchgui(ui, channeledsystem(fin, fout, 'S')), repo, fin, fout)
344 344 self.clientsock = sock
345 345 self._oldios = [] # original (self.ch, ui.fp, fd) before "attachio"
346 346 self.hashstate = hashstate
347 347 self.baseaddress = baseaddress
348 348 if hashstate is not None:
349 349 self.capabilities = self.capabilities.copy()
350 350 self.capabilities['validate'] = chgcmdserver.validate
351 351
352 352 def cleanup(self):
353 353 super(chgcmdserver, self).cleanup()
354 354 # dispatch._runcatch() does not flush outputs if exception is not
355 355 # handled by dispatch._dispatch()
356 356 self.ui.flush()
357 357 self._restoreio()
358 358
359 359 def attachio(self):
360 360 """Attach to client's stdio passed via unix domain socket; all
361 361 channels except cresult will no longer be used
362 362 """
363 363 # tell client to sendmsg() with 1-byte payload, which makes it
364 364 # distinctive from "attachio\n" command consumed by client.read()
365 365 self.clientsock.sendall(struct.pack('>cI', 'I', 1))
366 366 clientfds = osutil.recvfds(self.clientsock.fileno())
367 367 _log('received fds: %r\n' % clientfds)
368 368
369 369 ui = self.ui
370 370 ui.flush()
371 371 first = self._saveio()
372 372 for fd, (cn, fn, mode) in zip(clientfds, _iochannels):
373 373 assert fd > 0
374 374 fp = getattr(ui, fn)
375 375 os.dup2(fd, fp.fileno())
376 376 os.close(fd)
377 377 if not first:
378 378 continue
379 379 # reset buffering mode when client is first attached. as we want
380 380 # to see output immediately on pager, the mode stays unchanged
381 381 # when client re-attached. ferr is unchanged because it should
382 382 # be unbuffered no matter if it is a tty or not.
383 383 if fn == 'ferr':
384 384 newfp = fp
385 385 else:
386 386 # make it line buffered explicitly because the default is
387 387 # decided on first write(), where fout could be a pager.
388 388 if fp.isatty():
389 389 bufsize = 1 # line buffered
390 390 else:
391 391 bufsize = -1 # system default
392 392 newfp = os.fdopen(fp.fileno(), mode, bufsize)
393 393 setattr(ui, fn, newfp)
394 394 setattr(self, cn, newfp)
395 395
396 396 self.cresult.write(struct.pack('>i', len(clientfds)))
397 397
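Answering the 1-byte 'I' request above means the client sends its three stdio descriptors as SCM_RIGHTS ancillary data, which is what osutil.recvfds() unpacks. The real chg client does this in C; a rough Python 3 illustration of the sendmsg() call (sendstdio() is a made-up name)::

    import array
    import socket

    def sendstdio(sock):
        # pass stdin/stdout/stderr (fds 0, 1, 2) as ancillary data; the
        # 1-byte dummy payload is the "1-byte payload" the server asked for
        fds = array.array('i', [0, 1, 2])
        sock.sendmsg([b'\x00'],
                     [(socket.SOL_SOCKET, socket.SCM_RIGHTS, fds)])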
398 398 def _saveio(self):
399 399 if self._oldios:
400 400 return False
401 401 ui = self.ui
402 402 for cn, fn, _mode in _iochannels:
403 403 ch = getattr(self, cn)
404 404 fp = getattr(ui, fn)
405 405 fd = os.dup(fp.fileno())
406 406 self._oldios.append((ch, fp, fd))
407 407 return True
408 408
409 409 def _restoreio(self):
410 410 ui = self.ui
411 411 for (ch, fp, fd), (cn, fn, _mode) in zip(self._oldios, _iochannels):
412 412 newfp = getattr(ui, fn)
413 413 # close newfp while it's associated with client; otherwise it
414 414 # would be closed when newfp is deleted
415 415 if newfp is not fp:
416 416 newfp.close()
417 417 # restore original fd: fp is open again
418 418 os.dup2(fd, fp.fileno())
419 419 os.close(fd)
420 420 setattr(self, cn, ch)
421 421 setattr(ui, fn, fp)
422 422 del self._oldios[:]
423 423
424 424 def validate(self):
425 425 """Reload the config and check if the server is up to date
426 426
427 427 Read a list of '\0' separated arguments.
428 428 Write a non-empty list of '\0' separated instruction strings or '\0'
429 429 if the list is empty.
430 430 An instruction string could be either:
431 431 - "unlink $path", the client should unlink the path to stop the
432 432 outdated server.
433 433 - "redirect $path", the client should attempt to connect to $path
434 434 first. If it does not work, start a new server. It implies
435 435 "reconnect".
436 436 - "exit $n", the client should exit directly with code n.
437 437 This may happen if we cannot parse the config.
438 438 - "reconnect", the client should close the connection and
439 439 reconnect.
440 440 If neither "reconnect" nor "redirect" is included in the instruction
441 441 list, the client can continue with this server after completing all
442 442 the instructions.
443 443 """
444 444 args = self._readlist()
445 445 try:
446 446 self.ui, lui = _loadnewui(self.ui, args)
447 447 except error.ParseError as inst:
448 448 dispatch._formatparse(self.ui.warn, inst)
449 449 self.ui.flush()
450 450 self.cresult.write('exit 255')
451 451 return
452 452 newhash = hashstate.fromui(lui, self.hashstate.mtimepaths)
453 453 insts = []
454 454 if newhash.mtimehash != self.hashstate.mtimehash:
455 455 addr = _hashaddress(self.baseaddress, self.hashstate.confighash)
456 456 insts.append('unlink %s' % addr)
457 457 # mtimehash is empty if one or more extensions fail to load.
458 458 # to be compatible with hg, still serve the client this time.
459 459 if self.hashstate.mtimehash:
460 460 insts.append('reconnect')
461 461 if newhash.confighash != self.hashstate.confighash:
462 462 addr = _hashaddress(self.baseaddress, newhash.confighash)
463 463 insts.append('redirect %s' % addr)
464 464 _log('validate: %s\n' % insts)
465 465 self.cresult.write('\0'.join(insts) or '\0')
466 466
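The instruction strings written back here drive the client's retry loop. A sketch of how a client could interpret them, assuming a simple state dict for the redirect target (handleinstructions() is illustrative, not part of chg)::

    import os

    def handleinstructions(data, state):
        # "data" is the '\0'-separated result payload; a single '\0' means
        # no instructions, i.e. this server can keep serving the client
        reconnect = False
        for inst in data.split('\0'):
            if not inst:
                continue
            if inst.startswith('unlink '):
                try:
                    os.unlink(inst[len('unlink '):])  # drop outdated socket
                except OSError:
                    pass
            elif inst.startswith('redirect '):
                state['address'] = inst[len('redirect '):]  # try this first
                reconnect = True
            elif inst.startswith('exit '):
                raise SystemExit(int(inst[len('exit '):]))
            elif inst == 'reconnect':
                reconnect = True
        return reconnect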
467 467 def chdir(self):
468 468 """Change current directory
469 469
470 470 Note that the behavior of --cwd option is bit different from this.
471 471 It does not affect --config parameter.
472 472 """
473 473 path = self._readstr()
474 474 if not path:
475 475 return
476 476 _log('chdir to %r\n' % path)
477 477 os.chdir(path)
478 478
479 479 def setumask(self):
480 480 """Change umask"""
481 481 mask = struct.unpack('>I', self._read(4))[0]
482 482 _log('setumask %r\n' % mask)
483 483 os.umask(mask)
484 484
485 485 def getpager(self):
486 486 """Read cmdargs and write pager command to r-channel if enabled
487 487
488 488 If pager isn't enabled, this writes '\0' because channeledoutput
489 489 does not allow to write empty data.
490 490 """
491 491 args = self._readlist()
492 492 try:
493 493 cmd, _func, args, options, _cmdoptions = dispatch._parse(self.ui,
494 494 args)
495 495 except (error.Abort, error.AmbiguousCommand, error.CommandError,
496 496 error.UnknownCommand):
497 497 cmd = None
498 498 options = {}
499 499 if not cmd or 'pager' not in options:
500 500 self.cresult.write('\0')
501 501 return
502 502
503 503 pagercmd = _setuppagercmd(self.ui, options, cmd)
504 504 if pagercmd:
505 505 # Python's SIGPIPE is SIG_IGN by default. change to SIG_DFL so
506 506 # we can exit if the pipe to the pager is closed
507 507 if util.safehasattr(signal, 'SIGPIPE') and \
508 508 signal.getsignal(signal.SIGPIPE) == signal.SIG_IGN:
509 509 signal.signal(signal.SIGPIPE, signal.SIG_DFL)
510 510 self.cresult.write(pagercmd)
511 511 else:
512 512 self.cresult.write('\0')
513 513
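Seen from the client, the 'getpager' result is either a single '\0' (pager disabled) or a shell command to spawn; the client would then typically re-attach its output so the server writes into the pager. A hedged sketch of that client half (startpager() is a made-up helper)::

    import subprocess
    import sys

    def startpager(result):
        # "result" is the payload read from the result channel
        if result == '\0':
            return None
        # spawn the pager; the caller would presumably attach pager.stdin
        # as the server's stdout (e.g. via another attachio round-trip)
        return subprocess.Popen(result, shell=True, stdin=subprocess.PIPE,
                                stdout=sys.stdout, stderr=sys.stderr)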
514 514 def setenv(self):
515 515 """Clear and update os.environ
516 516
517 517 Note that not all variables can make an effect on the running process.
518 518 """
519 519 l = self._readlist()
520 520 try:
521 521 newenv = dict(s.split('=', 1) for s in l)
522 522 except ValueError:
523 523 raise ValueError('unexpected value in setenv request')
524 524 _log('setenv: %r\n' % sorted(newenv.keys()))
525 525 os.environ.clear()
526 526 os.environ.update(newenv)
527 527
528 528 capabilities = commandserver.server.capabilities.copy()
529 529 capabilities.update({'attachio': attachio,
530 530 'chdir': chdir,
531 531 'getpager': getpager,
532 532 'setenv': setenv,
533 533 'setumask': setumask})
534 534
535 535 class _requesthandler(commandserver._requesthandler):
536 536 def _createcmdserver(self):
537 537 ui = self.server.ui
538 538 repo = self.server.repo
539 539 return chgcmdserver(ui, repo, self.rfile, self.wfile, self.connection,
540 540 self.server.hashstate, self.server.baseaddress)
541 541
542 542 def _tempaddress(address):
543 543 return '%s.%d.tmp' % (address, os.getpid())
544 544
545 545 def _hashaddress(address, hashstr):
546 546 return '%s-%s' % (address, hashstr)
547 547
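Together these helpers determine the socket paths involved: the server binds a per-pid temporary name first, then renames it onto the hash-suffixed address that clients get redirected to. With a made-up base address, pid and confighash::

    >>> _tempaddress('/tmp/chg1234/server')   # assuming os.getpid() == 4321
    '/tmp/chg1234/server.4321.tmp'
    >>> _hashaddress('/tmp/chg1234/server', 'a1b2c3d4e5f6')
    '/tmp/chg1234/server-a1b2c3d4e5f6'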
548 548 class AutoExitMixIn: # use old-style to comply with SocketServer design
549 549 lastactive = time.time()
550 550 idletimeout = 3600 # default 1 hour
551 551
552 552 def startautoexitthread(self):
553 553 # note: the auto-exit check here is cheap enough to not use a thread,
554 554 # be done in serve_forever. however SocketServer is hook-unfriendly,
555 555 # you simply cannot hook serve_forever without copying a lot of code.
556 556 # besides, serve_forever's docstring suggests using thread.
557 557 thread = threading.Thread(target=self._autoexitloop)
558 558 thread.daemon = True
559 559 thread.start()
560 560
561 561 def _autoexitloop(self, interval=1):
562 562 while True:
563 563 time.sleep(interval)
564 564 if not self.issocketowner():
565 565 _log('%s is not owned, exiting.\n' % self.server_address)
566 566 break
567 567 if time.time() - self.lastactive > self.idletimeout:
568 568 _log('being idle too long. exiting.\n')
569 569 break
570 570 self.shutdown()
571 571
572 572 def process_request(self, request, address):
573 573 self.lastactive = time.time()
574 574 return socketserver.ForkingMixIn.process_request(
575 575 self, request, address)
576 576
577 577 def server_bind(self):
578 578 # use a unique temp address so we can stat the file and do ownership
579 579 # check later
580 580 tempaddress = _tempaddress(self.server_address)
581 # use relative path instead of full path at bind() if possible, since
582 # AF_UNIX path has very small length limit (107 chars) on common
583 # platforms (see sys/un.h)
584 dirname, basename = os.path.split(tempaddress)
585 bakwdfd = None
586 if dirname:
587 bakwdfd = os.open('.', os.O_DIRECTORY)
588 os.chdir(dirname)
589 self.socket.bind(basename)
590 if bakwdfd:
591 os.fchdir(bakwdfd)
592 os.close(bakwdfd)
581 util.bindunixsocket(self.socket, tempaddress)
593 582 self._socketstat = os.stat(tempaddress)
594 583 # rename will replace the old socket file if exists atomically. the
595 584 # old server will detect ownership change and exit.
596 585 util.rename(tempaddress, self.server_address)
597 586
598 587 def issocketowner(self):
599 588 try:
600 589 stat = os.stat(self.server_address)
601 590 return (stat.st_ino == self._socketstat.st_ino and
602 591 stat.st_mtime == self._socketstat.st_mtime)
603 592 except OSError:
604 593 return False
605 594
606 595 def unlinksocketfile(self):
607 596 if not self.issocketowner():
608 597 return
609 598 # it is possible to have a race condition here that we may
610 599 # remove another server's socket file. but that's okay
611 600 # since that server will detect and exit automatically and
612 601 # the client will start a new server on demand.
613 602 try:
614 603 os.unlink(self.server_address)
615 604 except OSError as exc:
616 605 if exc.errno != errno.ENOENT:
617 606 raise
618 607
619 608 class chgunixservice(commandserver.unixservice):
620 609 def init(self):
621 610 if self.repo:
622 611 # one chgserver can serve multiple repos. drop repo information
623 612 self.ui.setconfig('bundle', 'mainreporoot', '', 'repo')
624 613 self.repo = None
625 614 self._inithashstate()
626 615 self._checkextensions()
627 616 class cls(AutoExitMixIn, socketserver.ForkingMixIn,
628 617 socketserver.UnixStreamServer):
629 618 ui = self.ui
630 619 repo = self.repo
631 620 hashstate = self.hashstate
632 621 baseaddress = self.baseaddress
633 622 self.server = cls(self.address, _requesthandler)
634 623 self.server.idletimeout = self.ui.configint(
635 624 'chgserver', 'idletimeout', self.server.idletimeout)
636 625 self.server.startautoexitthread()
637 626 self._createsymlink()
638 627
639 628 def _inithashstate(self):
640 629 self.baseaddress = self.address
641 630 if self.ui.configbool('chgserver', 'skiphash', False):
642 631 self.hashstate = None
643 632 return
644 633 self.hashstate = hashstate.fromui(self.ui)
645 634 self.address = _hashaddress(self.address, self.hashstate.confighash)
646 635
647 636 def _checkextensions(self):
648 637 if not self.hashstate:
649 638 return
650 639 if extensions.notloaded():
651 640 # one or more extensions failed to load. mtimehash becomes
652 641 # meaningless because we do not know the paths of those extensions.
653 642 # set mtimehash to an illegal hash value to invalidate the server.
654 643 self.hashstate.mtimehash = ''
655 644
656 645 def _createsymlink(self):
657 646 if self.baseaddress == self.address:
658 647 return
659 648 tempaddress = _tempaddress(self.baseaddress)
660 649 os.symlink(os.path.basename(self.address), tempaddress)
661 650 util.rename(tempaddress, self.baseaddress)
662 651
663 652 def run(self):
664 653 try:
665 654 self.server.serve_forever()
666 655 finally:
667 656 self.server.unlinksocketfile()
668 657
669 658 def uisetup(ui):
670 659 commandserver._servicemap['chgunix'] = chgunixservice
671 660
672 661 # CHGINTERNALMARK is temporarily set by chg client to detect if chg will
673 662 # start another chg. drop it to avoid possible side effects.
674 663 if 'CHGINTERNALMARK' in os.environ:
675 664 del os.environ['CHGINTERNALMARK']
@@ -1,600 +1,615 @@
1 1 # posix.py - Posix utility function implementations for Mercurial
2 2 #
3 3 # Copyright 2005-2009 Matt Mackall <mpm@selenic.com> and others
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 from __future__ import absolute_import
9 9
10 10 import errno
11 11 import fcntl
12 12 import getpass
13 13 import grp
14 14 import os
15 15 import pwd
16 16 import re
17 17 import select
18 18 import stat
19 19 import sys
20 20 import tempfile
21 21 import unicodedata
22 22
23 23 from .i18n import _
24 24 from . import (
25 25 encoding,
26 26 )
27 27
28 28 posixfile = open
29 29 normpath = os.path.normpath
30 30 samestat = os.path.samestat
31 31 try:
32 32 oslink = os.link
33 33 except AttributeError:
34 34 # Some platforms build Python without os.link on systems that are
35 35 # vaguely unix-like but don't have hardlink support. For those
36 36 # poor souls, just say we tried and that it failed so we fall back
37 37 # to copies.
38 38 def oslink(src, dst):
39 39 raise OSError(errno.EINVAL,
40 40 'hardlinks not supported: %s to %s' % (src, dst))
41 41 unlink = os.unlink
42 42 rename = os.rename
43 43 removedirs = os.removedirs
44 44 expandglobs = False
45 45
46 46 umask = os.umask(0)
47 47 os.umask(umask)
48 48
49 49 def split(p):
50 50 '''Same as posixpath.split, but faster
51 51
52 52 >>> import posixpath
53 53 >>> for f in ['/absolute/path/to/file',
54 54 ... 'relative/path/to/file',
55 55 ... 'file_alone',
56 56 ... 'path/to/directory/',
57 57 ... '/multiple/path//separators',
58 58 ... '/file_at_root',
59 59 ... '///multiple_leading_separators_at_root',
60 60 ... '']:
61 61 ... assert split(f) == posixpath.split(f), f
62 62 '''
63 63 ht = p.rsplit('/', 1)
64 64 if len(ht) == 1:
65 65 return '', p
66 66 nh = ht[0].rstrip('/')
67 67 if nh:
68 68 return nh, ht[1]
69 69 return ht[0] + '/', ht[1]
70 70
71 71 def openhardlinks():
72 72 '''return true if it is safe to hold open file handles to hardlinks'''
73 73 return True
74 74
75 75 def nlinks(name):
76 76 '''return number of hardlinks for the given file'''
77 77 return os.lstat(name).st_nlink
78 78
79 79 def parsepatchoutput(output_line):
80 80 """parses the output produced by patch and returns the filename"""
81 81 pf = output_line[14:]
82 82 if os.sys.platform == 'OpenVMS':
83 83 if pf[0] == '`':
84 84 pf = pf[1:-1] # Remove the quotes
85 85 else:
86 86 if pf.startswith("'") and pf.endswith("'") and " " in pf:
87 87 pf = pf[1:-1] # Remove the quotes
88 88 return pf
89 89
90 90 def sshargs(sshcmd, host, user, port):
91 91 '''Build argument list for ssh'''
92 92 args = user and ("%s@%s" % (user, host)) or host
93 93 return port and ("%s -p %s" % (args, port)) or args
94 94
95 95 def isexec(f):
96 96 """check whether a file is executable"""
97 97 return (os.lstat(f).st_mode & 0o100 != 0)
98 98
99 99 def setflags(f, l, x):
100 100 s = os.lstat(f).st_mode
101 101 if l:
102 102 if not stat.S_ISLNK(s):
103 103 # switch file to link
104 104 fp = open(f)
105 105 data = fp.read()
106 106 fp.close()
107 107 os.unlink(f)
108 108 try:
109 109 os.symlink(data, f)
110 110 except OSError:
111 111 # failed to make a link, rewrite file
112 112 fp = open(f, "w")
113 113 fp.write(data)
114 114 fp.close()
115 115 # no chmod needed at this point
116 116 return
117 117 if stat.S_ISLNK(s):
118 118 # switch link to file
119 119 data = os.readlink(f)
120 120 os.unlink(f)
121 121 fp = open(f, "w")
122 122 fp.write(data)
123 123 fp.close()
124 124 s = 0o666 & ~umask # avoid restatting for chmod
125 125
126 126 sx = s & 0o100
127 127 if x and not sx:
128 128 # Turn on +x for every +r bit when making a file executable
129 129 # and obey umask.
130 130 os.chmod(f, s | (s & 0o444) >> 2 & ~umask)
131 131 elif not x and sx:
132 132 # Turn off all +x bits
133 133 os.chmod(f, s & 0o666)
134 134
135 135 def copymode(src, dst, mode=None):
136 136 '''Copy the file mode from the file at path src to dst.
137 137 If src doesn't exist, we're using mode instead. If mode is None, we're
138 138 using umask.'''
139 139 try:
140 140 st_mode = os.lstat(src).st_mode & 0o777
141 141 except OSError as inst:
142 142 if inst.errno != errno.ENOENT:
143 143 raise
144 144 st_mode = mode
145 145 if st_mode is None:
146 146 st_mode = ~umask
147 147 st_mode &= 0o666
148 148 os.chmod(dst, st_mode)
149 149
150 150 def checkexec(path):
151 151 """
152 152 Check whether the given path is on a filesystem with UNIX-like exec flags
153 153
154 154 Requires a directory (like /foo/.hg)
155 155 """
156 156
157 157 # VFAT on some Linux versions can flip mode but it doesn't persist
158 158 # a FS remount. Frequently we can detect it if files are created
159 159 # with exec bit on.
160 160
161 161 try:
162 162 EXECFLAGS = stat.S_IXUSR | stat.S_IXGRP | stat.S_IXOTH
163 163 fh, fn = tempfile.mkstemp(dir=path, prefix='hg-checkexec-')
164 164 try:
165 165 os.close(fh)
166 166 m = os.stat(fn).st_mode & 0o777
167 167 new_file_has_exec = m & EXECFLAGS
168 168 os.chmod(fn, m ^ EXECFLAGS)
169 169 exec_flags_cannot_flip = ((os.stat(fn).st_mode & 0o777) == m)
170 170 finally:
171 171 os.unlink(fn)
172 172 except (IOError, OSError):
173 173 # we don't care, the user probably won't be able to commit anyway
174 174 return False
175 175 return not (new_file_has_exec or exec_flags_cannot_flip)
176 176
177 177 def checklink(path):
178 178 """check whether the given path is on a symlink-capable filesystem"""
179 179 # mktemp is not racy because symlink creation will fail if the
180 180 # file already exists
181 181 while True:
182 182 name = tempfile.mktemp(dir=path, prefix='hg-checklink-')
183 183 try:
184 184 fd = tempfile.NamedTemporaryFile(dir=path, prefix='hg-checklink-')
185 185 try:
186 186 os.symlink(os.path.basename(fd.name), name)
187 187 os.unlink(name)
188 188 return True
189 189 except OSError as inst:
190 190 # link creation might race, try again
191 191 if inst[0] == errno.EEXIST:
192 192 continue
193 193 raise
194 194 finally:
195 195 fd.close()
196 196 except AttributeError:
197 197 return False
198 198 except OSError as inst:
199 199 # sshfs might report failure while successfully creating the link
200 200 if inst[0] == errno.EIO and os.path.exists(name):
201 201 os.unlink(name)
202 202 return False
203 203
204 204 def checkosfilename(path):
205 205 '''Check that the base-relative path is a valid filename on this platform.
206 206 Returns None if the path is ok, or a UI string describing the problem.'''
207 207 pass # on posix platforms, every path is ok
208 208
209 209 def setbinary(fd):
210 210 pass
211 211
212 212 def pconvert(path):
213 213 return path
214 214
215 215 def localpath(path):
216 216 return path
217 217
218 218 def samefile(fpath1, fpath2):
219 219 """Returns whether path1 and path2 refer to the same file. This is only
220 220 guaranteed to work for files, not directories."""
221 221 return os.path.samefile(fpath1, fpath2)
222 222
223 223 def samedevice(fpath1, fpath2):
224 224 """Returns whether fpath1 and fpath2 are on the same device. This is only
225 225 guaranteed to work for files, not directories."""
226 226 st1 = os.lstat(fpath1)
227 227 st2 = os.lstat(fpath2)
228 228 return st1.st_dev == st2.st_dev
229 229
230 230 # os.path.normcase is a no-op, which doesn't help us on non-native filesystems
231 231 def normcase(path):
232 232 return path.lower()
233 233
234 234 # what normcase does to ASCII strings
235 235 normcasespec = encoding.normcasespecs.lower
236 236 # fallback normcase function for non-ASCII strings
237 237 normcasefallback = normcase
238 238
239 239 if sys.platform == 'darwin':
240 240
241 241 def normcase(path):
242 242 '''
243 243 Normalize a filename for OS X-compatible comparison:
244 244 - escape-encode invalid characters
245 245 - decompose to NFD
246 246 - lowercase
247 247 - omit ignored characters [200c-200f, 202a-202e, 206a-206f,feff]
248 248
249 249 >>> normcase('UPPER')
250 250 'upper'
251 251 >>> normcase('Caf\xc3\xa9')
252 252 'cafe\\xcc\\x81'
253 253 >>> normcase('\xc3\x89')
254 254 'e\\xcc\\x81'
255 255 >>> normcase('\xb8\xca\xc3\xca\xbe\xc8.JPG') # issue3918
256 256 '%b8%ca%c3\\xca\\xbe%c8.jpg'
257 257 '''
258 258
259 259 try:
260 260 return encoding.asciilower(path) # exception for non-ASCII
261 261 except UnicodeDecodeError:
262 262 return normcasefallback(path)
263 263
264 264 normcasespec = encoding.normcasespecs.lower
265 265
266 266 def normcasefallback(path):
267 267 try:
268 268 u = path.decode('utf-8')
269 269 except UnicodeDecodeError:
270 270 # OS X percent-encodes any bytes that aren't valid utf-8
271 271 s = ''
272 272 pos = 0
273 273 l = len(path)
274 274 while pos < l:
275 275 try:
276 276 c = encoding.getutf8char(path, pos)
277 277 pos += len(c)
278 278 except ValueError:
279 279 c = '%%%02X' % ord(path[pos])
280 280 pos += 1
281 281 s += c
282 282
283 283 u = s.decode('utf-8')
284 284
285 285 # Decompose then lowercase (HFS+ technote specifies lower)
286 286 enc = unicodedata.normalize('NFD', u).lower().encode('utf-8')
287 287 # drop HFS+ ignored characters
288 288 return encoding.hfsignoreclean(enc)
289 289
290 290 if sys.platform == 'cygwin':
291 291 # workaround for cygwin, in which mount point part of path is
292 292 # treated as case sensitive, even though underlying NTFS is case
293 293 # insensitive.
294 294
295 295 # default mount points
296 296 cygwinmountpoints = sorted([
297 297 "/usr/bin",
298 298 "/usr/lib",
299 299 "/cygdrive",
300 300 ], reverse=True)
301 301
302 302 # use upper-ing as normcase as same as NTFS workaround
303 303 def normcase(path):
304 304 pathlen = len(path)
305 305 if (pathlen == 0) or (path[0] != os.sep):
306 306 # treat as relative
307 307 return encoding.upper(path)
308 308
309 309 # to preserve case of mountpoint part
310 310 for mp in cygwinmountpoints:
311 311 if not path.startswith(mp):
312 312 continue
313 313
314 314 mplen = len(mp)
315 315 if mplen == pathlen: # mount point itself
316 316 return mp
317 317 if path[mplen] == os.sep:
318 318 return mp + encoding.upper(path[mplen:])
319 319
320 320 return encoding.upper(path)
321 321
322 322 normcasespec = encoding.normcasespecs.other
323 323 normcasefallback = normcase
324 324
325 325 # Cygwin translates native ACLs to POSIX permissions,
326 326 # but these translations are not supported by native
327 327 # tools, so the exec bit tends to be set erroneously.
328 328 # Therefore, disable executable bit access on Cygwin.
329 329 def checkexec(path):
330 330 return False
331 331
332 332 # Similarly, Cygwin's symlink emulation is likely to create
333 333 # problems when Mercurial is used from both Cygwin and native
334 334 # Windows, with other native tools, or on shared volumes
335 335 def checklink(path):
336 336 return False
337 337
338 338 _needsshellquote = None
339 339 def shellquote(s):
340 340 if os.sys.platform == 'OpenVMS':
341 341 return '"%s"' % s
342 342 global _needsshellquote
343 343 if _needsshellquote is None:
344 344 _needsshellquote = re.compile(r'[^a-zA-Z0-9._/+-]').search
345 345 if s and not _needsshellquote(s):
346 346 # "s" shouldn't have to be quoted
347 347 return s
348 348 else:
349 349 return "'%s'" % s.replace("'", "'\\''")
350 350
351 351 def quotecommand(cmd):
352 352 return cmd
353 353
354 354 def popen(command, mode='r'):
355 355 return os.popen(command, mode)
356 356
357 357 def testpid(pid):
358 358 '''return False if pid dead, True if running or not sure'''
359 359 if os.sys.platform == 'OpenVMS':
360 360 return True
361 361 try:
362 362 os.kill(pid, 0)
363 363 return True
364 364 except OSError as inst:
365 365 return inst.errno != errno.ESRCH
366 366
367 367 def explainexit(code):
368 368 """return a 2-tuple (desc, code) describing a subprocess status
369 369 (codes from kill are negative - not os.system/wait encoding)"""
370 370 if code >= 0:
371 371 return _("exited with status %d") % code, code
372 372 return _("killed by signal %d") % -code, -code
373 373
374 374 def isowner(st):
375 375 """Return True if the stat object st is from the current user."""
376 376 return st.st_uid == os.getuid()
377 377
378 378 def findexe(command):
379 379 '''Find executable for command searching like which does.
380 380 If command is a basename then PATH is searched for command.
381 381 PATH isn't searched if command is an absolute or relative path.
382 382 If command isn't found None is returned.'''
383 383 if sys.platform == 'OpenVMS':
384 384 return command
385 385
386 386 def findexisting(executable):
387 387 'Will return executable if existing file'
388 388 if os.path.isfile(executable) and os.access(executable, os.X_OK):
389 389 return executable
390 390 return None
391 391
392 392 if os.sep in command:
393 393 return findexisting(command)
394 394
395 395 if sys.platform == 'plan9':
396 396 return findexisting(os.path.join('/bin', command))
397 397
398 398 for path in os.environ.get('PATH', '').split(os.pathsep):
399 399 executable = findexisting(os.path.join(path, command))
400 400 if executable is not None:
401 401 return executable
402 402 return None
403 403
404 404 def setsignalhandler():
405 405 pass
406 406
407 407 _wantedkinds = set([stat.S_IFREG, stat.S_IFLNK])
408 408
409 409 def statfiles(files):
410 410 '''Stat each file in files. Yield each stat, or None if a file does not
411 411 exist or has a type we don't care about.'''
412 412 lstat = os.lstat
413 413 getkind = stat.S_IFMT
414 414 for nf in files:
415 415 try:
416 416 st = lstat(nf)
417 417 if getkind(st.st_mode) not in _wantedkinds:
418 418 st = None
419 419 except OSError as err:
420 420 if err.errno not in (errno.ENOENT, errno.ENOTDIR):
421 421 raise
422 422 st = None
423 423 yield st
424 424
425 425 def getuser():
426 426 '''return name of current user'''
427 427 return getpass.getuser()
428 428
429 429 def username(uid=None):
430 430 """Return the name of the user with the given uid.
431 431
432 432 If uid is None, return the name of the current user."""
433 433
434 434 if uid is None:
435 435 uid = os.getuid()
436 436 try:
437 437 return pwd.getpwuid(uid)[0]
438 438 except KeyError:
439 439 return str(uid)
440 440
441 441 def groupname(gid=None):
442 442 """Return the name of the group with the given gid.
443 443
444 444 If gid is None, return the name of the current group."""
445 445
446 446 if gid is None:
447 447 gid = os.getgid()
448 448 try:
449 449 return grp.getgrgid(gid)[0]
450 450 except KeyError:
451 451 return str(gid)
452 452
453 453 def groupmembers(name):
454 454 """Return the list of members of the group with the given
455 455 name, KeyError if the group does not exist.
456 456 """
457 457 return list(grp.getgrnam(name).gr_mem)
458 458
459 459 def spawndetached(args):
460 460 return os.spawnvp(os.P_NOWAIT | getattr(os, 'P_DETACH', 0),
461 461 args[0], args)
462 462
463 463 def gethgcmd():
464 464 return sys.argv[:1]
465 465
466 466 def termwidth():
467 467 try:
468 468 import array
469 469 import termios
470 470 for dev in (sys.stderr, sys.stdout, sys.stdin):
471 471 try:
472 472 try:
473 473 fd = dev.fileno()
474 474 except AttributeError:
475 475 continue
476 476 if not os.isatty(fd):
477 477 continue
478 478 try:
479 479 arri = fcntl.ioctl(fd, termios.TIOCGWINSZ, '\0' * 8)
480 480 width = array.array('h', arri)[1]
481 481 if width > 0:
482 482 return width
483 483 except AttributeError:
484 484 pass
485 485 except ValueError:
486 486 pass
487 487 except IOError as e:
488 488 if e[0] == errno.EINVAL:
489 489 pass
490 490 else:
491 491 raise
492 492 except ImportError:
493 493 pass
494 494 return 80
495 495
496 496 def makedir(path, notindexed):
497 497 os.mkdir(path)
498 498
499 499 def unlinkpath(f, ignoremissing=False):
500 500 """unlink and remove the directory if it is empty"""
501 501 try:
502 502 os.unlink(f)
503 503 except OSError as e:
504 504 if not (ignoremissing and e.errno == errno.ENOENT):
505 505 raise
506 506 # try removing directories that might now be empty
507 507 try:
508 508 os.removedirs(os.path.dirname(f))
509 509 except OSError:
510 510 pass
511 511
512 512 def lookupreg(key, name=None, scope=None):
513 513 return None
514 514
515 515 def hidewindow():
516 516 """Hide current shell window.
517 517
518 518 Used to hide the window opened when starting asynchronous
519 519 child process under Windows, unneeded on other systems.
520 520 """
521 521 pass
522 522
523 523 class cachestat(object):
524 524 def __init__(self, path):
525 525 self.stat = os.stat(path)
526 526
527 527 def cacheable(self):
528 528 return bool(self.stat.st_ino)
529 529
530 530 __hash__ = object.__hash__
531 531
532 532 def __eq__(self, other):
533 533 try:
534 534 # Only dev, ino, size, mtime and atime are likely to change. Out
535 535 # of these, we shouldn't compare atime but should compare the
536 536 # rest. However, one of the other fields changing indicates
537 537 # something fishy going on, so return False if anything but atime
538 538 # changes.
539 539 return (self.stat.st_mode == other.stat.st_mode and
540 540 self.stat.st_ino == other.stat.st_ino and
541 541 self.stat.st_dev == other.stat.st_dev and
542 542 self.stat.st_nlink == other.stat.st_nlink and
543 543 self.stat.st_uid == other.stat.st_uid and
544 544 self.stat.st_gid == other.stat.st_gid and
545 545 self.stat.st_size == other.stat.st_size and
546 546 self.stat.st_mtime == other.stat.st_mtime and
547 547 self.stat.st_ctime == other.stat.st_ctime)
548 548 except AttributeError:
549 549 return False
550 550
551 551 def __ne__(self, other):
552 552 return not self == other
553 553
554 554 def executablepath():
555 555 return None # available on Windows only
556 556
557 557 def statislink(st):
558 558 '''check whether a stat result is a symlink'''
559 559 return st and stat.S_ISLNK(st.st_mode)
560 560
561 561 def statisexec(st):
562 562 '''check whether a stat result is an executable file'''
563 563 return st and (st.st_mode & 0o100 != 0)
564 564
565 565 def poll(fds):
566 566 """block until something happens on any file descriptor
567 567
568 568 This is a generic helper that will check for any activity
569 569 (read, write. exception) and return the list of touched files.
570 570
571 571 In unsupported cases, it will raise a NotImplementedError"""
572 572 try:
573 573 res = select.select(fds, fds, fds)
574 574 except ValueError: # out of range file descriptor
575 575 raise NotImplementedError()
576 576 return sorted(list(set(sum(res, []))))
577 577
578 578 def readpipe(pipe):
579 579 """Read all available data from a pipe."""
580 580 # We can't fstat() a pipe because Linux will always report 0.
581 581 # So, we set the pipe to non-blocking mode and read everything
582 582 # that's available.
583 583 flags = fcntl.fcntl(pipe, fcntl.F_GETFL)
584 584 flags |= os.O_NONBLOCK
585 585 oldflags = fcntl.fcntl(pipe, fcntl.F_SETFL, flags)
586 586
587 587 try:
588 588 chunks = []
589 589 while True:
590 590 try:
591 591 s = pipe.read()
592 592 if not s:
593 593 break
594 594 chunks.append(s)
595 595 except IOError:
596 596 break
597 597
598 598 return ''.join(chunks)
599 599 finally:
600 600 fcntl.fcntl(pipe, fcntl.F_SETFL, oldflags)
601
602 def bindunixsocket(sock, path):
603 """Bind the UNIX domain socket to the specified path"""
604 # use relative path instead of full path at bind() if possible, since
605 # AF_UNIX path has very small length limit (107 chars) on common
606 # platforms (see sys/un.h)
607 dirname, basename = os.path.split(path)
608 bakwdfd = None
609 if dirname:
610 bakwdfd = os.open('.', os.O_DIRECTORY)
611 os.chdir(dirname)
612 sock.bind(basename)
613 if bakwdfd:
614 os.fchdir(bakwdfd)
615 os.close(bakwdfd)
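The chdir() trick is what allows binding socket paths longer than the ~107-character sun_path limit: only the basename is ever passed to bind(), and the previous working directory is restored afterwards. A small usage sketch of the new helper, with a made-up path::

    import socket

    from mercurial import util

    sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
    # the directory part may be arbitrarily long; bindunixsocket() binds
    # only the short basename after chdir'ing into that directory
    util.bindunixsocket(
        sock, '/tmp/some/deeply/nested/working/copy/.hg/chgserver-sock')
    sock.listen(1)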
@@ -1,2857 +1,2858 @@
1 1 # util.py - Mercurial utility functions and platform specific implementations
2 2 #
3 3 # Copyright 2005 K. Thananchayan <thananck@yahoo.com>
4 4 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
5 5 # Copyright 2006 Vadim Gelfer <vadim.gelfer@gmail.com>
6 6 #
7 7 # This software may be used and distributed according to the terms of the
8 8 # GNU General Public License version 2 or any later version.
9 9
10 10 """Mercurial utility functions and platform specific implementations.
11 11
12 12 This contains helper routines that are independent of the SCM core and
13 13 hide platform-specific details from the core.
14 14 """
15 15
16 16 from __future__ import absolute_import
17 17
18 18 import bz2
19 19 import calendar
20 20 import collections
21 21 import datetime
22 22 import errno
23 23 import gc
24 24 import hashlib
25 25 import imp
26 26 import os
27 27 import re as remod
28 28 import shutil
29 29 import signal
30 30 import socket
31 31 import subprocess
32 32 import sys
33 33 import tempfile
34 34 import textwrap
35 35 import time
36 36 import traceback
37 37 import zlib
38 38
39 39 from . import (
40 40 encoding,
41 41 error,
42 42 i18n,
43 43 osutil,
44 44 parsers,
45 45 pycompat,
46 46 )
47 47
48 48 for attr in (
49 49 'empty',
50 50 'httplib',
51 51 'pickle',
52 52 'queue',
53 53 'urlerr',
54 54 'urlparse',
55 55 # we do import urlreq, but we do it outside the loop
56 56 #'urlreq',
57 57 'stringio',
58 58 'socketserver',
59 59 'xmlrpclib',
60 60 ):
61 61 globals()[attr] = getattr(pycompat, attr)
62 62
63 63 # This line is to make pyflakes happy:
64 64 urlreq = pycompat.urlreq
65 65
66 66 if os.name == 'nt':
67 67 from . import windows as platform
68 68 else:
69 69 from . import posix as platform
70 70
71 71 _ = i18n._
72 72
73 bindunixsocket = platform.bindunixsocket
73 74 cachestat = platform.cachestat
74 75 checkexec = platform.checkexec
75 76 checklink = platform.checklink
76 77 copymode = platform.copymode
77 78 executablepath = platform.executablepath
78 79 expandglobs = platform.expandglobs
79 80 explainexit = platform.explainexit
80 81 findexe = platform.findexe
81 82 gethgcmd = platform.gethgcmd
82 83 getuser = platform.getuser
83 84 getpid = os.getpid
84 85 groupmembers = platform.groupmembers
85 86 groupname = platform.groupname
86 87 hidewindow = platform.hidewindow
87 88 isexec = platform.isexec
88 89 isowner = platform.isowner
89 90 localpath = platform.localpath
90 91 lookupreg = platform.lookupreg
91 92 makedir = platform.makedir
92 93 nlinks = platform.nlinks
93 94 normpath = platform.normpath
94 95 normcase = platform.normcase
95 96 normcasespec = platform.normcasespec
96 97 normcasefallback = platform.normcasefallback
97 98 openhardlinks = platform.openhardlinks
98 99 oslink = platform.oslink
99 100 parsepatchoutput = platform.parsepatchoutput
100 101 pconvert = platform.pconvert
101 102 poll = platform.poll
102 103 popen = platform.popen
103 104 posixfile = platform.posixfile
104 105 quotecommand = platform.quotecommand
105 106 readpipe = platform.readpipe
106 107 rename = platform.rename
107 108 removedirs = platform.removedirs
108 109 samedevice = platform.samedevice
109 110 samefile = platform.samefile
110 111 samestat = platform.samestat
111 112 setbinary = platform.setbinary
112 113 setflags = platform.setflags
113 114 setsignalhandler = platform.setsignalhandler
114 115 shellquote = platform.shellquote
115 116 spawndetached = platform.spawndetached
116 117 split = platform.split
117 118 sshargs = platform.sshargs
118 119 statfiles = getattr(osutil, 'statfiles', platform.statfiles)
119 120 statisexec = platform.statisexec
120 121 statislink = platform.statislink
121 122 termwidth = platform.termwidth
122 123 testpid = platform.testpid
123 124 umask = platform.umask
124 125 unlink = platform.unlink
125 126 unlinkpath = platform.unlinkpath
126 127 username = platform.username
127 128
128 129 # Python compatibility
129 130
130 131 _notset = object()
131 132
132 133 # disable Python's problematic floating point timestamps (issue4836)
133 134 # (Python hypocritically says you shouldn't change this behavior in
134 135 # libraries, and sure enough Mercurial is not a library.)
135 136 os.stat_float_times(False)
136 137
137 138 def safehasattr(thing, attr):
138 139 return getattr(thing, attr, _notset) is not _notset
139 140
140 141 DIGESTS = {
141 142 'md5': hashlib.md5,
142 143 'sha1': hashlib.sha1,
143 144 'sha512': hashlib.sha512,
144 145 }
145 146 # List of digest types from strongest to weakest
146 147 DIGESTS_BY_STRENGTH = ['sha512', 'sha1', 'md5']
147 148
148 149 for k in DIGESTS_BY_STRENGTH:
149 150 assert k in DIGESTS
150 151
151 152 class digester(object):
152 153 """helper to compute digests.
153 154
154 155 This helper can be used to compute one or more digests given their name.
155 156
156 157 >>> d = digester(['md5', 'sha1'])
157 158 >>> d.update('foo')
158 159 >>> [k for k in sorted(d)]
159 160 ['md5', 'sha1']
160 161 >>> d['md5']
161 162 'acbd18db4cc2f85cedef654fccc4a4d8'
162 163 >>> d['sha1']
163 164 '0beec7b5ea3f0fdbc95d0dd47f3c5bc275da8a33'
164 165 >>> digester.preferred(['md5', 'sha1'])
165 166 'sha1'
166 167 """
167 168
168 169 def __init__(self, digests, s=''):
169 170 self._hashes = {}
170 171 for k in digests:
171 172 if k not in DIGESTS:
172 173 raise Abort(_('unknown digest type: %s') % k)
173 174 self._hashes[k] = DIGESTS[k]()
174 175 if s:
175 176 self.update(s)
176 177
177 178 def update(self, data):
178 179 for h in self._hashes.values():
179 180 h.update(data)
180 181
181 182 def __getitem__(self, key):
182 183 if key not in DIGESTS:
183 184 raise Abort(_('unknown digest type: %s') % k)
184 185 return self._hashes[key].hexdigest()
185 186
186 187 def __iter__(self):
187 188 return iter(self._hashes)
188 189
189 190 @staticmethod
190 191 def preferred(supported):
191 192 """returns the strongest digest type in both supported and DIGESTS."""
192 193
193 194 for k in DIGESTS_BY_STRENGTH:
194 195 if k in supported:
195 196 return k
196 197 return None
197 198
198 199 class digestchecker(object):
199 200 """file handle wrapper that additionally checks content against a given
200 201 size and digests.
201 202
202 203 d = digestchecker(fh, size, {'md5': '...'})
203 204
204 205 When multiple digests are given, all of them are validated.
205 206 """
206 207
207 208 def __init__(self, fh, size, digests):
208 209 self._fh = fh
209 210 self._size = size
210 211 self._got = 0
211 212 self._digests = dict(digests)
212 213 self._digester = digester(self._digests.keys())
213 214
214 215 def read(self, length=-1):
215 216 content = self._fh.read(length)
216 217 self._digester.update(content)
217 218 self._got += len(content)
218 219 return content
219 220
220 221 def validate(self):
221 222 if self._size != self._got:
222 223 raise Abort(_('size mismatch: expected %d, got %d') %
223 224 (self._size, self._got))
224 225 for k, v in self._digests.items():
225 226 if v != self._digester[k]:
226 227 # i18n: first parameter is a digest name
227 228 raise Abort(_('%s mismatch: expected %s, got %s') %
228 229 (k, v, self._digester[k]))
229 230
230 231 try:
231 232 buffer = buffer
232 233 except NameError:
233 234 if sys.version_info[0] < 3:
234 235 def buffer(sliceable, offset=0):
235 236 return sliceable[offset:]
236 237 else:
237 238 def buffer(sliceable, offset=0):
238 239 return memoryview(sliceable)[offset:]
239 240
240 241 closefds = os.name == 'posix'
241 242
242 243 _chunksize = 4096
243 244
244 245 class bufferedinputpipe(object):
245 246 """a manually buffered input pipe
246 247
247 248 Python will not let us use buffered IO and lazy reading with 'polling' at
248 249 the same time. We cannot probe the buffer state and select will not detect
249 250 that data are ready to read if they are already buffered.
250 251
251 252 This class let us work around that by implementing its own buffering
252 253 (allowing efficient readline) while offering a way to know if the buffer is
253 254 empty from the output (allowing collaboration of the buffer with polling).
254 255
255 256 This class lives in the 'util' module because it makes use of the 'os'
256 257 module from the python stdlib.
257 258 """
258 259
259 260 def __init__(self, input):
260 261 self._input = input
261 262 self._buffer = []
262 263 self._eof = False
263 264 self._lenbuf = 0
264 265
265 266 @property
266 267 def hasbuffer(self):
267 268 """True is any data is currently buffered
268 269
269 270 This will be used externally a pre-step for polling IO. If there is
270 271 already data then no polling should be set in place."""
271 272 return bool(self._buffer)
272 273
273 274 @property
274 275 def closed(self):
275 276 return self._input.closed
276 277
277 278 def fileno(self):
278 279 return self._input.fileno()
279 280
280 281 def close(self):
281 282 return self._input.close()
282 283
283 284 def read(self, size):
284 285 while (not self._eof) and (self._lenbuf < size):
285 286 self._fillbuffer()
286 287 return self._frombuffer(size)
287 288
288 289 def readline(self, *args, **kwargs):
289 290 if 1 < len(self._buffer):
290 291 # this should not happen because both read and readline end with a
291 292 # _frombuffer call that collapse it.
292 293 self._buffer = [''.join(self._buffer)]
293 294 self._lenbuf = len(self._buffer[0])
294 295 lfi = -1
295 296 if self._buffer:
296 297 lfi = self._buffer[-1].find('\n')
297 298 while (not self._eof) and lfi < 0:
298 299 self._fillbuffer()
299 300 if self._buffer:
300 301 lfi = self._buffer[-1].find('\n')
301 302 size = lfi + 1
302 303 if lfi < 0: # end of file
303 304 size = self._lenbuf
304 305 elif 1 < len(self._buffer):
305 306 # we need to take previous chunks into account
306 307 size += self._lenbuf - len(self._buffer[-1])
307 308 return self._frombuffer(size)
308 309
309 310 def _frombuffer(self, size):
310 311 """return at most 'size' data from the buffer
311 312
312 313 The data are removed from the buffer."""
313 314 if size == 0 or not self._buffer:
314 315 return ''
315 316 buf = self._buffer[0]
316 317 if 1 < len(self._buffer):
317 318 buf = ''.join(self._buffer)
318 319
319 320 data = buf[:size]
320 321 buf = buf[len(data):]
321 322 if buf:
322 323 self._buffer = [buf]
323 324 self._lenbuf = len(buf)
324 325 else:
325 326 self._buffer = []
326 327 self._lenbuf = 0
327 328 return data
328 329
329 330 def _fillbuffer(self):
330 331 """read data to the buffer"""
331 332 data = os.read(self._input.fileno(), _chunksize)
332 333 if not data:
333 334 self._eof = True
334 335 else:
335 336 self._lenbuf += len(data)
336 337 self._buffer.append(data)
337 338
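bufferedinputpipe is meant to wrap a subprocess pipe so that poll() and buffered reads can coexist: callers check hasbuffer first and only poll when the buffer is empty. A minimal sketch (the command being run is only an example)::

    import subprocess

    from mercurial import util

    proc = subprocess.Popen(['hg', 'serve', '--cmdserver', 'pipe'],
                            stdin=subprocess.PIPE, stdout=subprocess.PIPE)
    pipe = util.bufferedinputpipe(proc.stdout)

    # poll only when nothing is buffered; otherwise select() could block
    # even though a read could already be served from the buffer
    if not pipe.hasbuffer:
        util.poll([pipe.fileno()])
    line = pipe.readline()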
338 339 def popen2(cmd, env=None, newlines=False):
339 340 # Setting bufsize to -1 lets the system decide the buffer size.
340 341 # The default for bufsize is 0, meaning unbuffered. This leads to
341 342 # poor performance on Mac OS X: http://bugs.python.org/issue4194
342 343 p = subprocess.Popen(cmd, shell=True, bufsize=-1,
343 344 close_fds=closefds,
344 345 stdin=subprocess.PIPE, stdout=subprocess.PIPE,
345 346 universal_newlines=newlines,
346 347 env=env)
347 348 return p.stdin, p.stdout
348 349
349 350 def popen3(cmd, env=None, newlines=False):
350 351 stdin, stdout, stderr, p = popen4(cmd, env, newlines)
351 352 return stdin, stdout, stderr
352 353
353 354 def popen4(cmd, env=None, newlines=False, bufsize=-1):
354 355 p = subprocess.Popen(cmd, shell=True, bufsize=bufsize,
355 356 close_fds=closefds,
356 357 stdin=subprocess.PIPE, stdout=subprocess.PIPE,
357 358 stderr=subprocess.PIPE,
358 359 universal_newlines=newlines,
359 360 env=env)
360 361 return p.stdin, p.stdout, p.stderr, p
361 362
362 363 def version():
363 364 """Return version information if available."""
364 365 try:
365 366 from . import __version__
366 367 return __version__.version
367 368 except ImportError:
368 369 return 'unknown'
369 370
370 371 def versiontuple(v=None, n=4):
371 372 """Parses a Mercurial version string into an N-tuple.
372 373
373 374 The version string to be parsed is specified with the ``v`` argument.
374 375 If it isn't defined, the current Mercurial version string will be parsed.
375 376
376 377 ``n`` can be 2, 3, or 4. Here is how some version strings map to
377 378 returned values:
378 379
379 380 >>> v = '3.6.1+190-df9b73d2d444'
380 381 >>> versiontuple(v, 2)
381 382 (3, 6)
382 383 >>> versiontuple(v, 3)
383 384 (3, 6, 1)
384 385 >>> versiontuple(v, 4)
385 386 (3, 6, 1, '190-df9b73d2d444')
386 387
387 388 >>> versiontuple('3.6.1+190-df9b73d2d444+20151118')
388 389 (3, 6, 1, '190-df9b73d2d444+20151118')
389 390
390 391 >>> v = '3.6'
391 392 >>> versiontuple(v, 2)
392 393 (3, 6)
393 394 >>> versiontuple(v, 3)
394 395 (3, 6, None)
395 396 >>> versiontuple(v, 4)
396 397 (3, 6, None, None)
397 398 """
398 399 if not v:
399 400 v = version()
400 401 parts = v.split('+', 1)
401 402 if len(parts) == 1:
402 403 vparts, extra = parts[0], None
403 404 else:
404 405 vparts, extra = parts
405 406
406 407 vints = []
407 408 for i in vparts.split('.'):
408 409 try:
409 410 vints.append(int(i))
410 411 except ValueError:
411 412 break
412 413 # (3, 6) -> (3, 6, None)
413 414 while len(vints) < 3:
414 415 vints.append(None)
415 416
416 417 if n == 2:
417 418 return (vints[0], vints[1])
418 419 if n == 3:
419 420 return (vints[0], vints[1], vints[2])
420 421 if n == 4:
421 422 return (vints[0], vints[1], vints[2], extra)
422 423
423 424 # used by parsedate
424 425 defaultdateformats = (
425 426 '%Y-%m-%d %H:%M:%S',
426 427 '%Y-%m-%d %I:%M:%S%p',
427 428 '%Y-%m-%d %H:%M',
428 429 '%Y-%m-%d %I:%M%p',
429 430 '%Y-%m-%d',
430 431 '%m-%d',
431 432 '%m/%d',
432 433 '%m/%d/%y',
433 434 '%m/%d/%Y',
434 435 '%a %b %d %H:%M:%S %Y',
435 436 '%a %b %d %I:%M:%S%p %Y',
436 437 '%a, %d %b %Y %H:%M:%S', # GNU coreutils "/bin/date --rfc-2822"
437 438 '%b %d %H:%M:%S %Y',
438 439 '%b %d %I:%M:%S%p %Y',
439 440 '%b %d %H:%M:%S',
440 441 '%b %d %I:%M:%S%p',
441 442 '%b %d %H:%M',
442 443 '%b %d %I:%M%p',
443 444 '%b %d %Y',
444 445 '%b %d',
445 446 '%H:%M:%S',
446 447 '%I:%M:%S%p',
447 448 '%H:%M',
448 449 '%I:%M%p',
449 450 )
450 451
451 452 extendeddateformats = defaultdateformats + (
452 453 "%Y",
453 454 "%Y-%m",
454 455 "%b",
455 456 "%b %Y",
456 457 )
457 458
458 459 def cachefunc(func):
459 460 '''cache the result of function calls'''
460 461 # XXX doesn't handle keywords args
461 462 if func.__code__.co_argcount == 0:
462 463 cache = []
463 464 def f():
464 465 if len(cache) == 0:
465 466 cache.append(func())
466 467 return cache[0]
467 468 return f
468 469 cache = {}
469 470 if func.__code__.co_argcount == 1:
470 471 # we gain a small amount of time because
471 472 # we don't need to pack/unpack the list
472 473 def f(arg):
473 474 if arg not in cache:
474 475 cache[arg] = func(arg)
475 476 return cache[arg]
476 477 else:
477 478 def f(*args):
478 479 if args not in cache:
479 480 cache[args] = func(*args)
480 481 return cache[args]
481 482
482 483 return f
483 484
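# Illustrative usage sketch (not part of the original module): cachefunc
# memoizes a function by its positional arguments. The 'expensive' helper
# below is hypothetical and only demonstrates the wrapping.
def _cachefunc_example():
    calls = []
    def expensive(x):
        calls.append(x)
        return x * x
    cached = cachefunc(expensive)
    assert cached(3) == 9
    assert cached(3) == 9   # second call is served from the cache
    assert calls == [3]     # the wrapped function ran only once
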
484 485 class sortdict(dict):
485 486 '''a simple dictionary that preserves insertion order'''
486 487 def __init__(self, data=None):
487 488 self._list = []
488 489 if data:
489 490 self.update(data)
490 491 def copy(self):
491 492 return sortdict(self)
492 493 def __setitem__(self, key, val):
493 494 if key in self:
494 495 self._list.remove(key)
495 496 self._list.append(key)
496 497 dict.__setitem__(self, key, val)
497 498 def __iter__(self):
498 499 return self._list.__iter__()
499 500 def update(self, src):
500 501 if isinstance(src, dict):
501 502 src = src.iteritems()
502 503 for k, v in src:
503 504 self[k] = v
504 505 def clear(self):
505 506 dict.clear(self)
506 507 self._list = []
507 508 def items(self):
508 509 return [(k, self[k]) for k in self._list]
509 510 def __delitem__(self, key):
510 511 dict.__delitem__(self, key)
511 512 self._list.remove(key)
512 513 def pop(self, key, *args, **kwargs):
513 514 dict.pop(self, key, *args, **kwargs)
514 515 try:
515 516 self._list.remove(key)
516 517 except ValueError:
517 518 pass
518 519 def keys(self):
519 520 return self._list
520 521 def iterkeys(self):
521 522 return self._list.__iter__()
522 523 def iteritems(self):
523 524 for k in self._list:
524 525 yield k, self[k]
525 526 def insert(self, index, key, val):
526 527 self._list.insert(index, key)
527 528 dict.__setitem__(self, key, val)
528 529
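# Illustrative usage sketch (not part of the original module): sortdict keeps
# keys in insertion order, and re-setting an existing key moves it to the end.
def _sortdict_example():
    d = sortdict()
    d['b'] = 1
    d['a'] = 2
    assert d.keys() == ['b', 'a']
    d['b'] = 3                    # reassignment moves 'b' to the end
    assert d.keys() == ['a', 'b']
    assert d.items() == [('a', 2), ('b', 3)]
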
529 530 class _lrucachenode(object):
530 531 """A node in a doubly linked list.
531 532
532 533 Holds a reference to nodes on either side as well as a key-value
533 534 pair for the dictionary entry.
534 535 """
535 536 __slots__ = ('next', 'prev', 'key', 'value')
536 537
537 538 def __init__(self):
538 539 self.next = None
539 540 self.prev = None
540 541
541 542 self.key = _notset
542 543 self.value = None
543 544
544 545 def markempty(self):
545 546 """Mark the node as emptied."""
546 547 self.key = _notset
547 548
548 549 class lrucachedict(object):
549 550 """Dict that caches most recent accesses and sets.
550 551
551 552 The dict consists of an actual backing dict - indexed by original
552 553 key - and a doubly linked circular list defining the order of entries in
553 554 the cache.
554 555
555 556 The head node is the newest entry in the cache. If the cache is full,
556 557 we recycle head.prev and make it the new head. Cache accesses result in
557 558 the node being moved to before the existing head and being marked as the
558 559 new head node.
559 560 """
560 561 def __init__(self, max):
561 562 self._cache = {}
562 563
563 564 self._head = head = _lrucachenode()
564 565 head.prev = head
565 566 head.next = head
566 567 self._size = 1
567 568 self._capacity = max
568 569
569 570 def __len__(self):
570 571 return len(self._cache)
571 572
572 573 def __contains__(self, k):
573 574 return k in self._cache
574 575
575 576 def __iter__(self):
576 577 # We don't have to iterate in cache order, but why not.
577 578 n = self._head
578 579 for i in range(len(self._cache)):
579 580 yield n.key
580 581 n = n.next
581 582
582 583 def __getitem__(self, k):
583 584 node = self._cache[k]
584 585 self._movetohead(node)
585 586 return node.value
586 587
587 588 def __setitem__(self, k, v):
588 589 node = self._cache.get(k)
589 590 # Replace existing value and mark as newest.
590 591 if node is not None:
591 592 node.value = v
592 593 self._movetohead(node)
593 594 return
594 595
595 596 if self._size < self._capacity:
596 597 node = self._addcapacity()
597 598 else:
598 599 # Grab the last/oldest item.
599 600 node = self._head.prev
600 601
601 602 # At capacity. Kill the old entry.
602 603 if node.key is not _notset:
603 604 del self._cache[node.key]
604 605
605 606 node.key = k
606 607 node.value = v
607 608 self._cache[k] = node
608 609 # And mark it as newest entry. No need to adjust order since it
609 610 # is already self._head.prev.
610 611 self._head = node
611 612
612 613 def __delitem__(self, k):
613 614 node = self._cache.pop(k)
614 615 node.markempty()
615 616
616 617 # Temporarily mark as newest item before re-adjusting head to make
617 618 # this node the oldest item.
618 619 self._movetohead(node)
619 620 self._head = node.next
620 621
621 622 # Additional dict methods.
622 623
623 624 def get(self, k, default=None):
624 625 try:
625 626 return self._cache[k].value
626 627 except KeyError:
627 628 return default
628 629
629 630 def clear(self):
630 631 n = self._head
631 632 while n.key is not _notset:
632 633 n.markempty()
633 634 n = n.next
634 635
635 636 self._cache.clear()
636 637
637 638 def copy(self):
638 639 result = lrucachedict(self._capacity)
639 640 n = self._head.prev
640 641 # Iterate in oldest-to-newest order, so the copy has the right ordering
641 642 for i in range(len(self._cache)):
642 643 result[n.key] = n.value
643 644 n = n.prev
644 645 return result
645 646
646 647 def _movetohead(self, node):
647 648 """Mark a node as the newest, making it the new head.
648 649
649 650 When a node is accessed, it becomes the freshest entry in the LRU
650 651 list, which is denoted by self._head.
651 652
652 653 Visually, let's make ``N`` the new head node (* denotes head):
653 654
654 655 previous/oldest <-> head <-> next/next newest
655 656
656 657 ----<->--- A* ---<->-----
657 658 | |
658 659 E <-> D <-> N <-> C <-> B
659 660
660 661 To:
661 662
662 663 ----<->--- N* ---<->-----
663 664 | |
664 665 E <-> D <-> C <-> B <-> A
665 666
666 667 This requires the following moves:
667 668
668 669 C.next = D (node.prev.next = node.next)
669 670 D.prev = C (node.next.prev = node.prev)
670 671 E.next = N (head.prev.next = node)
671 672 N.prev = E (node.prev = head.prev)
672 673 N.next = A (node.next = head)
673 674 A.prev = N (head.prev = node)
674 675 """
675 676 head = self._head
676 677 # C.next = D
677 678 node.prev.next = node.next
678 679 # D.prev = C
679 680 node.next.prev = node.prev
680 681 # N.prev = E
681 682 node.prev = head.prev
682 683 # N.next = A
683 684 # It is tempting to do just "head" here, however if node is
684 685 # adjacent to head, this will do bad things.
685 686 node.next = head.prev.next
686 687 # E.next = N
687 688 node.next.prev = node
688 689 # A.prev = N
689 690 node.prev.next = node
690 691
691 692 self._head = node
692 693
693 694 def _addcapacity(self):
694 695 """Add a node to the circular linked list.
695 696
696 697 The new node is inserted before the head node.
697 698 """
698 699 head = self._head
699 700 node = _lrucachenode()
700 701 head.prev.next = node
701 702 node.prev = head.prev
702 703 node.next = head
703 704 head.prev = node
704 705 self._size += 1
705 706 return node
706 707
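# Illustrative usage sketch (not part of the original module): lrucachedict
# evicts the least recently used entry once capacity is exceeded. The
# capacity of 2 below is arbitrary.
def _lrucachedict_example():
    d = lrucachedict(2)
    d['a'] = 1
    d['b'] = 2
    d['a']                  # touching 'a' makes 'b' the oldest entry
    d['c'] = 3              # inserting a third key evicts 'b'
    assert 'a' in d and 'c' in d
    assert 'b' not in d
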
707 708 def lrucachefunc(func):
708 709 '''cache most recent results of function calls'''
709 710 cache = {}
710 711 order = collections.deque()
711 712 if func.__code__.co_argcount == 1:
712 713 def f(arg):
713 714 if arg not in cache:
714 715 if len(cache) > 20:
715 716 del cache[order.popleft()]
716 717 cache[arg] = func(arg)
717 718 else:
718 719 order.remove(arg)
719 720 order.append(arg)
720 721 return cache[arg]
721 722 else:
722 723 def f(*args):
723 724 if args not in cache:
724 725 if len(cache) > 20:
725 726 del cache[order.popleft()]
726 727 cache[args] = func(*args)
727 728 else:
728 729 order.remove(args)
729 730 order.append(args)
730 731 return cache[args]
731 732
732 733 return f
733 734
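# Illustrative usage sketch (not part of the original module): lrucachefunc
# keeps roughly the 20 most recently used results; older arguments are
# recomputed on the next call.
def _lrucachefunc_example():
    calls = []
    def double(x):
        calls.append(x)
        return x * 2
    cached = lrucachefunc(double)
    assert cached(1) == 2
    assert cached(1) == 2   # served from the cache
    assert calls == [1]
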
734 735 class propertycache(object):
735 736 def __init__(self, func):
736 737 self.func = func
737 738 self.name = func.__name__
738 739 def __get__(self, obj, type=None):
739 740 result = self.func(obj)
740 741 self.cachevalue(obj, result)
741 742 return result
742 743
743 744 def cachevalue(self, obj, value):
744 745 # __dict__ assignment required to bypass __setattr__ (eg: repoview)
745 746 obj.__dict__[self.name] = value
746 747
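# Illustrative usage sketch (not part of the original module): propertycache
# computes an attribute once and stores the result in the instance __dict__,
# so later reads bypass the descriptor entirely. The 'widget' class is
# hypothetical.
def _propertycache_example():
    class widget(object):
        computed = 0
        @propertycache
        def answer(self):
            widget.computed += 1
            return 42
    w = widget()
    assert w.answer == 42
    assert w.answer == 42          # cached in w.__dict__, not recomputed
    assert widget.computed == 1
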
747 748 def pipefilter(s, cmd):
748 749 '''filter string S through command CMD, returning its output'''
749 750 p = subprocess.Popen(cmd, shell=True, close_fds=closefds,
750 751 stdin=subprocess.PIPE, stdout=subprocess.PIPE)
751 752 pout, perr = p.communicate(s)
752 753 return pout
753 754
754 755 def tempfilter(s, cmd):
755 756 '''filter string S through a pair of temporary files with CMD.
756 757 CMD is used as a template to create the real command to be run,
757 758 with the strings INFILE and OUTFILE replaced by the real names of
758 759 the temporary files generated.'''
759 760 inname, outname = None, None
760 761 try:
761 762 infd, inname = tempfile.mkstemp(prefix='hg-filter-in-')
762 763 fp = os.fdopen(infd, 'wb')
763 764 fp.write(s)
764 765 fp.close()
765 766 outfd, outname = tempfile.mkstemp(prefix='hg-filter-out-')
766 767 os.close(outfd)
767 768 cmd = cmd.replace('INFILE', inname)
768 769 cmd = cmd.replace('OUTFILE', outname)
769 770 code = os.system(cmd)
770 771 if sys.platform == 'OpenVMS' and code & 1:
771 772 code = 0
772 773 if code:
773 774 raise Abort(_("command '%s' failed: %s") %
774 775 (cmd, explainexit(code)))
775 776 return readfile(outname)
776 777 finally:
777 778 try:
778 779 if inname:
779 780 os.unlink(inname)
780 781 except OSError:
781 782 pass
782 783 try:
783 784 if outname:
784 785 os.unlink(outname)
785 786 except OSError:
786 787 pass
787 788
788 789 filtertable = {
789 790 'tempfile:': tempfilter,
790 791 'pipe:': pipefilter,
791 792 }
792 793
793 794 def filter(s, cmd):
794 795 "filter a string through a command that transforms its input to its output"
795 796 for name, fn in filtertable.iteritems():
796 797 if cmd.startswith(name):
797 798 return fn(s, cmd[len(name):].lstrip())
798 799 return pipefilter(s, cmd)
799 800
800 801 def binary(s):
801 802 """return true if a string is binary data"""
802 803 return bool(s and '\0' in s)
803 804
804 805 def increasingchunks(source, min=1024, max=65536):
805 806 '''return no less than min bytes per chunk while data remains,
806 807 doubling min after each chunk until it reaches max'''
807 808 def log2(x):
808 809 if not x:
809 810 return 0
810 811 i = 0
811 812 while x:
812 813 x >>= 1
813 814 i += 1
814 815 return i - 1
815 816
816 817 buf = []
817 818 blen = 0
818 819 for chunk in source:
819 820 buf.append(chunk)
820 821 blen += len(chunk)
821 822 if blen >= min:
822 823 if min < max:
823 824 min = min << 1
824 825 nmin = 1 << log2(blen)
825 826 if nmin > min:
826 827 min = nmin
827 828 if min > max:
828 829 min = max
829 830 yield ''.join(buf)
830 831 blen = 0
831 832 buf = []
832 833 if buf:
833 834 yield ''.join(buf)
834 835
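# Illustrative usage sketch (not part of the original module): feeding many
# 10-byte chunks through increasingchunks() regroups them into chunks that
# double in size from 'min' up to 'max'.
def _increasingchunks_example():
    source = ['x' * 10] * 100    # 100 chunks of 10 bytes
    sizes = [len(c) for c in increasingchunks(source, min=20, max=80)]
    assert sizes[:3] == [20, 40, 80]   # chunk sizes double up to max
    assert sum(sizes) == 1000          # no data is lost or duplicated
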
835 836 Abort = error.Abort
836 837
837 838 def always(fn):
838 839 return True
839 840
840 841 def never(fn):
841 842 return False
842 843
843 844 def nogc(func):
844 845 """disable garbage collector
845 846
846 847 Python's garbage collector triggers a GC each time a certain number of
847 848 container objects (the number being defined by gc.get_threshold()) are
848 849 allocated even when marked not to be tracked by the collector. Tracking has
849 850 no effect on when GCs are triggered, only on what objects the GC looks
850 851 into. As a workaround, disable GC while building complex (huge)
851 852 containers.
852 853
853 854 This garbage collector issue has been fixed in Python 2.7.
854 855 """
855 856 def wrapper(*args, **kwargs):
856 857 gcenabled = gc.isenabled()
857 858 gc.disable()
858 859 try:
859 860 return func(*args, **kwargs)
860 861 finally:
861 862 if gcenabled:
862 863 gc.enable()
863 864 return wrapper
864 865
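# Illustrative usage sketch (not part of the original module): wrapping a
# builder of a large container with nogc keeps the cyclic garbage collector
# out of the way while it runs and restores the previous GC state afterwards.
def _nogc_example():
    @nogc
    def buildmap(n):
        return dict((i, str(i)) for i in xrange(n))
    wasenabled = gc.isenabled()
    assert len(buildmap(1000)) == 1000
    assert gc.isenabled() == wasenabled   # GC state is restored
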
865 866 def pathto(root, n1, n2):
866 867 '''return the relative path from one place to another.
867 868 root should use os.sep to separate directories
868 869 n1 should use os.sep to separate directories
869 870 n2 should use "/" to separate directories
870 871 returns an os.sep-separated path.
871 872
872 873 If n1 is a relative path, it's assumed it's
873 874 relative to root.
874 875 n2 should always be relative to root.
875 876 '''
876 877 if not n1:
877 878 return localpath(n2)
878 879 if os.path.isabs(n1):
879 880 if os.path.splitdrive(root)[0] != os.path.splitdrive(n1)[0]:
880 881 return os.path.join(root, localpath(n2))
881 882 n2 = '/'.join((pconvert(root), n2))
882 883 a, b = splitpath(n1), n2.split('/')
883 884 a.reverse()
884 885 b.reverse()
885 886 while a and b and a[-1] == b[-1]:
886 887 a.pop()
887 888 b.pop()
888 889 b.reverse()
889 890 return os.sep.join((['..'] * len(a)) + b) or '.'
890 891
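# Illustrative usage sketch (not part of the original module), POSIX only:
# from root '/repo' with the working directory at 'a/b' (relative to root),
# the repository-relative name 'a/c/f' is reached via '../c/f'.
def _pathto_example():
    if os.sep == '/':
        assert pathto('/repo', 'a/b', 'a/c/f') == '../c/f'
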
891 892 def mainfrozen():
892 893 """return True if we are a frozen executable.
893 894
894 895 The code supports py2exe (most common, Windows only) and tools/freeze
895 896 (portable, not much used).
896 897 """
897 898 return (safehasattr(sys, "frozen") or # new py2exe
898 899 safehasattr(sys, "importers") or # old py2exe
899 900 imp.is_frozen("__main__")) # tools/freeze
900 901
901 902 # the location of data files matching the source code
902 903 if mainfrozen() and getattr(sys, 'frozen', None) != 'macosx_app':
903 904 # executable version (py2exe) doesn't support __file__
904 905 datapath = os.path.dirname(sys.executable)
905 906 else:
906 907 datapath = os.path.dirname(__file__)
907 908
908 909 i18n.setdatapath(datapath)
909 910
910 911 _hgexecutable = None
911 912
912 913 def hgexecutable():
913 914 """return location of the 'hg' executable.
914 915
915 916 Defaults to $HG or 'hg' in the search path.
916 917 """
917 918 if _hgexecutable is None:
918 919 hg = os.environ.get('HG')
919 920 mainmod = sys.modules['__main__']
920 921 if hg:
921 922 _sethgexecutable(hg)
922 923 elif mainfrozen():
923 924 if getattr(sys, 'frozen', None) == 'macosx_app':
924 925 # Env variable set by py2app
925 926 _sethgexecutable(os.environ['EXECUTABLEPATH'])
926 927 else:
927 928 _sethgexecutable(sys.executable)
928 929 elif os.path.basename(getattr(mainmod, '__file__', '')) == 'hg':
929 930 _sethgexecutable(mainmod.__file__)
930 931 else:
931 932 exe = findexe('hg') or os.path.basename(sys.argv[0])
932 933 _sethgexecutable(exe)
933 934 return _hgexecutable
934 935
935 936 def _sethgexecutable(path):
936 937 """set location of the 'hg' executable"""
937 938 global _hgexecutable
938 939 _hgexecutable = path
939 940
940 941 def _isstdout(f):
941 942 fileno = getattr(f, 'fileno', None)
942 943 return fileno and fileno() == sys.__stdout__.fileno()
943 944
944 945 def system(cmd, environ=None, cwd=None, onerr=None, errprefix=None, out=None):
945 946 '''enhanced shell command execution.
946 947 run with environment maybe modified, maybe in different dir.
947 948
948 949 if command fails and onerr is None, return status, else raise onerr
949 950 object as exception.
950 951
951 952 if out is specified, it is assumed to be a file-like object that has a
952 953 write() method. stdout and stderr will be redirected to out.'''
953 954 if environ is None:
954 955 environ = {}
955 956 try:
956 957 sys.stdout.flush()
957 958 except Exception:
958 959 pass
959 960 def py2shell(val):
960 961 'convert python object into string that is useful to shell'
961 962 if val is None or val is False:
962 963 return '0'
963 964 if val is True:
964 965 return '1'
965 966 return str(val)
966 967 origcmd = cmd
967 968 cmd = quotecommand(cmd)
968 969 if sys.platform == 'plan9' and (sys.version_info[0] == 2
969 970 and sys.version_info[1] < 7):
970 971 # subprocess kludge to work around issues in half-baked Python
971 972 # ports, notably bichued/python:
972 973 if not cwd is None:
973 974 os.chdir(cwd)
974 975 rc = os.system(cmd)
975 976 else:
976 977 env = dict(os.environ)
977 978 env.update((k, py2shell(v)) for k, v in environ.iteritems())
978 979 env['HG'] = hgexecutable()
979 980 if out is None or _isstdout(out):
980 981 rc = subprocess.call(cmd, shell=True, close_fds=closefds,
981 982 env=env, cwd=cwd)
982 983 else:
983 984 proc = subprocess.Popen(cmd, shell=True, close_fds=closefds,
984 985 env=env, cwd=cwd, stdout=subprocess.PIPE,
985 986 stderr=subprocess.STDOUT)
986 987 while True:
987 988 line = proc.stdout.readline()
988 989 if not line:
989 990 break
990 991 out.write(line)
991 992 proc.wait()
992 993 rc = proc.returncode
993 994 if sys.platform == 'OpenVMS' and rc & 1:
994 995 rc = 0
995 996 if rc and onerr:
996 997 errmsg = '%s %s' % (os.path.basename(origcmd.split(None, 1)[0]),
997 998 explainexit(rc)[0])
998 999 if errprefix:
999 1000 errmsg = '%s: %s' % (errprefix, errmsg)
1000 1001 raise onerr(errmsg)
1001 1002 return rc
1002 1003
1003 1004 def checksignature(func):
1004 1005 '''wrap a function with code to check for calling errors'''
1005 1006 def check(*args, **kwargs):
1006 1007 try:
1007 1008 return func(*args, **kwargs)
1008 1009 except TypeError:
1009 1010 if len(traceback.extract_tb(sys.exc_info()[2])) == 1:
1010 1011 raise error.SignatureError
1011 1012 raise
1012 1013
1013 1014 return check
1014 1015
1015 1016 def copyfile(src, dest, hardlink=False, copystat=False, checkambig=False):
1016 1017 '''copy a file, preserving mode and optionally other stat info like
1017 1018 atime/mtime
1018 1019
1019 1020 checkambig argument is used with filestat, and is useful only if
1020 1021 destination file is guarded by any lock (e.g. repo.lock or
1021 1022 repo.wlock).
1022 1023
1023 1024 copystat and checkambig should be exclusive.
1024 1025 '''
1025 1026 assert not (copystat and checkambig)
1026 1027 oldstat = None
1027 1028 if os.path.lexists(dest):
1028 1029 if checkambig:
1029 1030 oldstat = checkambig and filestat(dest)
1030 1031 unlink(dest)
1031 1032 # hardlinks are problematic on CIFS, quietly ignore this flag
1032 1033 # until we find a way to work around it cleanly (issue4546)
1033 1034 if False and hardlink:
1034 1035 try:
1035 1036 oslink(src, dest)
1036 1037 return
1037 1038 except (IOError, OSError):
1038 1039 pass # fall back to normal copy
1039 1040 if os.path.islink(src):
1040 1041 os.symlink(os.readlink(src), dest)
1041 1042 # copytime is ignored for symlinks, but in general copytime isn't needed
1042 1043 # for them anyway
1043 1044 else:
1044 1045 try:
1045 1046 shutil.copyfile(src, dest)
1046 1047 if copystat:
1047 1048 # copystat also copies mode
1048 1049 shutil.copystat(src, dest)
1049 1050 else:
1050 1051 shutil.copymode(src, dest)
1051 1052 if oldstat and oldstat.stat:
1052 1053 newstat = filestat(dest)
1053 1054 if newstat.isambig(oldstat):
1054 1055 # stat of copied file is ambiguous to original one
1055 1056 advanced = (oldstat.stat.st_mtime + 1) & 0x7fffffff
1056 1057 os.utime(dest, (advanced, advanced))
1057 1058 except shutil.Error as inst:
1058 1059 raise Abort(str(inst))
1059 1060
1060 1061 def copyfiles(src, dst, hardlink=None, progress=lambda t, pos: None):
1061 1062 """Copy a directory tree using hardlinks if possible."""
1062 1063 num = 0
1063 1064
1064 1065 if hardlink is None:
1065 1066 hardlink = (os.stat(src).st_dev ==
1066 1067 os.stat(os.path.dirname(dst)).st_dev)
1067 1068 if hardlink:
1068 1069 topic = _('linking')
1069 1070 else:
1070 1071 topic = _('copying')
1071 1072
1072 1073 if os.path.isdir(src):
1073 1074 os.mkdir(dst)
1074 1075 for name, kind in osutil.listdir(src):
1075 1076 srcname = os.path.join(src, name)
1076 1077 dstname = os.path.join(dst, name)
1077 1078 def nprog(t, pos):
1078 1079 if pos is not None:
1079 1080 return progress(t, pos + num)
1080 1081 hardlink, n = copyfiles(srcname, dstname, hardlink, progress=nprog)
1081 1082 num += n
1082 1083 else:
1083 1084 if hardlink:
1084 1085 try:
1085 1086 oslink(src, dst)
1086 1087 except (IOError, OSError):
1087 1088 hardlink = False
1088 1089 shutil.copy(src, dst)
1089 1090 else:
1090 1091 shutil.copy(src, dst)
1091 1092 num += 1
1092 1093 progress(topic, num)
1093 1094 progress(topic, None)
1094 1095
1095 1096 return hardlink, num
1096 1097
1097 1098 _winreservednames = '''con prn aux nul
1098 1099 com1 com2 com3 com4 com5 com6 com7 com8 com9
1099 1100 lpt1 lpt2 lpt3 lpt4 lpt5 lpt6 lpt7 lpt8 lpt9'''.split()
1100 1101 _winreservedchars = ':*?"<>|'
1101 1102 def checkwinfilename(path):
1102 1103 r'''Check that the base-relative path is a valid filename on Windows.
1103 1104 Returns None if the path is ok, or a UI string describing the problem.
1104 1105
1105 1106 >>> checkwinfilename("just/a/normal/path")
1106 1107 >>> checkwinfilename("foo/bar/con.xml")
1107 1108 "filename contains 'con', which is reserved on Windows"
1108 1109 >>> checkwinfilename("foo/con.xml/bar")
1109 1110 "filename contains 'con', which is reserved on Windows"
1110 1111 >>> checkwinfilename("foo/bar/xml.con")
1111 1112 >>> checkwinfilename("foo/bar/AUX/bla.txt")
1112 1113 "filename contains 'AUX', which is reserved on Windows"
1113 1114 >>> checkwinfilename("foo/bar/bla:.txt")
1114 1115 "filename contains ':', which is reserved on Windows"
1115 1116 >>> checkwinfilename("foo/bar/b\07la.txt")
1116 1117 "filename contains '\\x07', which is invalid on Windows"
1117 1118 >>> checkwinfilename("foo/bar/bla ")
1118 1119 "filename ends with ' ', which is not allowed on Windows"
1119 1120 >>> checkwinfilename("../bar")
1120 1121 >>> checkwinfilename("foo\\")
1121 1122 "filename ends with '\\', which is invalid on Windows"
1122 1123 >>> checkwinfilename("foo\\/bar")
1123 1124 "directory name ends with '\\', which is invalid on Windows"
1124 1125 '''
1125 1126 if path.endswith('\\'):
1126 1127 return _("filename ends with '\\', which is invalid on Windows")
1127 1128 if '\\/' in path:
1128 1129 return _("directory name ends with '\\', which is invalid on Windows")
1129 1130 for n in path.replace('\\', '/').split('/'):
1130 1131 if not n:
1131 1132 continue
1132 1133 for c in n:
1133 1134 if c in _winreservedchars:
1134 1135 return _("filename contains '%s', which is reserved "
1135 1136 "on Windows") % c
1136 1137 if ord(c) <= 31:
1137 1138 return _("filename contains %r, which is invalid "
1138 1139 "on Windows") % c
1139 1140 base = n.split('.')[0]
1140 1141 if base and base.lower() in _winreservednames:
1141 1142 return _("filename contains '%s', which is reserved "
1142 1143 "on Windows") % base
1143 1144 t = n[-1]
1144 1145 if t in '. ' and n not in '..':
1145 1146 return _("filename ends with '%s', which is not allowed "
1146 1147 "on Windows") % t
1147 1148
1148 1149 if os.name == 'nt':
1149 1150 checkosfilename = checkwinfilename
1150 1151 else:
1151 1152 checkosfilename = platform.checkosfilename
1152 1153
1153 1154 def makelock(info, pathname):
1154 1155 try:
1155 1156 return os.symlink(info, pathname)
1156 1157 except OSError as why:
1157 1158 if why.errno == errno.EEXIST:
1158 1159 raise
1159 1160 except AttributeError: # no symlink in os
1160 1161 pass
1161 1162
1162 1163 ld = os.open(pathname, os.O_CREAT | os.O_WRONLY | os.O_EXCL)
1163 1164 os.write(ld, info)
1164 1165 os.close(ld)
1165 1166
1166 1167 def readlock(pathname):
1167 1168 try:
1168 1169 return os.readlink(pathname)
1169 1170 except OSError as why:
1170 1171 if why.errno not in (errno.EINVAL, errno.ENOSYS):
1171 1172 raise
1172 1173 except AttributeError: # no symlink in os
1173 1174 pass
1174 1175 fp = posixfile(pathname)
1175 1176 r = fp.read()
1176 1177 fp.close()
1177 1178 return r
1178 1179
1179 1180 def fstat(fp):
1180 1181 '''stat file object that may not have fileno method.'''
1181 1182 try:
1182 1183 return os.fstat(fp.fileno())
1183 1184 except AttributeError:
1184 1185 return os.stat(fp.name)
1185 1186
1186 1187 # File system features
1187 1188
1188 1189 def checkcase(path):
1189 1190 """
1190 1191 Return true if the given path is on a case-sensitive filesystem
1191 1192
1192 1193 Requires a path (like /foo/.hg) ending with a foldable final
1193 1194 directory component.
1194 1195 """
1195 1196 s1 = os.lstat(path)
1196 1197 d, b = os.path.split(path)
1197 1198 b2 = b.upper()
1198 1199 if b == b2:
1199 1200 b2 = b.lower()
1200 1201 if b == b2:
1201 1202 return True # no evidence against case sensitivity
1202 1203 p2 = os.path.join(d, b2)
1203 1204 try:
1204 1205 s2 = os.lstat(p2)
1205 1206 if s2 == s1:
1206 1207 return False
1207 1208 return True
1208 1209 except OSError:
1209 1210 return True
1210 1211
1211 1212 try:
1212 1213 import re2
1213 1214 _re2 = None
1214 1215 except ImportError:
1215 1216 _re2 = False
1216 1217
1217 1218 class _re(object):
1218 1219 def _checkre2(self):
1219 1220 global _re2
1220 1221 try:
1221 1222 # check if match works, see issue3964
1222 1223 _re2 = bool(re2.match(r'\[([^\[]+)\]', '[ui]'))
1223 1224 except ImportError:
1224 1225 _re2 = False
1225 1226
1226 1227 def compile(self, pat, flags=0):
1227 1228 '''Compile a regular expression, using re2 if possible
1228 1229
1229 1230 For best performance, use only re2-compatible regexp features. The
1230 1231 only flags from the re module that are re2-compatible are
1231 1232 IGNORECASE and MULTILINE.'''
1232 1233 if _re2 is None:
1233 1234 self._checkre2()
1234 1235 if _re2 and (flags & ~(remod.IGNORECASE | remod.MULTILINE)) == 0:
1235 1236 if flags & remod.IGNORECASE:
1236 1237 pat = '(?i)' + pat
1237 1238 if flags & remod.MULTILINE:
1238 1239 pat = '(?m)' + pat
1239 1240 try:
1240 1241 return re2.compile(pat)
1241 1242 except re2.error:
1242 1243 pass
1243 1244 return remod.compile(pat, flags)
1244 1245
1245 1246 @propertycache
1246 1247 def escape(self):
1247 1248 '''Return the version of escape corresponding to self.compile.
1248 1249
1249 1250 This is imperfect because whether re2 or re is used for a particular
1250 1251 function depends on the flags, etc, but it's the best we can do.
1251 1252 '''
1252 1253 global _re2
1253 1254 if _re2 is None:
1254 1255 self._checkre2()
1255 1256 if _re2:
1256 1257 return re2.escape
1257 1258 else:
1258 1259 return remod.escape
1259 1260
1260 1261 re = _re()
1261 1262
1262 1263 _fspathcache = {}
1263 1264 def fspath(name, root):
1264 1265 '''Get name in the case stored in the filesystem
1265 1266
1266 1267 The name should be relative to root, and be normcase-ed for efficiency.
1267 1268
1268 1269 Note that this function is unnecessary, and should not be
1269 1270 called, for case-sensitive filesystems (simply because it's expensive).
1270 1271
1271 1272 The root should be normcase-ed, too.
1272 1273 '''
1273 1274 def _makefspathcacheentry(dir):
1274 1275 return dict((normcase(n), n) for n in os.listdir(dir))
1275 1276
1276 1277 seps = os.sep
1277 1278 if os.altsep:
1278 1279 seps = seps + os.altsep
1279 1280 # Protect backslashes. This gets silly very quickly.
1280 1281 seps = seps.replace('\\','\\\\')
1281 1282 pattern = remod.compile(r'([^%s]+)|([%s]+)' % (seps, seps))
1282 1283 dir = os.path.normpath(root)
1283 1284 result = []
1284 1285 for part, sep in pattern.findall(name):
1285 1286 if sep:
1286 1287 result.append(sep)
1287 1288 continue
1288 1289
1289 1290 if dir not in _fspathcache:
1290 1291 _fspathcache[dir] = _makefspathcacheentry(dir)
1291 1292 contents = _fspathcache[dir]
1292 1293
1293 1294 found = contents.get(part)
1294 1295 if not found:
1295 1296 # retry "once per directory" per "dirstate.walk" which
1296 1297 # may take place for each patch of "hg qpush", for example
1297 1298 _fspathcache[dir] = contents = _makefspathcacheentry(dir)
1298 1299 found = contents.get(part)
1299 1300
1300 1301 result.append(found or part)
1301 1302 dir = os.path.join(dir, part)
1302 1303
1303 1304 return ''.join(result)
1304 1305
1305 1306 def checknlink(testfile):
1306 1307 '''check whether hardlink count reporting works properly'''
1307 1308
1308 1309 # testfile may be open, so we need a separate file for checking to
1309 1310 # work around issue2543 (or testfile may get lost on Samba shares)
1310 1311 f1 = testfile + ".hgtmp1"
1311 1312 if os.path.lexists(f1):
1312 1313 return False
1313 1314 try:
1314 1315 posixfile(f1, 'w').close()
1315 1316 except IOError:
1316 1317 return False
1317 1318
1318 1319 f2 = testfile + ".hgtmp2"
1319 1320 fd = None
1320 1321 try:
1321 1322 oslink(f1, f2)
1322 1323 # nlinks() may behave differently for files on Windows shares if
1323 1324 # the file is open.
1324 1325 fd = posixfile(f2)
1325 1326 return nlinks(f2) > 1
1326 1327 except OSError:
1327 1328 return False
1328 1329 finally:
1329 1330 if fd is not None:
1330 1331 fd.close()
1331 1332 for f in (f1, f2):
1332 1333 try:
1333 1334 os.unlink(f)
1334 1335 except OSError:
1335 1336 pass
1336 1337
1337 1338 def endswithsep(path):
1338 1339 '''Check path ends with os.sep or os.altsep.'''
1339 1340 return path.endswith(os.sep) or os.altsep and path.endswith(os.altsep)
1340 1341
1341 1342 def splitpath(path):
1342 1343 '''Split path by os.sep.
1343 1344 Note that this function does not use os.altsep because this is
1344 1345 an alternative to a simple "xxx.split(os.sep)".
1345 1346 It is recommended to use os.path.normpath() before using this
1346 1347 function if needed.'''
1347 1348 return path.split(os.sep)
1348 1349
1349 1350 def gui():
1350 1351 '''Are we running in a GUI?'''
1351 1352 if sys.platform == 'darwin':
1352 1353 if 'SSH_CONNECTION' in os.environ:
1353 1354 # handle SSH access to a box where the user is logged in
1354 1355 return False
1355 1356 elif getattr(osutil, 'isgui', None):
1356 1357 # check if a CoreGraphics session is available
1357 1358 return osutil.isgui()
1358 1359 else:
1359 1360 # pure build; use a safe default
1360 1361 return True
1361 1362 else:
1362 1363 return os.name == "nt" or os.environ.get("DISPLAY")
1363 1364
1364 1365 def mktempcopy(name, emptyok=False, createmode=None):
1365 1366 """Create a temporary file with the same contents from name
1366 1367
1367 1368 The permission bits are copied from the original file.
1368 1369
1369 1370 If the temporary file is going to be truncated immediately, you
1370 1371 can use emptyok=True as an optimization.
1371 1372
1372 1373 Returns the name of the temporary file.
1373 1374 """
1374 1375 d, fn = os.path.split(name)
1375 1376 fd, temp = tempfile.mkstemp(prefix='.%s-' % fn, dir=d)
1376 1377 os.close(fd)
1377 1378 # Temporary files are created with mode 0600, which is usually not
1378 1379 # what we want. If the original file already exists, just copy
1379 1380 # its mode. Otherwise, manually obey umask.
1380 1381 copymode(name, temp, createmode)
1381 1382 if emptyok:
1382 1383 return temp
1383 1384 try:
1384 1385 try:
1385 1386 ifp = posixfile(name, "rb")
1386 1387 except IOError as inst:
1387 1388 if inst.errno == errno.ENOENT:
1388 1389 return temp
1389 1390 if not getattr(inst, 'filename', None):
1390 1391 inst.filename = name
1391 1392 raise
1392 1393 ofp = posixfile(temp, "wb")
1393 1394 for chunk in filechunkiter(ifp):
1394 1395 ofp.write(chunk)
1395 1396 ifp.close()
1396 1397 ofp.close()
1397 1398 except: # re-raises
1398 1399 try: os.unlink(temp)
1399 1400 except OSError: pass
1400 1401 raise
1401 1402 return temp
1402 1403
1403 1404 class filestat(object):
1404 1405 """help to exactly detect change of a file
1405 1406
1406 1407 'stat' attribute is result of 'os.stat()' if specified 'path'
1407 1408 exists. Otherwise, it is None. This can avoid preparative
1408 1409 'exists()' examination on client side of this class.
1409 1410 """
1410 1411 def __init__(self, path):
1411 1412 try:
1412 1413 self.stat = os.stat(path)
1413 1414 except OSError as err:
1414 1415 if err.errno != errno.ENOENT:
1415 1416 raise
1416 1417 self.stat = None
1417 1418
1418 1419 __hash__ = object.__hash__
1419 1420
1420 1421 def __eq__(self, old):
1421 1422 try:
1422 1423 # if ambiguity between stat of new and old file is
1423 1424 # avoided, comparison of size, ctime and mtime is enough
1424 1425 # to exactly detect change of a file regardless of platform
1425 1426 return (self.stat.st_size == old.stat.st_size and
1426 1427 self.stat.st_ctime == old.stat.st_ctime and
1427 1428 self.stat.st_mtime == old.stat.st_mtime)
1428 1429 except AttributeError:
1429 1430 return False
1430 1431
1431 1432 def isambig(self, old):
1432 1433 """Examine whether new (= self) stat is ambiguous against old one
1433 1434
1434 1435 "S[N]" below means stat of a file at N-th change:
1435 1436
1436 1437 - S[n-1].ctime < S[n].ctime: can detect change of a file
1437 1438 - S[n-1].ctime == S[n].ctime
1438 1439 - S[n-1].ctime < S[n].mtime: means natural advancing (*1)
1439 1440 - S[n-1].ctime == S[n].mtime: is ambiguous (*2)
1440 1441 - S[n-1].ctime > S[n].mtime: never occurs naturally (don't care)
1441 1442 - S[n-1].ctime > S[n].ctime: never occurs naturally (don't care)
1442 1443
1443 1444 Case (*2) above means that a file was changed twice or more
1444 1445 within the same second (= S[n-1].ctime), and comparison of
1445 1446 timestamps is ambiguous.
1446 1447 
1447 1448 The basic idea to avoid such ambiguity is "advance mtime by 1
1448 1449 sec, if the timestamp is ambiguous".
1449 1450 
1450 1451 But advancing mtime only in case (*2) doesn't work as
1451 1452 expected, because naturally advanced S[n].mtime in case (*1)
1452 1453 might be equal to manually advanced S[n-1 or earlier].mtime.
1453 1454 
1454 1455 Therefore, all "S[n-1].ctime == S[n].ctime" cases should be
1455 1456 treated as ambiguous regardless of mtime, to avoid overlooking
1456 1457 changes hidden by such colliding mtimes.
1457 1458
1458 1459 Advancing mtime "if isambig(oldstat)" ensures "S[n-1].mtime !=
1459 1460 S[n].mtime", even if size of a file isn't changed.
1460 1461 """
1461 1462 try:
1462 1463 return (self.stat.st_ctime == old.stat.st_ctime)
1463 1464 except AttributeError:
1464 1465 return False
1465 1466
1466 1467 def __ne__(self, other):
1467 1468 return not self == other
1468 1469
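# Illustrative sketch (not part of the original module): the disambiguation
# pattern copyfile() uses above and atomictempfile uses below. The 'path'
# argument is hypothetical and the rewrite step is elided.
def _filestat_example(path):
    oldstat = filestat(path)
    # ... rewrite the file at 'path' here ...
    newstat = filestat(path)
    if oldstat.stat and newstat.isambig(oldstat):
        # same ctime: advance mtime by one second so the change is detectable
        advanced = (oldstat.stat.st_mtime + 1) & 0x7fffffff
        os.utime(path, (advanced, advanced))
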
1469 1470 class atomictempfile(object):
1470 1471 '''writable file object that atomically updates a file
1471 1472
1472 1473 All writes will go to a temporary copy of the original file. Call
1473 1474 close() when you are done writing, and atomictempfile will rename
1474 1475 the temporary copy to the original name, making the changes
1475 1476 visible. If the object is destroyed without being closed, all your
1476 1477 writes are discarded.
1477 1478
1478 1479 checkambig argument of constructor is used with filestat, and is
1479 1480 useful only if target file is guarded by any lock (e.g. repo.lock
1480 1481 or repo.wlock).
1481 1482 '''
1482 1483 def __init__(self, name, mode='w+b', createmode=None, checkambig=False):
1483 1484 self.__name = name # permanent name
1484 1485 self._tempname = mktempcopy(name, emptyok=('w' in mode),
1485 1486 createmode=createmode)
1486 1487 self._fp = posixfile(self._tempname, mode)
1487 1488 self._checkambig = checkambig
1488 1489
1489 1490 # delegated methods
1490 1491 self.read = self._fp.read
1491 1492 self.write = self._fp.write
1492 1493 self.seek = self._fp.seek
1493 1494 self.tell = self._fp.tell
1494 1495 self.fileno = self._fp.fileno
1495 1496
1496 1497 def close(self):
1497 1498 if not self._fp.closed:
1498 1499 self._fp.close()
1499 1500 filename = localpath(self.__name)
1500 1501 oldstat = self._checkambig and filestat(filename)
1501 1502 if oldstat and oldstat.stat:
1502 1503 rename(self._tempname, filename)
1503 1504 newstat = filestat(filename)
1504 1505 if newstat.isambig(oldstat):
1505 1506 # stat of changed file is ambiguous to original one
1506 1507 advanced = (oldstat.stat.st_mtime + 1) & 0x7fffffff
1507 1508 os.utime(filename, (advanced, advanced))
1508 1509 else:
1509 1510 rename(self._tempname, filename)
1510 1511
1511 1512 def discard(self):
1512 1513 if not self._fp.closed:
1513 1514 try:
1514 1515 os.unlink(self._tempname)
1515 1516 except OSError:
1516 1517 pass
1517 1518 self._fp.close()
1518 1519
1519 1520 def __del__(self):
1520 1521 if safehasattr(self, '_fp'): # constructor actually did something
1521 1522 self.discard()
1522 1523
1523 1524 def __enter__(self):
1524 1525 return self
1525 1526
1526 1527 def __exit__(self, exctype, excvalue, traceback):
1527 1528 if exctype is not None:
1528 1529 self.discard()
1529 1530 else:
1530 1531 self.close()
1531 1532
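# Illustrative usage sketch (not part of the original module): writing through
# atomictempfile so readers never observe a half-written file. The file name
# is hypothetical; a scratch directory keeps the sketch self-contained.
def _atomictempfile_example():
    path = os.path.join(tempfile.mkdtemp(), 'somefile.txt')
    with atomictempfile(path) as f:
        f.write('all or nothing\n')
    # close() has renamed the temporary copy into place
    assert readfile(path) == 'all or nothing\n'
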
1532 1533 def makedirs(name, mode=None, notindexed=False):
1533 1534 """recursive directory creation with parent mode inheritance
1534 1535
1535 1536 Newly created directories are marked as "not to be indexed by
1536 1537 the content indexing service", if ``notindexed`` is specified
1537 1538 for "write" mode access.
1538 1539 """
1539 1540 try:
1540 1541 makedir(name, notindexed)
1541 1542 except OSError as err:
1542 1543 if err.errno == errno.EEXIST:
1543 1544 return
1544 1545 if err.errno != errno.ENOENT or not name:
1545 1546 raise
1546 1547 parent = os.path.dirname(os.path.abspath(name))
1547 1548 if parent == name:
1548 1549 raise
1549 1550 makedirs(parent, mode, notindexed)
1550 1551 try:
1551 1552 makedir(name, notindexed)
1552 1553 except OSError as err:
1553 1554 # Catch EEXIST to handle races
1554 1555 if err.errno == errno.EEXIST:
1555 1556 return
1556 1557 raise
1557 1558 if mode is not None:
1558 1559 os.chmod(name, mode)
1559 1560
1560 1561 def readfile(path):
1561 1562 with open(path, 'rb') as fp:
1562 1563 return fp.read()
1563 1564
1564 1565 def writefile(path, text):
1565 1566 with open(path, 'wb') as fp:
1566 1567 fp.write(text)
1567 1568
1568 1569 def appendfile(path, text):
1569 1570 with open(path, 'ab') as fp:
1570 1571 fp.write(text)
1571 1572
1572 1573 class chunkbuffer(object):
1573 1574 """Allow arbitrary sized chunks of data to be efficiently read from an
1574 1575 iterator over chunks of arbitrary size."""
1575 1576
1576 1577 def __init__(self, in_iter):
1577 1578 """in_iter is the iterator that's iterating over the input chunks.
1578 1579 targetsize is how big a buffer to try to maintain."""
1579 1580 def splitbig(chunks):
1580 1581 for chunk in chunks:
1581 1582 if len(chunk) > 2**20:
1582 1583 pos = 0
1583 1584 while pos < len(chunk):
1584 1585 end = pos + 2 ** 18
1585 1586 yield chunk[pos:end]
1586 1587 pos = end
1587 1588 else:
1588 1589 yield chunk
1589 1590 self.iter = splitbig(in_iter)
1590 1591 self._queue = collections.deque()
1591 1592 self._chunkoffset = 0
1592 1593
1593 1594 def read(self, l=None):
1594 1595 """Read L bytes of data from the iterator of chunks of data.
1595 1596 Returns less than L bytes if the iterator runs dry.
1596 1597
1597 1598 If size parameter is omitted, read everything"""
1598 1599 if l is None:
1599 1600 return ''.join(self.iter)
1600 1601
1601 1602 left = l
1602 1603 buf = []
1603 1604 queue = self._queue
1604 1605 while left > 0:
1605 1606 # refill the queue
1606 1607 if not queue:
1607 1608 target = 2**18
1608 1609 for chunk in self.iter:
1609 1610 queue.append(chunk)
1610 1611 target -= len(chunk)
1611 1612 if target <= 0:
1612 1613 break
1613 1614 if not queue:
1614 1615 break
1615 1616
1616 1617 # The easy way to do this would be to queue.popleft(), modify the
1617 1618 # chunk (if necessary), then queue.appendleft(). However, for cases
1618 1619 # where we read partial chunk content, this incurs 2 dequeue
1619 1620 # mutations and creates a new str for the remaining chunk in the
1620 1621 # queue. Our code below avoids this overhead.
1621 1622
1622 1623 chunk = queue[0]
1623 1624 chunkl = len(chunk)
1624 1625 offset = self._chunkoffset
1625 1626
1626 1627 # Use full chunk.
1627 1628 if offset == 0 and left >= chunkl:
1628 1629 left -= chunkl
1629 1630 queue.popleft()
1630 1631 buf.append(chunk)
1631 1632 # self._chunkoffset remains at 0.
1632 1633 continue
1633 1634
1634 1635 chunkremaining = chunkl - offset
1635 1636
1636 1637 # Use all of unconsumed part of chunk.
1637 1638 if left >= chunkremaining:
1638 1639 left -= chunkremaining
1639 1640 queue.popleft()
1640 1641 # offset == 0 is enabled by block above, so this won't merely
1641 1642 # copy via ``chunk[0:]``.
1642 1643 buf.append(chunk[offset:])
1643 1644 self._chunkoffset = 0
1644 1645
1645 1646 # Partial chunk needed.
1646 1647 else:
1647 1648 buf.append(chunk[offset:offset + left])
1648 1649 self._chunkoffset += left
1649 1650 left -= chunkremaining
1650 1651
1651 1652 return ''.join(buf)
1652 1653
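# Illustrative usage sketch (not part of the original module): chunkbuffer
# lets a consumer read fixed-size records from an iterator that yields chunks
# of unrelated sizes.
def _chunkbuffer_example():
    buf = chunkbuffer(iter(['abc', 'defg', 'h']))
    assert buf.read(4) == 'abcd'
    assert buf.read(4) == 'efgh'
    assert buf.read(4) == ''    # the underlying iterator has run dry
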
1653 1654 def filechunkiter(f, size=65536, limit=None):
1654 1655 """Create a generator that produces the data in the file size
1655 1656 (default 65536) bytes at a time, up to optional limit (default is
1656 1657 to read all data). Chunks may be less than size bytes if the
1657 1658 chunk is the last chunk in the file, or the file is a socket or
1658 1659 some other type of file that sometimes reads less data than is
1659 1660 requested."""
1660 1661 assert size >= 0
1661 1662 assert limit is None or limit >= 0
1662 1663 while True:
1663 1664 if limit is None:
1664 1665 nbytes = size
1665 1666 else:
1666 1667 nbytes = min(limit, size)
1667 1668 s = nbytes and f.read(nbytes)
1668 1669 if not s:
1669 1670 break
1670 1671 if limit:
1671 1672 limit -= len(s)
1672 1673 yield s
1673 1674
1674 1675 def makedate(timestamp=None):
1675 1676 '''Return a unix timestamp (or the current time) as a (unixtime,
1676 1677 offset) tuple based off the local timezone.'''
1677 1678 if timestamp is None:
1678 1679 timestamp = time.time()
1679 1680 if timestamp < 0:
1680 1681 hint = _("check your clock")
1681 1682 raise Abort(_("negative timestamp: %d") % timestamp, hint=hint)
1682 1683 delta = (datetime.datetime.utcfromtimestamp(timestamp) -
1683 1684 datetime.datetime.fromtimestamp(timestamp))
1684 1685 tz = delta.days * 86400 + delta.seconds
1685 1686 return timestamp, tz
1686 1687
1687 1688 def datestr(date=None, format='%a %b %d %H:%M:%S %Y %1%2'):
1688 1689 """represent a (unixtime, offset) tuple as a localized time.
1689 1690 unixtime is seconds since the epoch, and offset is the time zone's
1690 1691 number of seconds away from UTC.
1691 1692
1692 1693 >>> datestr((0, 0))
1693 1694 'Thu Jan 01 00:00:00 1970 +0000'
1694 1695 >>> datestr((42, 0))
1695 1696 'Thu Jan 01 00:00:42 1970 +0000'
1696 1697 >>> datestr((-42, 0))
1697 1698 'Wed Dec 31 23:59:18 1969 +0000'
1698 1699 >>> datestr((0x7fffffff, 0))
1699 1700 'Tue Jan 19 03:14:07 2038 +0000'
1700 1701 >>> datestr((-0x80000000, 0))
1701 1702 'Fri Dec 13 20:45:52 1901 +0000'
1702 1703 """
1703 1704 t, tz = date or makedate()
1704 1705 if "%1" in format or "%2" in format or "%z" in format:
1705 1706 sign = (tz > 0) and "-" or "+"
1706 1707 minutes = abs(tz) // 60
1707 1708 q, r = divmod(minutes, 60)
1708 1709 format = format.replace("%z", "%1%2")
1709 1710 format = format.replace("%1", "%c%02d" % (sign, q))
1710 1711 format = format.replace("%2", "%02d" % r)
1711 1712 d = t - tz
1712 1713 if d > 0x7fffffff:
1713 1714 d = 0x7fffffff
1714 1715 elif d < -0x80000000:
1715 1716 d = -0x80000000
1716 1717 # Never use time.gmtime() and datetime.datetime.fromtimestamp()
1717 1718 # because they use the gmtime() system call which is buggy on Windows
1718 1719 # for negative values.
1719 1720 t = datetime.datetime(1970, 1, 1) + datetime.timedelta(seconds=d)
1720 1721 s = t.strftime(format)
1721 1722 return s
1722 1723
1723 1724 def shortdate(date=None):
1724 1725 """turn (timestamp, tzoff) tuple into iso 8631 date."""
1725 1726 return datestr(date, format='%Y-%m-%d')
1726 1727
1727 1728 def parsetimezone(tz):
1728 1729 """parse a timezone string and return an offset integer"""
1729 1730 if tz[0] in "+-" and len(tz) == 5 and tz[1:].isdigit():
1730 1731 sign = (tz[0] == "+") and 1 or -1
1731 1732 hours = int(tz[1:3])
1732 1733 minutes = int(tz[3:5])
1733 1734 return -sign * (hours * 60 + minutes) * 60
1734 1735 if tz == "GMT" or tz == "UTC":
1735 1736 return 0
1736 1737 return None
1737 1738
1738 1739 def strdate(string, format, defaults=[]):
1739 1740 """parse a localized time string and return a (unixtime, offset) tuple.
1740 1741 if the string cannot be parsed, ValueError is raised."""
1741 1742 # NOTE: unixtime = localunixtime + offset
1742 1743 offset, date = parsetimezone(string.split()[-1]), string
1743 1744 if offset is not None:
1744 1745 date = " ".join(string.split()[:-1])
1745 1746
1746 1747 # add missing elements from defaults
1747 1748 usenow = False # default to using biased defaults
1748 1749 for part in ("S", "M", "HI", "d", "mb", "yY"): # decreasing specificity
1749 1750 found = [True for p in part if ("%"+p) in format]
1750 1751 if not found:
1751 1752 date += "@" + defaults[part][usenow]
1752 1753 format += "@%" + part[0]
1753 1754 else:
1754 1755 # We've found a specific time element; less specific time
1755 1756 # elements are relative to today
1756 1757 usenow = True
1757 1758
1758 1759 timetuple = time.strptime(date, format)
1759 1760 localunixtime = int(calendar.timegm(timetuple))
1760 1761 if offset is None:
1761 1762 # local timezone
1762 1763 unixtime = int(time.mktime(timetuple))
1763 1764 offset = unixtime - localunixtime
1764 1765 else:
1765 1766 unixtime = localunixtime + offset
1766 1767 return unixtime, offset
1767 1768
1768 1769 def parsedate(date, formats=None, bias=None):
1769 1770 """parse a localized date/time and return a (unixtime, offset) tuple.
1770 1771
1771 1772 The date may be a "unixtime offset" string or in one of the specified
1772 1773 formats. If the date already is a (unixtime, offset) tuple, it is returned.
1773 1774
1774 1775 >>> parsedate(' today ') == parsedate(\
1775 1776 datetime.date.today().strftime('%b %d'))
1776 1777 True
1777 1778 >>> parsedate( 'yesterday ') == parsedate((datetime.date.today() -\
1778 1779 datetime.timedelta(days=1)\
1779 1780 ).strftime('%b %d'))
1780 1781 True
1781 1782 >>> now, tz = makedate()
1782 1783 >>> strnow, strtz = parsedate('now')
1783 1784 >>> (strnow - now) < 1
1784 1785 True
1785 1786 >>> tz == strtz
1786 1787 True
1787 1788 """
1788 1789 if bias is None:
1789 1790 bias = {}
1790 1791 if not date:
1791 1792 return 0, 0
1792 1793 if isinstance(date, tuple) and len(date) == 2:
1793 1794 return date
1794 1795 if not formats:
1795 1796 formats = defaultdateformats
1796 1797 date = date.strip()
1797 1798
1798 1799 if date == 'now' or date == _('now'):
1799 1800 return makedate()
1800 1801 if date == 'today' or date == _('today'):
1801 1802 date = datetime.date.today().strftime('%b %d')
1802 1803 elif date == 'yesterday' or date == _('yesterday'):
1803 1804 date = (datetime.date.today() -
1804 1805 datetime.timedelta(days=1)).strftime('%b %d')
1805 1806
1806 1807 try:
1807 1808 when, offset = map(int, date.split(' '))
1808 1809 except ValueError:
1809 1810 # fill out defaults
1810 1811 now = makedate()
1811 1812 defaults = {}
1812 1813 for part in ("d", "mb", "yY", "HI", "M", "S"):
1813 1814 # this piece is for rounding the specific end of unknowns
1814 1815 b = bias.get(part)
1815 1816 if b is None:
1816 1817 if part[0] in "HMS":
1817 1818 b = "00"
1818 1819 else:
1819 1820 b = "0"
1820 1821
1821 1822 # this piece is for matching the generic end to today's date
1822 1823 n = datestr(now, "%" + part[0])
1823 1824
1824 1825 defaults[part] = (b, n)
1825 1826
1826 1827 for format in formats:
1827 1828 try:
1828 1829 when, offset = strdate(date, format, defaults)
1829 1830 except (ValueError, OverflowError):
1830 1831 pass
1831 1832 else:
1832 1833 break
1833 1834 else:
1834 1835 raise Abort(_('invalid date: %r') % date)
1835 1836 # validate explicit (probably user-specified) date and
1836 1837 # time zone offset. values must fit in signed 32 bits for
1837 1838 # current 32-bit linux runtimes. timezones go from UTC-12
1838 1839 # to UTC+14
1839 1840 if when < -0x80000000 or when > 0x7fffffff:
1840 1841 raise Abort(_('date exceeds 32 bits: %d') % when)
1841 1842 if offset < -50400 or offset > 43200:
1842 1843 raise Abort(_('impossible time zone offset: %d') % offset)
1843 1844 return when, offset
1844 1845
1845 1846 def matchdate(date):
1846 1847 """Return a function that matches a given date match specifier
1847 1848
1848 1849 Formats include:
1849 1850
1850 1851 '{date}' match a given date to the accuracy provided
1851 1852
1852 1853 '<{date}' on or before a given date
1853 1854
1854 1855 '>{date}' on or after a given date
1855 1856
1856 1857 >>> p1 = parsedate("10:29:59")
1857 1858 >>> p2 = parsedate("10:30:00")
1858 1859 >>> p3 = parsedate("10:30:59")
1859 1860 >>> p4 = parsedate("10:31:00")
1860 1861 >>> p5 = parsedate("Sep 15 10:30:00 1999")
1861 1862 >>> f = matchdate("10:30")
1862 1863 >>> f(p1[0])
1863 1864 False
1864 1865 >>> f(p2[0])
1865 1866 True
1866 1867 >>> f(p3[0])
1867 1868 True
1868 1869 >>> f(p4[0])
1869 1870 False
1870 1871 >>> f(p5[0])
1871 1872 False
1872 1873 """
1873 1874
1874 1875 def lower(date):
1875 1876 d = {'mb': "1", 'd': "1"}
1876 1877 return parsedate(date, extendeddateformats, d)[0]
1877 1878
1878 1879 def upper(date):
1879 1880 d = {'mb': "12", 'HI': "23", 'M': "59", 'S': "59"}
1880 1881 for days in ("31", "30", "29"):
1881 1882 try:
1882 1883 d["d"] = days
1883 1884 return parsedate(date, extendeddateformats, d)[0]
1884 1885 except Abort:
1885 1886 pass
1886 1887 d["d"] = "28"
1887 1888 return parsedate(date, extendeddateformats, d)[0]
1888 1889
1889 1890 date = date.strip()
1890 1891
1891 1892 if not date:
1892 1893 raise Abort(_("dates cannot consist entirely of whitespace"))
1893 1894 elif date[0] == "<":
1894 1895 if not date[1:]:
1895 1896 raise Abort(_("invalid day spec, use '<DATE'"))
1896 1897 when = upper(date[1:])
1897 1898 return lambda x: x <= when
1898 1899 elif date[0] == ">":
1899 1900 if not date[1:]:
1900 1901 raise Abort(_("invalid day spec, use '>DATE'"))
1901 1902 when = lower(date[1:])
1902 1903 return lambda x: x >= when
1903 1904 elif date[0] == "-":
1904 1905 try:
1905 1906 days = int(date[1:])
1906 1907 except ValueError:
1907 1908 raise Abort(_("invalid day spec: %s") % date[1:])
1908 1909 if days < 0:
1909 1910 raise Abort(_('%s must be nonnegative (see "hg help dates")')
1910 1911 % date[1:])
1911 1912 when = makedate()[0] - days * 3600 * 24
1912 1913 return lambda x: x >= when
1913 1914 elif " to " in date:
1914 1915 a, b = date.split(" to ")
1915 1916 start, stop = lower(a), upper(b)
1916 1917 return lambda x: x >= start and x <= stop
1917 1918 else:
1918 1919 start, stop = lower(date), upper(date)
1919 1920 return lambda x: x >= start and x <= stop
1920 1921
1921 1922 def stringmatcher(pattern):
1922 1923 """
1923 1924 accepts a string, possibly starting with 're:' or 'literal:' prefix.
1924 1925 returns the matcher name, pattern, and matcher function.
1925 1926 missing or unknown prefixes are treated as literal matches.
1926 1927
1927 1928 helper for tests:
1928 1929 >>> def test(pattern, *tests):
1929 1930 ... kind, pattern, matcher = stringmatcher(pattern)
1930 1931 ... return (kind, pattern, [bool(matcher(t)) for t in tests])
1931 1932
1932 1933 exact matching (no prefix):
1933 1934 >>> test('abcdefg', 'abc', 'def', 'abcdefg')
1934 1935 ('literal', 'abcdefg', [False, False, True])
1935 1936
1936 1937 regex matching ('re:' prefix)
1937 1938 >>> test('re:a.+b', 'nomatch', 'fooadef', 'fooadefbar')
1938 1939 ('re', 'a.+b', [False, False, True])
1939 1940
1940 1941 force exact matches ('literal:' prefix)
1941 1942 >>> test('literal:re:foobar', 'foobar', 're:foobar')
1942 1943 ('literal', 're:foobar', [False, True])
1943 1944
1944 1945 unknown prefixes are ignored and treated as literals
1945 1946 >>> test('foo:bar', 'foo', 'bar', 'foo:bar')
1946 1947 ('literal', 'foo:bar', [False, False, True])
1947 1948 """
1948 1949 if pattern.startswith('re:'):
1949 1950 pattern = pattern[3:]
1950 1951 try:
1951 1952 regex = remod.compile(pattern)
1952 1953 except remod.error as e:
1953 1954 raise error.ParseError(_('invalid regular expression: %s')
1954 1955 % e)
1955 1956 return 're', pattern, regex.search
1956 1957 elif pattern.startswith('literal:'):
1957 1958 pattern = pattern[8:]
1958 1959 return 'literal', pattern, pattern.__eq__
1959 1960
1960 1961 def shortuser(user):
1961 1962 """Return a short representation of a user name or email address."""
1962 1963 f = user.find('@')
1963 1964 if f >= 0:
1964 1965 user = user[:f]
1965 1966 f = user.find('<')
1966 1967 if f >= 0:
1967 1968 user = user[f + 1:]
1968 1969 f = user.find(' ')
1969 1970 if f >= 0:
1970 1971 user = user[:f]
1971 1972 f = user.find('.')
1972 1973 if f >= 0:
1973 1974 user = user[:f]
1974 1975 return user
1975 1976
1976 1977 def emailuser(user):
1977 1978 """Return the user portion of an email address."""
1978 1979 f = user.find('@')
1979 1980 if f >= 0:
1980 1981 user = user[:f]
1981 1982 f = user.find('<')
1982 1983 if f >= 0:
1983 1984 user = user[f + 1:]
1984 1985 return user
1985 1986
1986 1987 def email(author):
1987 1988 '''get email of author.'''
1988 1989 r = author.find('>')
1989 1990 if r == -1:
1990 1991 r = None
1991 1992 return author[author.find('<') + 1:r]
1992 1993
1993 1994 def ellipsis(text, maxlength=400):
1994 1995 """Trim string to at most maxlength (default: 400) columns in display."""
1995 1996 return encoding.trim(text, maxlength, ellipsis='...')
1996 1997
1997 1998 def unitcountfn(*unittable):
1998 1999 '''return a function that renders a readable count of some quantity'''
1999 2000
2000 2001 def go(count):
2001 2002 for multiplier, divisor, format in unittable:
2002 2003 if count >= divisor * multiplier:
2003 2004 return format % (count / float(divisor))
2004 2005 return unittable[-1][2] % count
2005 2006
2006 2007 return go
2007 2008
2008 2009 bytecount = unitcountfn(
2009 2010 (100, 1 << 30, _('%.0f GB')),
2010 2011 (10, 1 << 30, _('%.1f GB')),
2011 2012 (1, 1 << 30, _('%.2f GB')),
2012 2013 (100, 1 << 20, _('%.0f MB')),
2013 2014 (10, 1 << 20, _('%.1f MB')),
2014 2015 (1, 1 << 20, _('%.2f MB')),
2015 2016 (100, 1 << 10, _('%.0f KB')),
2016 2017 (10, 1 << 10, _('%.1f KB')),
2017 2018 (1, 1 << 10, _('%.2f KB')),
2018 2019 (1, 1, _('%.0f bytes')),
2019 2020 )
2020 2021
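# Illustrative usage sketch (not part of the original module): bytecount picks
# the unit and precision from the magnitude. The expected strings assume the
# default (untranslated) unit labels.
def _bytecount_example():
    assert bytecount(0) == '0 bytes'
    assert bytecount(2048) == '2.00 KB'
    assert bytecount(10 * (1 << 20)) == '10.0 MB'
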
2021 2022 def uirepr(s):
2022 2023 # Avoid double backslash in Windows path repr()
2023 2024 return repr(s).replace('\\\\', '\\')
2024 2025
2025 2026 # delay import of textwrap
2026 2027 def MBTextWrapper(**kwargs):
2027 2028 class tw(textwrap.TextWrapper):
2028 2029 """
2029 2030 Extend TextWrapper for width-awareness.
2030 2031
2031 2032 Neither the number of 'bytes' in any encoding nor 'characters' is
2032 2033 appropriate for calculating terminal columns of a given string.
2033 2034 
2034 2035 The original TextWrapper implementation uses built-in 'len()' directly,
2035 2036 so overriding is needed to use the width information of each character.
2036 2037 
2037 2038 In addition, characters classified as 'ambiguous' width are
2038 2039 treated as wide in East Asian locales, but as narrow elsewhere.
2039 2040 
2040 2041 This requires a per-user decision to determine the width of such characters.
2041 2042 """
2042 2043 def _cutdown(self, ucstr, space_left):
2043 2044 l = 0
2044 2045 colwidth = encoding.ucolwidth
2045 2046 for i in xrange(len(ucstr)):
2046 2047 l += colwidth(ucstr[i])
2047 2048 if space_left < l:
2048 2049 return (ucstr[:i], ucstr[i:])
2049 2050 return ucstr, ''
2050 2051
2051 2052 # overriding of base class
2052 2053 def _handle_long_word(self, reversed_chunks, cur_line, cur_len, width):
2053 2054 space_left = max(width - cur_len, 1)
2054 2055
2055 2056 if self.break_long_words:
2056 2057 cut, res = self._cutdown(reversed_chunks[-1], space_left)
2057 2058 cur_line.append(cut)
2058 2059 reversed_chunks[-1] = res
2059 2060 elif not cur_line:
2060 2061 cur_line.append(reversed_chunks.pop())
2061 2062
2062 2063 # this overriding code is imported from TextWrapper of Python 2.6
2063 2064 # to calculate columns of string by 'encoding.ucolwidth()'
2064 2065 def _wrap_chunks(self, chunks):
2065 2066 colwidth = encoding.ucolwidth
2066 2067
2067 2068 lines = []
2068 2069 if self.width <= 0:
2069 2070 raise ValueError("invalid width %r (must be > 0)" % self.width)
2070 2071
2071 2072 # Arrange in reverse order so items can be efficiently popped
2072 2073 # from a stack of chucks.
2073 2074 chunks.reverse()
2074 2075
2075 2076 while chunks:
2076 2077
2077 2078 # Start the list of chunks that will make up the current line.
2078 2079 # cur_len is just the length of all the chunks in cur_line.
2079 2080 cur_line = []
2080 2081 cur_len = 0
2081 2082
2082 2083 # Figure out which static string will prefix this line.
2083 2084 if lines:
2084 2085 indent = self.subsequent_indent
2085 2086 else:
2086 2087 indent = self.initial_indent
2087 2088
2088 2089 # Maximum width for this line.
2089 2090 width = self.width - len(indent)
2090 2091
2091 2092 # First chunk on line is whitespace -- drop it, unless this
2092 2093 # is the very beginning of the text (i.e. no lines started yet).
2093 2094 if self.drop_whitespace and chunks[-1].strip() == '' and lines:
2094 2095 del chunks[-1]
2095 2096
2096 2097 while chunks:
2097 2098 l = colwidth(chunks[-1])
2098 2099
2099 2100 # Can at least squeeze this chunk onto the current line.
2100 2101 if cur_len + l <= width:
2101 2102 cur_line.append(chunks.pop())
2102 2103 cur_len += l
2103 2104
2104 2105 # Nope, this line is full.
2105 2106 else:
2106 2107 break
2107 2108
2108 2109 # The current line is full, and the next chunk is too big to
2109 2110 # fit on *any* line (not just this one).
2110 2111 if chunks and colwidth(chunks[-1]) > width:
2111 2112 self._handle_long_word(chunks, cur_line, cur_len, width)
2112 2113
2113 2114 # If the last chunk on this line is all whitespace, drop it.
2114 2115 if (self.drop_whitespace and
2115 2116 cur_line and cur_line[-1].strip() == ''):
2116 2117 del cur_line[-1]
2117 2118
2118 2119 # Convert current line back to a string and store it in list
2119 2120 # of all lines (return value).
2120 2121 if cur_line:
2121 2122 lines.append(indent + ''.join(cur_line))
2122 2123
2123 2124 return lines
2124 2125
2125 2126 global MBTextWrapper
2126 2127 MBTextWrapper = tw
2127 2128 return tw(**kwargs)
2128 2129
2129 2130 def wrap(line, width, initindent='', hangindent=''):
2130 2131 maxindent = max(len(hangindent), len(initindent))
2131 2132 if width <= maxindent:
2132 2133 # adjust for weird terminal size
2133 2134 width = max(78, maxindent + 1)
2134 2135 line = line.decode(encoding.encoding, encoding.encodingmode)
2135 2136 initindent = initindent.decode(encoding.encoding, encoding.encodingmode)
2136 2137 hangindent = hangindent.decode(encoding.encoding, encoding.encodingmode)
2137 2138 wrapper = MBTextWrapper(width=width,
2138 2139 initial_indent=initindent,
2139 2140 subsequent_indent=hangindent)
2140 2141 return wrapper.fill(line).encode(encoding.encoding)
2141 2142
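A small sketch of wrap() with a hanging indent; the exact line breaks depend on the text and width, so only the shape of the output is indicated:

    from mercurial import util

    text = 'this paragraph is wrapped so that continuation lines get indented'
    # first line starts at column 0, later lines start with four spaces
    print util.wrap(text, 30, initindent='', hangindent='    ')
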
2142 2143 def iterlines(iterator):
2143 2144 for chunk in iterator:
2144 2145 for line in chunk.splitlines():
2145 2146 yield line
2146 2147
2147 2148 def expandpath(path):
2148 2149 return os.path.expanduser(os.path.expandvars(path))
2149 2150
2150 2151 def hgcmd():
2151 2152 """Return the command used to execute current hg
2152 2153
2153 2154 This is different from hgexecutable() because on Windows we want
2154 2155 to avoid things that open new shell windows, such as batch files, so we
2155 2156 return either the Python invocation or the current executable.
2156 2157 """
2157 2158 if mainfrozen():
2158 2159 if getattr(sys, 'frozen', None) == 'macosx_app':
2159 2160 # Env variable set by py2app
2160 2161 return [os.environ['EXECUTABLEPATH']]
2161 2162 else:
2162 2163 return [sys.executable]
2163 2164 return gethgcmd()
2164 2165
2165 2166 def rundetached(args, condfn):
2166 2167 """Execute the argument list in a detached process.
2167 2168
2168 2169 condfn is a callable which is called repeatedly and should return
2169 2170 True once the child process is known to have started successfully.
2170 2171 At this point, the child process PID is returned. If the child
2171 2172 process fails to start or finishes before condfn() evaluates to
2172 2173 True, return -1.
2173 2174 """
2174 2175 # Windows case is easier because the child process is either
2175 2176 # successfully starting and validating the condition or exiting
2176 2177 # on failure. We just poll on its PID. On Unix, if the child
2177 2178 # process fails to start, it will be left in a zombie state until
2178 2179 # the parent waits on it, which we cannot do since we expect a
2179 2180 # long-running process on success. Instead we listen for SIGCHLD telling
2180 2181 # us our child process has terminated.
2181 2182 terminated = set()
2182 2183 def handler(signum, frame):
2183 2184 terminated.add(os.wait())
2184 2185 prevhandler = None
2185 2186 SIGCHLD = getattr(signal, 'SIGCHLD', None)
2186 2187 if SIGCHLD is not None:
2187 2188 prevhandler = signal.signal(SIGCHLD, handler)
2188 2189 try:
2189 2190 pid = spawndetached(args)
2190 2191 while not condfn():
2191 2192 if ((pid in terminated or not testpid(pid))
2192 2193 and not condfn()):
2193 2194 return -1
2194 2195 time.sleep(0.1)
2195 2196 return pid
2196 2197 finally:
2197 2198 if prevhandler is not None:
2198 2199 signal.signal(signal.SIGCHLD, prevhandler)
2199 2200
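A hedged sketch of a typical caller: spawn a detached 'hg serve' and use the pid file it writes as the readiness condition (the path below is hypothetical):

    import os
    from mercurial import util

    pidfile = '/tmp/hg-serve.pid'   # hypothetical location
    args = ['hg', 'serve', '-d', '--pid-file', pidfile]
    pid = util.rundetached(args, lambda: os.path.exists(pidfile))
    if pid < 0:
        print 'server failed to start'
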
2200 2201 def interpolate(prefix, mapping, s, fn=None, escape_prefix=False):
2201 2202 """Return the result of interpolating items in the mapping into string s.
2202 2203
2203 2204 prefix is a single character string, or a two character string with
2204 2205 a backslash as the first character if the prefix needs to be escaped in
2205 2206 a regular expression.
2206 2207
2207 2208 fn is an optional function that will be applied to the replacement text
2208 2209 just before replacement.
2209 2210
2210 2211 escape_prefix is an optional flag that allows using doubled prefix for
2211 2212 its escaping.
2212 2213 """
2213 2214 fn = fn or (lambda s: s)
2214 2215 patterns = '|'.join(mapping.keys())
2215 2216 if escape_prefix:
2216 2217 patterns += '|' + prefix
2217 2218 if len(prefix) > 1:
2218 2219 prefix_char = prefix[1:]
2219 2220 else:
2220 2221 prefix_char = prefix
2221 2222 mapping[prefix_char] = prefix_char
2222 2223 r = remod.compile(r'%s(%s)' % (prefix, patterns))
2223 2224 return r.sub(lambda x: fn(mapping[x.group()[1:]]), s)
2224 2225
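For example (the mapping and template below are invented):

    from mercurial import util

    s = util.interpolate('%', {'user': 'alice', 'rev': '42'},
                         'committed by %user as revision %rev')
    print s   # committed by alice as revision 42
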
2225 2226 def getport(port):
2226 2227 """Return the port for a given network service.
2227 2228
2228 2229 If port is an integer, it's returned as is. If it's a string, it's
2229 2230 looked up using socket.getservbyname(). If there's no matching
2230 2231 service, error.Abort is raised.
2231 2232 """
2232 2233 try:
2233 2234 return int(port)
2234 2235 except ValueError:
2235 2236 pass
2236 2237
2237 2238 try:
2238 2239 return socket.getservbyname(port)
2239 2240 except socket.error:
2240 2241 raise Abort(_("no port number associated with service '%s'") % port)
2241 2242
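For example (service-name lookup relies on the system services database, so 'http' resolving to 80 is the usual but not guaranteed result):

    from mercurial import util

    print util.getport(8000)     # 8000 -- integers pass through
    print util.getport('8000')   # 8000 -- numeric strings are converted
    print util.getport('http')   # 80, via socket.getservbyname()
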
2242 2243 _booleans = {'1': True, 'yes': True, 'true': True, 'on': True, 'always': True,
2243 2244 '0': False, 'no': False, 'false': False, 'off': False,
2244 2245 'never': False}
2245 2246
2246 2247 def parsebool(s):
2247 2248 """Parse s into a boolean.
2248 2249
2249 2250 If s is not a valid boolean, returns None.
2250 2251 """
2251 2252 return _booleans.get(s.lower(), None)
2252 2253
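For example:

    from mercurial import util

    print util.parsebool('on')     # True
    print util.parsebool('NEVER')  # False -- lookup is case-insensitive
    print util.parsebool('maybe')  # None -- not a recognized boolean
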
2253 2254 _hexdig = '0123456789ABCDEFabcdef'
2254 2255 _hextochr = dict((a + b, chr(int(a + b, 16)))
2255 2256 for a in _hexdig for b in _hexdig)
2256 2257
2257 2258 def _urlunquote(s):
2258 2259 """Decode HTTP/HTML % encoding.
2259 2260
2260 2261 >>> _urlunquote('abc%20def')
2261 2262 'abc def'
2262 2263 """
2263 2264 res = s.split('%')
2264 2265 # fastpath
2265 2266 if len(res) == 1:
2266 2267 return s
2267 2268 s = res[0]
2268 2269 for item in res[1:]:
2269 2270 try:
2270 2271 s += _hextochr[item[:2]] + item[2:]
2271 2272 except KeyError:
2272 2273 s += '%' + item
2273 2274 except UnicodeDecodeError:
2274 2275 s += unichr(int(item[:2], 16)) + item[2:]
2275 2276 return s
2276 2277
2277 2278 class url(object):
2278 2279 r"""Reliable URL parser.
2279 2280
2280 2281 This parses URLs and provides attributes for the following
2281 2282 components:
2282 2283
2283 2284 <scheme>://<user>:<passwd>@<host>:<port>/<path>?<query>#<fragment>
2284 2285
2285 2286 Missing components are set to None. The only exception is
2286 2287 fragment, which is set to '' if present but empty.
2287 2288
2288 2289 If parsefragment is False, fragment is included in query. If
2289 2290 parsequery is False, query is included in path. If both are
2290 2291 False, both fragment and query are included in path.
2291 2292
2292 2293 See http://www.ietf.org/rfc/rfc2396.txt for more information.
2293 2294
2294 2295 Note that for backward compatibility reasons, bundle URLs do not
2295 2296 take host names. That means 'bundle://../' has a path of '../'.
2296 2297
2297 2298 Examples:
2298 2299
2299 2300 >>> url('http://www.ietf.org/rfc/rfc2396.txt')
2300 2301 <url scheme: 'http', host: 'www.ietf.org', path: 'rfc/rfc2396.txt'>
2301 2302 >>> url('ssh://[::1]:2200//home/joe/repo')
2302 2303 <url scheme: 'ssh', host: '[::1]', port: '2200', path: '/home/joe/repo'>
2303 2304 >>> url('file:///home/joe/repo')
2304 2305 <url scheme: 'file', path: '/home/joe/repo'>
2305 2306 >>> url('file:///c:/temp/foo/')
2306 2307 <url scheme: 'file', path: 'c:/temp/foo/'>
2307 2308 >>> url('bundle:foo')
2308 2309 <url scheme: 'bundle', path: 'foo'>
2309 2310 >>> url('bundle://../foo')
2310 2311 <url scheme: 'bundle', path: '../foo'>
2311 2312 >>> url(r'c:\foo\bar')
2312 2313 <url path: 'c:\\foo\\bar'>
2313 2314 >>> url(r'\\blah\blah\blah')
2314 2315 <url path: '\\\\blah\\blah\\blah'>
2315 2316 >>> url(r'\\blah\blah\blah#baz')
2316 2317 <url path: '\\\\blah\\blah\\blah', fragment: 'baz'>
2317 2318 >>> url(r'file:///C:\users\me')
2318 2319 <url scheme: 'file', path: 'C:\\users\\me'>
2319 2320
2320 2321 Authentication credentials:
2321 2322
2322 2323 >>> url('ssh://joe:xyz@x/repo')
2323 2324 <url scheme: 'ssh', user: 'joe', passwd: 'xyz', host: 'x', path: 'repo'>
2324 2325 >>> url('ssh://joe@x/repo')
2325 2326 <url scheme: 'ssh', user: 'joe', host: 'x', path: 'repo'>
2326 2327
2327 2328 Query strings and fragments:
2328 2329
2329 2330 >>> url('http://host/a?b#c')
2330 2331 <url scheme: 'http', host: 'host', path: 'a', query: 'b', fragment: 'c'>
2331 2332 >>> url('http://host/a?b#c', parsequery=False, parsefragment=False)
2332 2333 <url scheme: 'http', host: 'host', path: 'a?b#c'>
2333 2334 """
2334 2335
2335 2336 _safechars = "!~*'()+"
2336 2337 _safepchars = "/!~*'()+:\\"
2337 2338 _matchscheme = remod.compile(r'^[a-zA-Z0-9+.\-]+:').match
2338 2339
2339 2340 def __init__(self, path, parsequery=True, parsefragment=True):
2340 2341 # We slowly chomp away at path until we have only the path left
2341 2342 self.scheme = self.user = self.passwd = self.host = None
2342 2343 self.port = self.path = self.query = self.fragment = None
2343 2344 self._localpath = True
2344 2345 self._hostport = ''
2345 2346 self._origpath = path
2346 2347
2347 2348 if parsefragment and '#' in path:
2348 2349 path, self.fragment = path.split('#', 1)
2349 2350 if not path:
2350 2351 path = None
2351 2352
2352 2353 # special case for Windows drive letters and UNC paths
2353 2354 if hasdriveletter(path) or path.startswith(r'\\'):
2354 2355 self.path = path
2355 2356 return
2356 2357
2357 2358 # For compatibility reasons, we can't handle bundle paths as
2358 2359 # normal URLs
2359 2360 if path.startswith('bundle:'):
2360 2361 self.scheme = 'bundle'
2361 2362 path = path[7:]
2362 2363 if path.startswith('//'):
2363 2364 path = path[2:]
2364 2365 self.path = path
2365 2366 return
2366 2367
2367 2368 if self._matchscheme(path):
2368 2369 parts = path.split(':', 1)
2369 2370 if parts[0]:
2370 2371 self.scheme, path = parts
2371 2372 self._localpath = False
2372 2373
2373 2374 if not path:
2374 2375 path = None
2375 2376 if self._localpath:
2376 2377 self.path = ''
2377 2378 return
2378 2379 else:
2379 2380 if self._localpath:
2380 2381 self.path = path
2381 2382 return
2382 2383
2383 2384 if parsequery and '?' in path:
2384 2385 path, self.query = path.split('?', 1)
2385 2386 if not path:
2386 2387 path = None
2387 2388 if not self.query:
2388 2389 self.query = None
2389 2390
2390 2391 # // is required to specify a host/authority
2391 2392 if path and path.startswith('//'):
2392 2393 parts = path[2:].split('/', 1)
2393 2394 if len(parts) > 1:
2394 2395 self.host, path = parts
2395 2396 else:
2396 2397 self.host = parts[0]
2397 2398 path = None
2398 2399 if not self.host:
2399 2400 self.host = None
2400 2401 # path of file:///d is /d
2401 2402 # path of file:///d:/ is d:/, not /d:/
2402 2403 if path and not hasdriveletter(path):
2403 2404 path = '/' + path
2404 2405
2405 2406 if self.host and '@' in self.host:
2406 2407 self.user, self.host = self.host.rsplit('@', 1)
2407 2408 if ':' in self.user:
2408 2409 self.user, self.passwd = self.user.split(':', 1)
2409 2410 if not self.host:
2410 2411 self.host = None
2411 2412
2412 2413 # Don't split on colons in IPv6 addresses without ports
2413 2414 if (self.host and ':' in self.host and
2414 2415 not (self.host.startswith('[') and self.host.endswith(']'))):
2415 2416 self._hostport = self.host
2416 2417 self.host, self.port = self.host.rsplit(':', 1)
2417 2418 if not self.host:
2418 2419 self.host = None
2419 2420
2420 2421 if (self.host and self.scheme == 'file' and
2421 2422 self.host not in ('localhost', '127.0.0.1', '[::1]')):
2422 2423 raise Abort(_('file:// URLs can only refer to localhost'))
2423 2424
2424 2425 self.path = path
2425 2426
2426 2427 # leave the query string escaped
2427 2428 for a in ('user', 'passwd', 'host', 'port',
2428 2429 'path', 'fragment'):
2429 2430 v = getattr(self, a)
2430 2431 if v is not None:
2431 2432 setattr(self, a, _urlunquote(v))
2432 2433
2433 2434 def __repr__(self):
2434 2435 attrs = []
2435 2436 for a in ('scheme', 'user', 'passwd', 'host', 'port', 'path',
2436 2437 'query', 'fragment'):
2437 2438 v = getattr(self, a)
2438 2439 if v is not None:
2439 2440 attrs.append('%s: %r' % (a, v))
2440 2441 return '<url %s>' % ', '.join(attrs)
2441 2442
2442 2443 def __str__(self):
2443 2444 r"""Join the URL's components back into a URL string.
2444 2445
2445 2446 Examples:
2446 2447
2447 2448 >>> str(url('http://user:pw@host:80/c:/bob?fo:oo#ba:ar'))
2448 2449 'http://user:pw@host:80/c:/bob?fo:oo#ba:ar'
2449 2450 >>> str(url('http://user:pw@host:80/?foo=bar&baz=42'))
2450 2451 'http://user:pw@host:80/?foo=bar&baz=42'
2451 2452 >>> str(url('http://user:pw@host:80/?foo=bar%3dbaz'))
2452 2453 'http://user:pw@host:80/?foo=bar%3dbaz'
2453 2454 >>> str(url('ssh://user:pw@[::1]:2200//home/joe#'))
2454 2455 'ssh://user:pw@[::1]:2200//home/joe#'
2455 2456 >>> str(url('http://localhost:80//'))
2456 2457 'http://localhost:80//'
2457 2458 >>> str(url('http://localhost:80/'))
2458 2459 'http://localhost:80/'
2459 2460 >>> str(url('http://localhost:80'))
2460 2461 'http://localhost:80/'
2461 2462 >>> str(url('bundle:foo'))
2462 2463 'bundle:foo'
2463 2464 >>> str(url('bundle://../foo'))
2464 2465 'bundle:../foo'
2465 2466 >>> str(url('path'))
2466 2467 'path'
2467 2468 >>> str(url('file:///tmp/foo/bar'))
2468 2469 'file:///tmp/foo/bar'
2469 2470 >>> str(url('file:///c:/tmp/foo/bar'))
2470 2471 'file:///c:/tmp/foo/bar'
2471 2472 >>> print url(r'bundle:foo\bar')
2472 2473 bundle:foo\bar
2473 2474 >>> print url(r'file:///D:\data\hg')
2474 2475 file:///D:\data\hg
2475 2476 """
2476 2477 if self._localpath:
2477 2478 s = self.path
2478 2479 if self.scheme == 'bundle':
2479 2480 s = 'bundle:' + s
2480 2481 if self.fragment:
2481 2482 s += '#' + self.fragment
2482 2483 return s
2483 2484
2484 2485 s = self.scheme + ':'
2485 2486 if self.user or self.passwd or self.host:
2486 2487 s += '//'
2487 2488 elif self.scheme and (not self.path or self.path.startswith('/')
2488 2489 or hasdriveletter(self.path)):
2489 2490 s += '//'
2490 2491 if hasdriveletter(self.path):
2491 2492 s += '/'
2492 2493 if self.user:
2493 2494 s += urlreq.quote(self.user, safe=self._safechars)
2494 2495 if self.passwd:
2495 2496 s += ':' + urlreq.quote(self.passwd, safe=self._safechars)
2496 2497 if self.user or self.passwd:
2497 2498 s += '@'
2498 2499 if self.host:
2499 2500 if not (self.host.startswith('[') and self.host.endswith(']')):
2500 2501 s += urlreq.quote(self.host)
2501 2502 else:
2502 2503 s += self.host
2503 2504 if self.port:
2504 2505 s += ':' + urlreq.quote(self.port)
2505 2506 if self.host:
2506 2507 s += '/'
2507 2508 if self.path:
2508 2509 # TODO: similar to the query string, we should not unescape the
2509 2510 # path when we store it, the path might contain '%2f' = '/',
2510 2511 # which we should *not* escape.
2511 2512 s += urlreq.quote(self.path, safe=self._safepchars)
2512 2513 if self.query:
2513 2514 # we store the query in escaped form.
2514 2515 s += '?' + self.query
2515 2516 if self.fragment is not None:
2516 2517 s += '#' + urlreq.quote(self.fragment, safe=self._safepchars)
2517 2518 return s
2518 2519
2519 2520 def authinfo(self):
2520 2521 user, passwd = self.user, self.passwd
2521 2522 try:
2522 2523 self.user, self.passwd = None, None
2523 2524 s = str(self)
2524 2525 finally:
2525 2526 self.user, self.passwd = user, passwd
2526 2527 if not self.user:
2527 2528 return (s, None)
2528 2529 # authinfo[1] is passed to urllib2 password manager, and its
2529 2530 # URIs must not contain credentials. The host is passed in the
2530 2531 # URIs list because Python < 2.4.3 uses only that to search for
2531 2532 # a password.
2532 2533 return (s, (None, (s, self.host),
2533 2534 self.user, self.passwd or ''))
2534 2535
2535 2536 def isabs(self):
2536 2537 if self.scheme and self.scheme != 'file':
2537 2538 return True # remote URL
2538 2539 if hasdriveletter(self.path):
2539 2540 return True # absolute for our purposes - can't be joined()
2540 2541 if self.path.startswith(r'\\'):
2541 2542 return True # Windows UNC path
2542 2543 if self.path.startswith('/'):
2543 2544 return True # POSIX-style
2544 2545 return False
2545 2546
2546 2547 def localpath(self):
2547 2548 if self.scheme == 'file' or self.scheme == 'bundle':
2548 2549 path = self.path or '/'
2549 2550 # For Windows, we need to promote hosts containing drive
2550 2551 # letters to paths with drive letters.
2551 2552 if hasdriveletter(self._hostport):
2552 2553 path = self._hostport + '/' + self.path
2553 2554 elif (self.host is not None and self.path
2554 2555 and not hasdriveletter(path)):
2555 2556 path = '/' + path
2556 2557 return path
2557 2558 return self._origpath
2558 2559
2559 2560 def islocal(self):
2560 2561 '''whether localpath will return something that posixfile can open'''
2561 2562 return (not self.scheme or self.scheme == 'file'
2562 2563 or self.scheme == 'bundle')
2563 2564
2564 2565 def hasscheme(path):
2565 2566 return bool(url(path).scheme)
2566 2567
2567 2568 def hasdriveletter(path):
2568 2569 return path and path[1:2] == ':' and path[0:1].isalpha()
2569 2570
2570 2571 def urllocalpath(path):
2571 2572 return url(path, parsequery=False, parsefragment=False).localpath()
2572 2573
2573 2574 def hidepassword(u):
2574 2575 '''hide user credential in a url string'''
2575 2576 u = url(u)
2576 2577 if u.passwd:
2577 2578 u.passwd = '***'
2578 2579 return str(u)
2579 2580
2580 2581 def removeauth(u):
2581 2582 '''remove all authentication information from a url string'''
2582 2583 u = url(u)
2583 2584 u.user = u.passwd = None
2584 2585 return str(u)
2585 2586
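A quick sketch of the two helpers above (the URL is invented):

    from mercurial import util

    u = 'https://alice:s3cret@example.com/repo'
    print util.hidepassword(u)  # https://alice:***@example.com/repo
    print util.removeauth(u)    # https://example.com/repo
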
2586 2587 def isatty(fp):
2587 2588 try:
2588 2589 return fp.isatty()
2589 2590 except AttributeError:
2590 2591 return False
2591 2592
2592 2593 timecount = unitcountfn(
2593 2594 (1, 1e3, _('%.0f s')),
2594 2595 (100, 1, _('%.1f s')),
2595 2596 (10, 1, _('%.2f s')),
2596 2597 (1, 1, _('%.3f s')),
2597 2598 (100, 0.001, _('%.1f ms')),
2598 2599 (10, 0.001, _('%.2f ms')),
2599 2600 (1, 0.001, _('%.3f ms')),
2600 2601 (100, 0.000001, _('%.1f us')),
2601 2602 (10, 0.000001, _('%.2f us')),
2602 2603 (1, 0.000001, _('%.3f us')),
2603 2604 (100, 0.000000001, _('%.1f ns')),
2604 2605 (10, 0.000000001, _('%.2f ns')),
2605 2606 (1, 0.000000001, _('%.3f ns')),
2606 2607 )
2607 2608
2608 2609 _timenesting = [0]
2609 2610
2610 2611 def timed(func):
2611 2612 '''Report the execution time of a function call to stderr.
2612 2613
2613 2614 During development, use as a decorator when you need to measure
2614 2615 the cost of a function, e.g. as follows:
2615 2616
2616 2617 @util.timed
2617 2618 def foo(a, b, c):
2618 2619 pass
2619 2620 '''
2620 2621
2621 2622 def wrapper(*args, **kwargs):
2622 2623 start = time.time()
2623 2624 indent = 2
2624 2625 _timenesting[0] += indent
2625 2626 try:
2626 2627 return func(*args, **kwargs)
2627 2628 finally:
2628 2629 elapsed = time.time() - start
2629 2630 _timenesting[0] -= indent
2630 2631 sys.stderr.write('%s%s: %s\n' %
2631 2632 (' ' * _timenesting[0], func.__name__,
2632 2633 timecount(elapsed)))
2633 2634 return wrapper
2634 2635
2635 2636 _sizeunits = (('m', 2**20), ('k', 2**10), ('g', 2**30),
2636 2637 ('kb', 2**10), ('mb', 2**20), ('gb', 2**30), ('b', 1))
2637 2638
2638 2639 def sizetoint(s):
2639 2640 '''Convert a space specifier to a byte count.
2640 2641
2641 2642 >>> sizetoint('30')
2642 2643 30
2643 2644 >>> sizetoint('2.2kb')
2644 2645 2252
2645 2646 >>> sizetoint('6M')
2646 2647 6291456
2647 2648 '''
2648 2649 t = s.strip().lower()
2649 2650 try:
2650 2651 for k, u in _sizeunits:
2651 2652 if t.endswith(k):
2652 2653 return int(float(t[:-len(k)]) * u)
2653 2654 return int(t)
2654 2655 except ValueError:
2655 2656 raise error.ParseError(_("couldn't parse size: %s") % s)
2656 2657
2657 2658 class hooks(object):
2658 2659 '''A collection of hook functions that can be used to extend a
2659 2660 function's behavior. Hooks are called in lexicographic order,
2660 2661 based on the names of their sources.'''
2661 2662
2662 2663 def __init__(self):
2663 2664 self._hooks = []
2664 2665
2665 2666 def add(self, source, hook):
2666 2667 self._hooks.append((source, hook))
2667 2668
2668 2669 def __call__(self, *args):
2669 2670 self._hooks.sort(key=lambda x: x[0])
2670 2671 results = []
2671 2672 for source, hook in self._hooks:
2672 2673 results.append(hook(*args))
2673 2674 return results
2674 2675
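A minimal sketch showing that hooks run in lexicographic order of their source names, not in the order they were added:

    from mercurial import util

    calls = util.hooks()
    calls.add('zzz-extension', lambda arg: 'second')
    calls.add('aaa-extension', lambda arg: 'first')
    print calls(None)   # ['first', 'second']
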
2675 2676 def getstackframes(skip=0, line=' %-*s in %s\n', fileline='%s:%s'):
2676 2677 '''Yields lines for a nicely formatted stacktrace.
2677 2678 Skips the last 'skip' entries.
2678 2679 Each file+linenumber is formatted according to fileline.
2679 2680 Each line is formatted according to line.
2680 2681 If line is None, it yields:
2681 2682 length of longest filepath+line number,
2682 2683 filepath+linenumber,
2683 2684 function
2684 2685
2685 2686 Not to be used in production code, but very convenient while developing.
2686 2687 '''
2687 2688 entries = [(fileline % (fn, ln), func)
2688 2689 for fn, ln, func, _text in traceback.extract_stack()[:-skip - 1]]
2689 2690 if entries:
2690 2691 fnmax = max(len(entry[0]) for entry in entries)
2691 2692 for fnln, func in entries:
2692 2693 if line is None:
2693 2694 yield (fnmax, fnln, func)
2694 2695 else:
2695 2696 yield line % (fnmax, fnln, func)
2696 2697
2697 2698 def debugstacktrace(msg='stacktrace', skip=0, f=sys.stderr, otherf=sys.stdout):
2698 2699 '''Writes a message to f (stderr) with a nicely formatted stacktrace.
2699 2700 Skips the last 'skip' entries. By default it will flush stdout first.
2700 2701 It can be used everywhere and intentionally does not require a ui object.
2701 2702 Not to be used in production code, but very convenient while developing.
2702 2703 '''
2703 2704 if otherf:
2704 2705 otherf.flush()
2705 2706 f.write('%s at:\n' % msg)
2706 2707 for line in getstackframes(skip + 1):
2707 2708 f.write(line)
2708 2709 f.flush()
2709 2710
2710 2711 class dirs(object):
2711 2712 '''a multiset of directory names from a dirstate or manifest'''
2712 2713
2713 2714 def __init__(self, map, skip=None):
2714 2715 self._dirs = {}
2715 2716 addpath = self.addpath
2716 2717 if safehasattr(map, 'iteritems') and skip is not None:
2717 2718 for f, s in map.iteritems():
2718 2719 if s[0] != skip:
2719 2720 addpath(f)
2720 2721 else:
2721 2722 for f in map:
2722 2723 addpath(f)
2723 2724
2724 2725 def addpath(self, path):
2725 2726 dirs = self._dirs
2726 2727 for base in finddirs(path):
2727 2728 if base in dirs:
2728 2729 dirs[base] += 1
2729 2730 return
2730 2731 dirs[base] = 1
2731 2732
2732 2733 def delpath(self, path):
2733 2734 dirs = self._dirs
2734 2735 for base in finddirs(path):
2735 2736 if dirs[base] > 1:
2736 2737 dirs[base] -= 1
2737 2738 return
2738 2739 del dirs[base]
2739 2740
2740 2741 def __iter__(self):
2741 2742 return self._dirs.iterkeys()
2742 2743
2743 2744 def __contains__(self, d):
2744 2745 return d in self._dirs
2745 2746
2746 2747 if safehasattr(parsers, 'dirs'):
2747 2748 dirs = parsers.dirs
2748 2749
2749 2750 def finddirs(path):
2750 2751 pos = path.rfind('/')
2751 2752 while pos != -1:
2752 2753 yield path[:pos]
2753 2754 pos = path.rfind('/', 0, pos)
2754 2755
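A short sketch of the pure-Python behaviour above (the C version in parsers is expected to behave the same for this use):

    from mercurial import util

    print list(util.finddirs('a/b/c.txt'))   # ['a/b', 'a'] -- deepest first
    d = util.dirs(['a/b/c.txt', 'a/b/d.txt', 'e/f.txt'])
    print 'a/b' in d, 'e' in d, 'x' in d     # True True False
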
2755 2756 # compression utility
2756 2757
2757 2758 class nocompress(object):
2758 2759 def compress(self, x):
2759 2760 return x
2760 2761 def flush(self):
2761 2762 return ""
2762 2763
2763 2764 compressors = {
2764 2765 None: nocompress,
2765 2766 # lambda to prevent early import
2766 2767 'BZ': lambda: bz2.BZ2Compressor(),
2767 2768 'GZ': lambda: zlib.compressobj(),
2768 2769 }
2769 2770 # also support the old form by courtesies
2770 2771 compressors['UN'] = compressors[None]
2771 2772
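A minimal round-trip sketch for the 'GZ' entry (the payload string is invented):

    import zlib
    from mercurial import util

    comp = util.compressors['GZ']()
    blob = comp.compress('some bundle payload') + comp.flush()
    assert zlib.decompress(blob) == 'some bundle payload'
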
2772 2773 def _makedecompressor(decompcls):
2773 2774 def generator(f):
2774 2775 d = decompcls()
2775 2776 for chunk in filechunkiter(f):
2776 2777 yield d.decompress(chunk)
2777 2778 def func(fh):
2778 2779 return chunkbuffer(generator(fh))
2779 2780 return func
2780 2781
2781 2782 class ctxmanager(object):
2782 2783 '''A context manager for use in 'with' blocks to allow multiple
2783 2784 contexts to be entered at once. This is both safer and more
2784 2785 flexible than contextlib.nested.
2785 2786
2786 2787 Once Mercurial supports Python 2.7+, this will become mostly
2787 2788 unnecessary.
2788 2789 '''
2789 2790
2790 2791 def __init__(self, *args):
2791 2792 '''Accepts a list of no-argument functions that return context
2792 2793 managers. These will be invoked at __call__ time.'''
2793 2794 self._pending = args
2794 2795 self._atexit = []
2795 2796
2796 2797 def __enter__(self):
2797 2798 return self
2798 2799
2799 2800 def enter(self):
2800 2801 '''Create and enter context managers in the order in which they were
2801 2802 passed to the constructor.'''
2802 2803 values = []
2803 2804 for func in self._pending:
2804 2805 obj = func()
2805 2806 values.append(obj.__enter__())
2806 2807 self._atexit.append(obj.__exit__)
2807 2808 del self._pending
2808 2809 return values
2809 2810
2810 2811 def atexit(self, func, *args, **kwargs):
2811 2812 '''Add a function to call when this context manager exits. The
2812 2813 ordering of multiple atexit calls is unspecified, save that
2813 2814 they will happen before any __exit__ functions.'''
2814 2815 def wrapper(exc_type, exc_val, exc_tb):
2815 2816 func(*args, **kwargs)
2816 2817 self._atexit.append(wrapper)
2817 2818 return func
2818 2819
2819 2820 def __exit__(self, exc_type, exc_val, exc_tb):
2820 2821 '''Context managers are exited in the reverse order from which
2821 2822 they were created.'''
2822 2823 received = exc_type is not None
2823 2824 suppressed = False
2824 2825 pending = None
2825 2826 self._atexit.reverse()
2826 2827 for exitfunc in self._atexit:
2827 2828 try:
2828 2829 if exitfunc(exc_type, exc_val, exc_tb):
2829 2830 suppressed = True
2830 2831 exc_type = None
2831 2832 exc_val = None
2832 2833 exc_tb = None
2833 2834 except BaseException:
2835 2836 exc_type, exc_val, exc_tb = pending = sys.exc_info()
2836 2837 del self._atexit
2837 2838 if pending:
2838 2839 raise exc_val
2839 2840 return received and suppressed
2840 2841
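A self-contained sketch of ctxmanager, with two temporary files standing in for the usual repository locks:

    import tempfile
    from mercurial import util

    with util.ctxmanager(tempfile.NamedTemporaryFile,
                         tempfile.NamedTemporaryFile) as c:
        f1, f2 = c.enter()               # entered in the order given
        c.atexit(f1.write, 'cleanup\n')  # runs before any __exit__ function
        # on leaving the block, f2 then f1 are closed (reverse order)
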
2841 2842 def _bz2():
2842 2843 d = bz2.BZ2Decompressor()
2843 2844 # Bzip2 streams start with BZ, but we stripped it.
2844 2845 # We put it back for good measure.
2845 2846 d.decompress('BZ')
2846 2847 return d
2847 2848
2848 2849 decompressors = {None: lambda fh: fh,
2849 2850 '_truncatedBZ': _makedecompressor(_bz2),
2850 2851 'BZ': _makedecompressor(lambda: bz2.BZ2Decompressor()),
2851 2852 'GZ': _makedecompressor(lambda: zlib.decompressobj()),
2852 2853 }
2853 2854 # also support the old form by courtesies
2854 2855 decompressors['UN'] = decompressors[None]
2855 2856
2856 2857 # convenient shortcut
2857 2858 dst = debugstacktrace
@@ -1,473 +1,476
1 1 # windows.py - Windows utility function implementations for Mercurial
2 2 #
3 3 # Copyright 2005-2009 Matt Mackall <mpm@selenic.com> and others
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 from __future__ import absolute_import
9 9
10 10 import _winreg
11 11 import errno
12 12 import msvcrt
13 13 import os
14 14 import re
15 15 import stat
16 16 import sys
17 17
18 18 from .i18n import _
19 19 from . import (
20 20 encoding,
21 21 osutil,
22 22 win32,
23 23 )
24 24
25 25 executablepath = win32.executablepath
26 26 getuser = win32.getuser
27 27 hidewindow = win32.hidewindow
28 28 makedir = win32.makedir
29 29 nlinks = win32.nlinks
30 30 oslink = win32.oslink
31 31 samedevice = win32.samedevice
32 32 samefile = win32.samefile
33 33 setsignalhandler = win32.setsignalhandler
34 34 spawndetached = win32.spawndetached
35 35 split = os.path.split
36 36 termwidth = win32.termwidth
37 37 testpid = win32.testpid
38 38 unlink = win32.unlink
39 39
40 40 umask = 0o022
41 41
42 42 class mixedfilemodewrapper(object):
43 43 """Wraps a file handle when it is opened in read/write mode.
44 44
45 45 fopen() and fdopen() on Windows have a specific-to-Windows requirement
46 46 that files opened with mode r+, w+, or a+ make a call to a file positioning
47 47 function when switching between reads and writes. Without this extra call,
48 48 Python will raise a not very intuitive "IOError: [Errno 0] Error."
49 49
50 50 This class wraps posixfile instances when the file is opened in read/write
51 51 mode and automatically adds checks or inserts appropriate file positioning
52 52 calls when necessary.
53 53 """
54 54 OPNONE = 0
55 55 OPREAD = 1
56 56 OPWRITE = 2
57 57
58 58 def __init__(self, fp):
59 59 object.__setattr__(self, '_fp', fp)
60 60 object.__setattr__(self, '_lastop', 0)
61 61
62 62 def __getattr__(self, name):
63 63 return getattr(self._fp, name)
64 64
65 65 def __setattr__(self, name, value):
66 66 return self._fp.__setattr__(name, value)
67 67
68 68 def _noopseek(self):
69 69 self._fp.seek(0, os.SEEK_CUR)
70 70
71 71 def seek(self, *args, **kwargs):
72 72 object.__setattr__(self, '_lastop', self.OPNONE)
73 73 return self._fp.seek(*args, **kwargs)
74 74
75 75 def write(self, d):
76 76 if self._lastop == self.OPREAD:
77 77 self._noopseek()
78 78
79 79 object.__setattr__(self, '_lastop', self.OPWRITE)
80 80 return self._fp.write(d)
81 81
82 82 def writelines(self, *args, **kwargs):
83 83 if self._lastop == self.OPREAD:
84 84 self._noopseek()
85 85
86 86 object.__setattr__(self, '_lastop', self.OPWRITE)
87 87 return self._fp.writelines(*args, **kwargs)
88 88
89 89 def read(self, *args, **kwargs):
90 90 if self._lastop == self.OPWRITE:
91 91 self._noopseek()
92 92
93 93 object.__setattr__(self, '_lastop', self.OPREAD)
94 94 return self._fp.read(*args, **kwargs)
95 95
96 96 def readline(self, *args, **kwargs):
97 97 if self._lastop == self.OPWRITE:
98 98 self._noopseek()
99 99
100 100 object.__setattr__(self, '_lastop', self.OPREAD)
101 101 return self._fp.readline(*args, **kwargs)
102 102
103 103 def readlines(self, *args, **kwargs):
104 104 if self._lastop == self.OPWRITE:
105 105 self._noopseek()
106 106
107 107 object.__setattr__(self, '_lastop', self.OPREAD)
108 108 return self._fp.readlines(*args, **kwargs)
109 109
110 110 def posixfile(name, mode='r', buffering=-1):
111 111 '''Open a file with even more POSIX-like semantics'''
112 112 try:
113 113 fp = osutil.posixfile(name, mode, buffering) # may raise WindowsError
114 114
115 115 # The position when opening in append mode is implementation defined, so
116 116 # make it consistent with other platforms, which position at EOF.
117 117 if 'a' in mode:
118 118 fp.seek(0, os.SEEK_END)
119 119
120 120 if '+' in mode:
121 121 return mixedfilemodewrapper(fp)
122 122
123 123 return fp
124 124 except WindowsError as err:
125 125 # convert to a friendlier exception
126 126 raise IOError(err.errno, '%s: %s' % (name, err.strerror))
127 127
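A sketch of the read-then-write switch the wrapper smooths over (Windows only; the file name is hypothetical and must already exist for mode 'r+b'):

    from mercurial import windows

    fp = windows.posixfile('status.log', 'r+b')  # wrapped by mixedfilemodewrapper
    header = fp.read(8)    # last operation: read
    fp.write('new data')   # the wrapper issues the required no-op seek first
    fp.close()
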
128 128 class winstdout(object):
129 129 '''stdout on windows misbehaves if sent through a pipe'''
130 130
131 131 def __init__(self, fp):
132 132 self.fp = fp
133 133
134 134 def __getattr__(self, key):
135 135 return getattr(self.fp, key)
136 136
137 137 def close(self):
138 138 try:
139 139 self.fp.close()
140 140 except IOError:
141 141 pass
142 142
143 143 def write(self, s):
144 144 try:
145 145 # This is a workaround for the "Not enough space" error on
146 146 # writing large amounts of data to the console.
147 147 limit = 16000
148 148 l = len(s)
149 149 start = 0
150 150 self.softspace = 0
151 151 while start < l:
152 152 end = start + limit
153 153 self.fp.write(s[start:end])
154 154 start = end
155 155 except IOError as inst:
156 156 if inst.errno != 0:
157 157 raise
158 158 self.close()
159 159 raise IOError(errno.EPIPE, 'Broken pipe')
160 160
161 161 def flush(self):
162 162 try:
163 163 return self.fp.flush()
164 164 except IOError as inst:
165 165 if inst.errno != errno.EINVAL:
166 166 raise
167 167 self.close()
168 168 raise IOError(errno.EPIPE, 'Broken pipe')
169 169
170 170 sys.__stdout__ = sys.stdout = winstdout(sys.stdout)
171 171
172 172 def _is_win_9x():
173 173 '''return true if run on windows 95, 98 or me.'''
174 174 try:
175 175 return sys.getwindowsversion()[3] == 1
176 176 except AttributeError:
177 177 return 'command' in os.environ.get('comspec', '')
178 178
179 179 def openhardlinks():
180 180 return not _is_win_9x()
181 181
182 182 def parsepatchoutput(output_line):
183 183 """parses the output produced by patch and returns the filename"""
184 184 pf = output_line[14:]
185 185 if pf[0] == '`':
186 186 pf = pf[1:-1] # Remove the quotes
187 187 return pf
188 188
189 189 def sshargs(sshcmd, host, user, port):
190 190 '''Build argument list for ssh or Plink'''
191 191 pflag = 'plink' in sshcmd.lower() and '-P' or '-p'
192 192 args = user and ("%s@%s" % (user, host)) or host
193 193 return port and ("%s %s %s" % (args, pflag, port)) or args
194 194
195 195 def setflags(f, l, x):
196 196 pass
197 197
198 198 def copymode(src, dst, mode=None):
199 199 pass
200 200
201 201 def checkexec(path):
202 202 return False
203 203
204 204 def checklink(path):
205 205 return False
206 206
207 207 def setbinary(fd):
208 208 # When run without console, pipes may expose invalid
209 209 # fileno(), usually set to -1.
210 210 fno = getattr(fd, 'fileno', None)
211 211 if fno is not None and fno() >= 0:
212 212 msvcrt.setmode(fno(), os.O_BINARY)
213 213
214 214 def pconvert(path):
215 215 return path.replace(os.sep, '/')
216 216
217 217 def localpath(path):
218 218 return path.replace('/', '\\')
219 219
220 220 def normpath(path):
221 221 return pconvert(os.path.normpath(path))
222 222
223 223 def normcase(path):
224 224 return encoding.upper(path) # NTFS compares via upper()
225 225
226 226 # see posix.py for definitions
227 227 normcasespec = encoding.normcasespecs.upper
228 228 normcasefallback = encoding.upperfallback
229 229
230 230 def samestat(s1, s2):
231 231 return False
232 232
233 233 # A sequence of backslashes is special iff it precedes a double quote:
234 234 # - if there's an even number of backslashes, the double quote is not
235 235 # quoted (i.e. it ends the quoted region)
236 236 # - if there's an odd number of backslashes, the double quote is quoted
237 237 # - in both cases, every pair of backslashes is unquoted into a single
238 238 # backslash
239 239 # (See http://msdn2.microsoft.com/en-us/library/a1y7w461.aspx )
240 240 # So, to quote a string, we must surround it in double quotes, double
241 241 # the number of backslashes that precede double quotes and add another
242 242 # backslash before every double quote (being careful with the double
243 243 # quote we've appended to the end)
244 244 _quotere = None
245 245 _needsshellquote = None
246 246 def shellquote(s):
247 247 r"""
248 248 >>> shellquote(r'C:\Users\xyz')
249 249 '"C:\\Users\\xyz"'
250 250 >>> shellquote(r'C:\Users\xyz/mixed')
251 251 '"C:\\Users\\xyz/mixed"'
252 252 >>> # Would be safe not to quote too, since it is all double backslashes
253 253 >>> shellquote(r'C:\\Users\\xyz')
254 254 '"C:\\\\Users\\\\xyz"'
255 255 >>> # But this must be quoted
256 256 >>> shellquote(r'C:\\Users\\xyz/abc')
257 257 '"C:\\\\Users\\\\xyz/abc"'
258 258 """
259 259 global _quotere
260 260 if _quotere is None:
261 261 _quotere = re.compile(r'(\\*)("|\\$)')
262 262 global _needsshellquote
263 263 if _needsshellquote is None:
264 264 # ":" is also treated as "safe character", because it is used as a part
265 265 # of path name on Windows. "\" is also part of a path name, but isn't
266 266 # safe because shlex.split() (kind of) treats it as an escape char and
267 267 # drops it. It will leave the next character, even if it is another
268 268 # "\".
269 269 _needsshellquote = re.compile(r'[^a-zA-Z0-9._:/-]').search
270 270 if s and not _needsshellquote(s) and not _quotere.search(s):
271 271 # "s" shouldn't have to be quoted
272 272 return s
273 273 return '"%s"' % _quotere.sub(r'\1\1\\\2', s)
274 274
275 275 def quotecommand(cmd):
276 276 """Build a command string suitable for os.popen* calls."""
277 277 if sys.version_info < (2, 7, 1):
278 278 # Python versions since 2.7.1 do this extra quoting themselves
279 279 return '"' + cmd + '"'
280 280 return cmd
281 281
282 282 def popen(command, mode='r'):
283 283 # Work around "popen spawned process may not write to stdout
284 284 # under windows"
285 285 # http://bugs.python.org/issue1366
286 286 command += " 2> %s" % os.devnull
287 287 return os.popen(quotecommand(command), mode)
288 288
289 289 def explainexit(code):
290 290 return _("exited with status %d") % code, code
291 291
292 292 # if you change this stub into a real check, please try to implement the
293 293 # username and groupname functions above, too.
294 294 def isowner(st):
295 295 return True
296 296
297 297 def findexe(command):
298 298 '''Find executable for command searching like cmd.exe does.
299 299 If command is a basename then PATH is searched for command.
300 300 PATH isn't searched if command is an absolute or relative path.
301 301 An extension from PATHEXT is found and added if not present.
302 302 If command isn't found None is returned.'''
303 303 pathext = os.environ.get('PATHEXT', '.COM;.EXE;.BAT;.CMD')
304 304 pathexts = [ext for ext in pathext.lower().split(os.pathsep)]
305 305 if os.path.splitext(command)[1].lower() in pathexts:
306 306 pathexts = ['']
307 307
308 308 def findexisting(pathcommand):
309 309 'Will append extension (if needed) and return existing file'
310 310 for ext in pathexts:
311 311 executable = pathcommand + ext
312 312 if os.path.exists(executable):
313 313 return executable
314 314 return None
315 315
316 316 if os.sep in command:
317 317 return findexisting(command)
318 318
319 319 for path in os.environ.get('PATH', '').split(os.pathsep):
320 320 executable = findexisting(os.path.join(path, command))
321 321 if executable is not None:
322 322 return executable
323 323 return findexisting(os.path.expanduser(os.path.expandvars(command)))
324 324
325 325 _wantedkinds = set([stat.S_IFREG, stat.S_IFLNK])
326 326
327 327 def statfiles(files):
328 328 '''Stat each file in files. Yield each stat, or None if a file
329 329 does not exist or has a type we don't care about.
330 330
331 331 Cluster and cache stat per directory to minimize number of OS stat calls.'''
332 332 dircache = {} # dirname -> filename -> status | None if file does not exist
333 333 getkind = stat.S_IFMT
334 334 for nf in files:
335 335 nf = normcase(nf)
336 336 dir, base = os.path.split(nf)
337 337 if not dir:
338 338 dir = '.'
339 339 cache = dircache.get(dir, None)
340 340 if cache is None:
341 341 try:
342 342 dmap = dict([(normcase(n), s)
343 343 for n, k, s in osutil.listdir(dir, True)
344 344 if getkind(s.st_mode) in _wantedkinds])
345 345 except OSError as err:
346 346 # Python >= 2.5 returns ENOENT and adds winerror field
347 347 # EINVAL is raised if dir is not a directory.
348 348 if err.errno not in (errno.ENOENT, errno.EINVAL,
349 349 errno.ENOTDIR):
350 350 raise
351 351 dmap = {}
352 352 cache = dircache.setdefault(dir, dmap)
353 353 yield cache.get(base, None)
354 354
355 355 def username(uid=None):
356 356 """Return the name of the user with the given uid.
357 357
358 358 If uid is None, return the name of the current user."""
359 359 return None
360 360
361 361 def groupname(gid=None):
362 362 """Return the name of the group with the given gid.
363 363
364 364 If gid is None, return the name of the current group."""
365 365 return None
366 366
367 367 def removedirs(name):
368 368 """special version of os.removedirs that does not remove symlinked
369 369 directories or junction points if they actually contain files"""
370 370 if osutil.listdir(name):
371 371 return
372 372 os.rmdir(name)
373 373 head, tail = os.path.split(name)
374 374 if not tail:
375 375 head, tail = os.path.split(head)
376 376 while head and tail:
377 377 try:
378 378 if osutil.listdir(head):
379 379 return
380 380 os.rmdir(head)
381 381 except (ValueError, OSError):
382 382 break
383 383 head, tail = os.path.split(head)
384 384
385 385 def unlinkpath(f, ignoremissing=False):
386 386 """unlink and remove the directory if it is empty"""
387 387 try:
388 388 unlink(f)
389 389 except OSError as e:
390 390 if not (ignoremissing and e.errno == errno.ENOENT):
391 391 raise
392 392 # try removing directories that might now be empty
393 393 try:
394 394 removedirs(os.path.dirname(f))
395 395 except OSError:
396 396 pass
397 397
398 398 def rename(src, dst):
399 399 '''atomically rename file src to dst, replacing dst if it exists'''
400 400 try:
401 401 os.rename(src, dst)
402 402 except OSError as e:
403 403 if e.errno != errno.EEXIST:
404 404 raise
405 405 unlink(dst)
406 406 os.rename(src, dst)
407 407
408 408 def gethgcmd():
409 409 return [sys.executable] + sys.argv[:1]
410 410
411 411 def groupmembers(name):
412 412 # Don't support groups on Windows for now
413 413 raise KeyError
414 414
415 415 def isexec(f):
416 416 return False
417 417
418 418 class cachestat(object):
419 419 def __init__(self, path):
420 420 pass
421 421
422 422 def cacheable(self):
423 423 return False
424 424
425 425 def lookupreg(key, valname=None, scope=None):
426 426 ''' Look up a key/value name in the Windows registry.
427 427
428 428 valname: value name. If unspecified, the default value for the key
429 429 is used.
430 430 scope: optionally specify scope for registry lookup, this can be
431 431 a sequence of scopes to look up in order. Default (CURRENT_USER,
432 432 LOCAL_MACHINE).
433 433 '''
434 434 if scope is None:
435 435 scope = (_winreg.HKEY_CURRENT_USER, _winreg.HKEY_LOCAL_MACHINE)
436 436 elif not isinstance(scope, (list, tuple)):
437 437 scope = (scope,)
438 438 for s in scope:
439 439 try:
440 440 val = _winreg.QueryValueEx(_winreg.OpenKey(s, key), valname)[0]
441 441 # never let a Unicode string escape into the wild
442 442 return encoding.tolocal(val.encode('UTF-8'))
443 443 except EnvironmentError:
444 444 pass
445 445
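A hedged sketch (the registry key and value name below are hypothetical; real callers look up things such as the configured editor or install directory):

    from mercurial import windows

    installdir = windows.lookupreg(r'Software\Mercurial', 'InstallDir')
    if installdir is not None:
        print installdir
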
446 446 expandglobs = True
447 447
448 448 def statislink(st):
449 449 '''check whether a stat result is a symlink'''
450 450 return False
451 451
452 452 def statisexec(st):
453 453 '''check whether a stat result is an executable file'''
454 454 return False
455 455
456 456 def poll(fds):
457 457 # see posix.py for description
458 458 raise NotImplementedError()
459 459
460 460 def readpipe(pipe):
461 461 """Read all available data from a pipe."""
462 462 chunks = []
463 463 while True:
464 464 size = win32.peekpipe(pipe)
465 465 if not size:
466 466 break
467 467
468 468 s = pipe.read(size)
469 469 if not s:
470 470 break
471 471 chunks.append(s)
472 472
473 473 return ''.join(chunks)
474
475 def bindunixsocket(sock, path):
476 raise NotImplementedError('unsupported platform')