py3: bulk replace sys.stdin/out/err by util's...
Yuya Nishihara
r30473:39d13b8c default
@@ -1,643 +1,643 @@ chgserver.py
1 1 # chgserver.py - command server extension for cHg
2 2 #
3 3 # Copyright 2011 Yuya Nishihara <yuya@tcha.org>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 """command server extension for cHg (EXPERIMENTAL)
9 9
10 10 'S' channel (read/write)
11 11 propagate ui.system() request to client
12 12
13 13 'attachio' command
14 14 attach client's stdio passed by sendmsg()
15 15
16 16 'chdir' command
17 17 change current directory
18 18
19 19 'getpager' command
20 20 check if the pager is enabled and which pager should be executed
21 21
22 22 'setenv' command
23 23 replace os.environ completely
24 24
25 25 'setumask' command
26 26 set umask
27 27
28 28 'validate' command
29 29 reload the config and check if the server is up to date
30 30
31 31 Config
32 32 ------
33 33
34 34 ::
35 35
36 36 [chgserver]
37 37 idletimeout = 3600 # seconds, after which an idle server will exit
38 38 skiphash = False # whether to skip config or env change checks
39 39 """
40 40
41 41 from __future__ import absolute_import
42 42
43 43 import errno
44 44 import hashlib
45 45 import inspect
46 46 import os
47 47 import re
48 48 import signal
49 49 import struct
50 50 import sys
51 51 import time
52 52
53 53 from mercurial.i18n import _
54 54
55 55 from mercurial import (
56 56 cmdutil,
57 57 commands,
58 58 commandserver,
59 59 dispatch,
60 60 error,
61 61 extensions,
62 62 osutil,
63 63 util,
64 64 )
65 65
66 66 # Note for extension authors: ONLY specify testedwith = 'ships-with-hg-core' for
67 67 # extensions which SHIP WITH MERCURIAL. Non-mainline extensions should
68 68 # be specifying the version(s) of Mercurial they are tested with, or
69 69 # leave the attribute unspecified.
70 70 testedwith = 'ships-with-hg-core'
71 71
72 72 _log = commandserver.log
73 73
74 74 def _hashlist(items):
75 75 """return sha1 hexdigest for a list"""
76 76 return hashlib.sha1(str(items)).hexdigest()
77 77
78 78 # sensitive config sections affecting confighash
79 79 _configsections = [
80 80 'alias', # affects global state commands.table
81 81 'extdiff', # uisetup will register new commands
82 82 'extensions',
83 83 ]
84 84
85 85 # sensitive environment variables affecting confighash
86 86 _envre = re.compile(r'''\A(?:
87 87 CHGHG
88 88 |HG.*
89 89 |LANG(?:UAGE)?
90 90 |LC_.*
91 91 |LD_.*
92 92 |PATH
93 93 |PYTHON.*
94 94 |TERM(?:INFO)?
95 95 |TZ
96 96 )\Z''', re.X)
97 97
98 98 def _confighash(ui):
99 99 """return a quick hash for detecting config/env changes
100 100
101 101 confighash is the hash of sensitive config items and environment variables.
102 102
103 103 for chgserver, the design is that once confighash changes, the server is
104 104 no longer qualified to serve its client and should redirect the client to
105 105 a new server. unlike mtimehash, a confighash change does not mark the
106 106 server outdated and make it exit, since the user can have different
107 107 configs at the same time.
108 108 """
109 109 sectionitems = []
110 110 for section in _configsections:
111 111 sectionitems.append(ui.configitems(section))
112 112 sectionhash = _hashlist(sectionitems)
113 113 envitems = [(k, v) for k, v in os.environ.iteritems() if _envre.match(k)]
114 114 envhash = _hashlist(sorted(envitems))
115 115 return sectionhash[:6] + envhash[:6]
116 116
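For illustration, confighash is a fixed 12-character token built from two truncated SHA-1 digests, cheap enough to embed in a socket path. A minimal sketch in the same Python 2 style, using hypothetical config and environment items in place of ui.configitems() and os.environ::

    import hashlib

    def _hashlist(items):
        return hashlib.sha1(str(items)).hexdigest()

    # hypothetical values standing in for ui.configitems() and os.environ
    sectionitems = [[('alias.st', 'status')], [], [('extensions.pager', '')]]
    envitems = [('HGUSER', 'alice'), ('PATH', '/usr/bin')]
    confighash = _hashlist(sectionitems)[:6] + _hashlist(sorted(envitems))[:6]
    assert len(confighash) == 12  # always 6 + 6 hex characters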
117 117 def _getmtimepaths(ui):
118 118 """get a list of paths that should be checked to detect change
119 119
120 120 The list will include:
121 121 - extensions (will not cover all files for complex extensions)
122 122 - mercurial/__version__.py
123 123 - python binary
124 124 """
125 125 modules = [m for n, m in extensions.extensions(ui)]
126 126 try:
127 127 from mercurial import __version__
128 128 modules.append(__version__)
129 129 except ImportError:
130 130 pass
131 131 files = [sys.executable]
132 132 for m in modules:
133 133 try:
134 134 files.append(inspect.getabsfile(m))
135 135 except TypeError:
136 136 pass
137 137 return sorted(set(files))
138 138
139 139 def _mtimehash(paths):
140 140 """return a quick hash for detecting file changes
141 141
142 142 mtimehash calls stat on the given paths and calculates a hash based on the
143 143 size and mtime of each file. mtimehash does not read file content because
144 144 reading is expensive. therefore it is not 100% reliable for detecting
145 145 content changes: it can return different hashes for the same file content,
146 146 and it can return the same hash for different file contents in some
147 147 carefully crafted situations.
148 148
149 149 for chgserver, it is designed that once mtimehash changes, the server is
150 150 considered outdated immediately and should no longer provide service.
151 151
152 152 mtimehash is not included in confighash because we only know the paths of
153 153 extensions after importing them (there is imp.find_module but that faces
154 154 race conditions). We need to calculate confighash without importing.
155 155 """
156 156 def trystat(path):
157 157 try:
158 158 st = os.stat(path)
159 159 return (st.st_mtime, st.st_size)
160 160 except OSError:
161 161 # could be ENOENT, EPERM etc. not fatal in any case
162 162 pass
163 163 return _hashlist(map(trystat, paths))[:12]
164 164
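For example, the per-file fingerprint is only the (mtime, size) pair, so touching a file or changing its length changes the hash while the content is never read. A standalone sketch of the same idea (Python 2; the paths are hypothetical)::

    import hashlib
    import os

    def trystat(path):
        try:
            st = os.stat(path)
            return (st.st_mtime, st.st_size)
        except OSError:
            pass  # missing or unreadable files simply hash as None

    paths = ['/usr/bin/python', '/path/to/extension.py']  # hypothetical
    mtimehash = hashlib.sha1(str(map(trystat, paths))).hexdigest()[:12]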
165 165 class hashstate(object):
166 166 """a structure storing confighash, mtimehash, paths used for mtimehash"""
167 167 def __init__(self, confighash, mtimehash, mtimepaths):
168 168 self.confighash = confighash
169 169 self.mtimehash = mtimehash
170 170 self.mtimepaths = mtimepaths
171 171
172 172 @staticmethod
173 173 def fromui(ui, mtimepaths=None):
174 174 if mtimepaths is None:
175 175 mtimepaths = _getmtimepaths(ui)
176 176 confighash = _confighash(ui)
177 177 mtimehash = _mtimehash(mtimepaths)
178 178 _log('confighash = %s mtimehash = %s\n' % (confighash, mtimehash))
179 179 return hashstate(confighash, mtimehash, mtimepaths)
180 180
181 181 # copied from hgext/pager.py:uisetup()
182 182 def _setuppagercmd(ui, options, cmd):
183 183 if not ui.formatted():
184 184 return
185 185
186 186 p = ui.config("pager", "pager", os.environ.get("PAGER"))
187 187 usepager = False
188 188 always = util.parsebool(options['pager'])
189 189 auto = options['pager'] == 'auto'
190 190
191 191 if not p:
192 192 pass
193 193 elif always:
194 194 usepager = True
195 195 elif not auto:
196 196 usepager = False
197 197 else:
198 198 attended = ['annotate', 'cat', 'diff', 'export', 'glog', 'log', 'qdiff']
199 199 attend = ui.configlist('pager', 'attend', attended)
200 200 ignore = ui.configlist('pager', 'ignore')
201 201 cmds, _ = cmdutil.findcmd(cmd, commands.table)
202 202
203 203 for cmd in cmds:
204 204 var = 'attend-%s' % cmd
205 205 if ui.config('pager', var):
206 206 usepager = ui.configbool('pager', var)
207 207 break
208 208 if (cmd in attend or
209 209 (cmd not in ignore and not attend)):
210 210 usepager = True
211 211 break
212 212
213 213 if usepager:
214 214 ui.setconfig('ui', 'formatted', ui.formatted(), 'pager')
215 215 ui.setconfig('ui', 'interactive', False, 'pager')
216 216 return p
217 217
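The precedence implemented above can be read straight off a configuration file: an explicit pager.attend-<command> wins, then membership in pager.attend, and pager.ignore is only consulted when attend is empty. A hypothetical example::

    [pager]
    pager = less -FRX
    attend = annotate, cat, diff, export, glog, log, qdiff
    attend-cat = false   # overrides cat's presence in attend
    ignore = version     # only consulted when attend is empty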
218 218 def _newchgui(srcui, csystem):
219 219 class chgui(srcui.__class__):
220 220 def __init__(self, src=None):
221 221 super(chgui, self).__init__(src)
222 222 if src:
223 223 self._csystem = getattr(src, '_csystem', csystem)
224 224 else:
225 225 self._csystem = csystem
226 226
227 227 def system(self, cmd, environ=None, cwd=None, onerr=None,
228 228 errprefix=None):
229 229 # fall back to the original system method if the output needs to be
230 230 # captured (to self._buffers), or the output stream is not stdout
231 231 # (e.g. stderr, cStringIO), because the chg client is not aware of
232 232 # these situations and will behave differently (write to stdout).
233 233 if (any(s[1] for s in self._bufferstates)
234 234 or not util.safehasattr(self.fout, 'fileno')
235 or self.fout.fileno() != sys.stdout.fileno()):
235 or self.fout.fileno() != util.stdout.fileno()):
236 236 return super(chgui, self).system(cmd, environ, cwd, onerr,
237 237 errprefix)
238 238 # copied from mercurial/util.py:system()
239 239 self.flush()
240 240 def py2shell(val):
241 241 if val is None or val is False:
242 242 return '0'
243 243 if val is True:
244 244 return '1'
245 245 return str(val)
246 246 env = os.environ.copy()
247 247 if environ:
248 248 env.update((k, py2shell(v)) for k, v in environ.iteritems())
249 249 env['HG'] = util.hgexecutable()
250 250 rc = self._csystem(cmd, env, cwd)
251 251 if rc and onerr:
252 252 errmsg = '%s %s' % (os.path.basename(cmd.split(None, 1)[0]),
253 253 util.explainexit(rc)[0])
254 254 if errprefix:
255 255 errmsg = '%s: %s' % (errprefix, errmsg)
256 256 raise onerr(errmsg)
257 257 return rc
258 258
259 259 return chgui(srcui)
260 260
261 261 def _loadnewui(srcui, args):
262 262 newui = srcui.__class__()
263 263 for a in ['fin', 'fout', 'ferr', 'environ']:
264 264 setattr(newui, a, getattr(srcui, a))
265 265 if util.safehasattr(srcui, '_csystem'):
266 266 newui._csystem = srcui._csystem
267 267
268 268 # internal config: extensions.chgserver
269 269 newui.setconfig('extensions', 'chgserver',
270 270 srcui.config('extensions', 'chgserver'), '--config')
271 271
272 272 # command line args
273 273 args = args[:]
274 274 dispatch._parseconfig(newui, dispatch._earlygetopt(['--config'], args))
275 275
276 276 # stolen from tortoisehg.util.copydynamicconfig()
277 277 for section, name, value in srcui.walkconfig():
278 278 source = srcui.configsource(section, name)
279 279 if ':' in source or source == '--config':
280 280 # path:line or command line
281 281 continue
282 282 if source == 'none':
283 283 # ui.configsource returns 'none' by default
284 284 source = ''
285 285 newui.setconfig(section, name, value, source)
286 286
287 287 # load wd and repo config, copied from dispatch.py
288 288 cwds = dispatch._earlygetopt(['--cwd'], args)
289 289 cwd = cwds and os.path.realpath(cwds[-1]) or None
290 290 rpath = dispatch._earlygetopt(["-R", "--repository", "--repo"], args)
291 291 path, newlui = dispatch._getlocal(newui, rpath, wd=cwd)
292 292
293 293 return (newui, newlui)
294 294
295 295 class channeledsystem(object):
296 296 """Propagate ui.system() request in the following format:
297 297
298 298 payload length (unsigned int),
299 299 cmd, '\0',
300 300 cwd, '\0',
301 301 envkey, '=', val, '\0',
302 302 ...
303 303 envkey, '=', val
304 304
305 305 and waits:
306 306
307 307 exitcode length (unsigned int),
308 308 exitcode (int)
309 309 """
310 310 def __init__(self, in_, out, channel):
311 311 self.in_ = in_
312 312 self.out = out
313 313 self.channel = channel
314 314
315 315 def __call__(self, cmd, environ, cwd):
316 316 args = [util.quotecommand(cmd), os.path.abspath(cwd or '.')]
317 317 args.extend('%s=%s' % (k, v) for k, v in environ.iteritems())
318 318 data = '\0'.join(args)
319 319 self.out.write(struct.pack('>cI', self.channel, len(data)))
320 320 self.out.write(data)
321 321 self.out.flush()
322 322
323 323 length = self.in_.read(4)
324 324 length, = struct.unpack('>I', length)
325 325 if length != 4:
326 326 raise error.Abort(_('invalid response'))
327 327 rc, = struct.unpack('>i', self.in_.read(4))
328 328 return rc
329 329
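On the wire this is a simple length-prefixed frame, so the client side can be sketched with the same struct formats. In this hedged sketch, server_out/server_in are hypothetical file objects wrapping the client's socket, and rc is the exit code of the command the client ran::

    import struct

    # read one 'S' channel frame: channel byte plus payload length
    channel, length = struct.unpack('>cI', server_out.read(5))
    assert channel == 'S'
    payload = server_out.read(length)
    cmd, cwd = payload.split('\0', 2)[:2]   # 'key=val' env pairs follow
    # ... run cmd in cwd with that environment, then answer:
    server_in.write(struct.pack('>I', 4))   # length of the exitcode field
    server_in.write(struct.pack('>i', rc))  # the exit code itself
    server_in.flush()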
330 330 _iochannels = [
331 331 # server.ch, ui.fp, mode
332 332 ('cin', 'fin', 'rb'),
333 333 ('cout', 'fout', 'wb'),
334 334 ('cerr', 'ferr', 'wb'),
335 335 ]
336 336
337 337 class chgcmdserver(commandserver.server):
338 338 def __init__(self, ui, repo, fin, fout, sock, hashstate, baseaddress):
339 339 super(chgcmdserver, self).__init__(
340 340 _newchgui(ui, channeledsystem(fin, fout, 'S')), repo, fin, fout)
341 341 self.clientsock = sock
342 342 self._oldios = [] # original (self.ch, ui.fp, fd) before "attachio"
343 343 self.hashstate = hashstate
344 344 self.baseaddress = baseaddress
345 345 if hashstate is not None:
346 346 self.capabilities = self.capabilities.copy()
347 347 self.capabilities['validate'] = chgcmdserver.validate
348 348
349 349 def cleanup(self):
350 350 super(chgcmdserver, self).cleanup()
351 351 # dispatch._runcatch() does not flush outputs if exception is not
352 352 # handled by dispatch._dispatch()
353 353 self.ui.flush()
354 354 self._restoreio()
355 355
356 356 def attachio(self):
357 357 """Attach to client's stdio passed via unix domain socket; all
358 358 channels except cresult will no longer be used
359 359 """
360 360 # tell client to sendmsg() with 1-byte payload, which makes it
361 361 # distinguishable from the "attachio\n" command consumed by client.read()
362 362 self.clientsock.sendall(struct.pack('>cI', 'I', 1))
363 363 clientfds = osutil.recvfds(self.clientsock.fileno())
364 364 _log('received fds: %r\n' % clientfds)
365 365
366 366 ui = self.ui
367 367 ui.flush()
368 368 first = self._saveio()
369 369 for fd, (cn, fn, mode) in zip(clientfds, _iochannels):
370 370 assert fd > 0
371 371 fp = getattr(ui, fn)
372 372 os.dup2(fd, fp.fileno())
373 373 os.close(fd)
374 374 if not first:
375 375 continue
376 376 # reset buffering mode when the client is first attached. as we want
377 377 # to see output immediately on a pager, the mode stays unchanged
378 378 # when the client re-attaches. ferr is unchanged because it should
379 379 # be unbuffered no matter if it is a tty or not.
380 380 if fn == 'ferr':
381 381 newfp = fp
382 382 else:
383 383 # make it line buffered explicitly because the default is
384 384 # decided on first write(), where fout could be a pager.
385 385 if fp.isatty():
386 386 bufsize = 1 # line buffered
387 387 else:
388 388 bufsize = -1 # system default
389 389 newfp = os.fdopen(fp.fileno(), mode, bufsize)
390 390 setattr(ui, fn, newfp)
391 391 setattr(self, cn, newfp)
392 392
393 393 self.cresult.write(struct.pack('>i', len(clientfds)))
394 394
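The client's half of this handshake passes its raw descriptors over the unix socket with SCM_RIGHTS. The production chg client is written in C; a rough Python 3 sketch (socket.sendmsg is not available in Python 2), with sock as the hypothetical connected socket::

    import array
    import socket
    import struct

    header = sock.recv(5)                  # 'I' channel, payload length 1
    assert struct.unpack('>cI', header) == (b'I', 1)
    fds = array.array('i', [0, 1, 2])      # client's stdin, stdout, stderr
    sock.sendmsg([b'\0'],                  # the 1-byte payload
                 [(socket.SOL_SOCKET, socket.SCM_RIGHTS, fds)])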
395 395 def _saveio(self):
396 396 if self._oldios:
397 397 return False
398 398 ui = self.ui
399 399 for cn, fn, _mode in _iochannels:
400 400 ch = getattr(self, cn)
401 401 fp = getattr(ui, fn)
402 402 fd = os.dup(fp.fileno())
403 403 self._oldios.append((ch, fp, fd))
404 404 return True
405 405
406 406 def _restoreio(self):
407 407 ui = self.ui
408 408 for (ch, fp, fd), (cn, fn, _mode) in zip(self._oldios, _iochannels):
409 409 newfp = getattr(ui, fn)
410 410 # close newfp while it's associated with client; otherwise it
411 411 # would be closed when newfp is deleted
412 412 if newfp is not fp:
413 413 newfp.close()
414 414 # restore original fd: fp is open again
415 415 os.dup2(fd, fp.fileno())
416 416 os.close(fd)
417 417 setattr(self, cn, ch)
418 418 setattr(ui, fn, fp)
419 419 del self._oldios[:]
420 420
421 421 def validate(self):
422 422 """Reload the config and check if the server is up to date
423 423
424 424 Read a list of '\0' separated arguments.
425 425 Write a list of '\0' separated instruction strings, or a single '\0'
426 426 if the list is empty.
427 427 An instruction string could be either:
428 428 - "unlink $path", the client should unlink the path to stop the
429 429 outdated server.
430 430 - "redirect $path", the client should attempt to connect to $path
431 431 first. If it does not work, start a new server. It implies
432 432 "reconnect".
433 433 - "exit $n", the client should exit directly with code n.
434 434 This may happen if we cannot parse the config.
435 435 - "reconnect", the client should close the connection and
436 436 reconnect.
437 437 If neither "reconnect" nor "redirect" is included in the instruction
438 438 list, the client can continue with this server after completing all
439 439 the instructions.
440 440 """
441 441 args = self._readlist()
442 442 try:
443 443 self.ui, lui = _loadnewui(self.ui, args)
444 444 except error.ParseError as inst:
445 445 dispatch._formatparse(self.ui.warn, inst)
446 446 self.ui.flush()
447 447 self.cresult.write('exit 255')
448 448 return
449 449 newhash = hashstate.fromui(lui, self.hashstate.mtimepaths)
450 450 insts = []
451 451 if newhash.mtimehash != self.hashstate.mtimehash:
452 452 addr = _hashaddress(self.baseaddress, self.hashstate.confighash)
453 453 insts.append('unlink %s' % addr)
454 454 # mtimehash is empty if one or more extensions fail to load.
455 455 # to be compatible with hg, still serve the client this time.
456 456 if self.hashstate.mtimehash:
457 457 insts.append('reconnect')
458 458 if newhash.confighash != self.hashstate.confighash:
459 459 addr = _hashaddress(self.baseaddress, newhash.confighash)
460 460 insts.append('redirect %s' % addr)
461 461 _log('validate: %s\n' % insts)
462 462 self.cresult.write('\0'.join(insts) or '\0')
463 463
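A client consuming that reply only needs to split on '\0' and dispatch on the first word. A hedged sketch of such a loop, where connect and addr are hypothetical client-side pieces::

    import os
    import sys

    def handleinstructions(data, connect, addr):
        insts = [] if data == '\0' else data.split('\0')
        sock = None
        for inst in insts:
            verb, _sep, arg = inst.partition(' ')
            if verb == 'unlink':
                os.unlink(arg)        # remove the outdated server's socket
            elif verb == 'redirect':
                sock = connect(arg)   # try the new address, else start a server
            elif verb == 'exit':
                sys.exit(int(arg))
            elif verb == 'reconnect':
                sock = connect(addr)  # close and reopen the same address
        return sock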
464 464 def chdir(self):
465 465 """Change current directory
466 466
467 467 Note that the behavior of the --cwd option is a bit different from
468 468 this: it does not affect the --config parameter.
469 469 """
470 470 path = self._readstr()
471 471 if not path:
472 472 return
473 473 _log('chdir to %r\n' % path)
474 474 os.chdir(path)
475 475
476 476 def setumask(self):
477 477 """Change umask"""
478 478 mask = struct.unpack('>I', self._read(4))[0]
479 479 _log('setumask %r\n' % mask)
480 480 os.umask(mask)
481 481
482 482 def getpager(self):
483 483 """Read cmdargs and write pager command to r-channel if enabled
484 484
485 485 If the pager isn't enabled, this writes '\0' because channeledoutput
486 486 does not allow writing empty data.
487 487 """
488 488 args = self._readlist()
489 489 try:
490 490 cmd, _func, args, options, _cmdoptions = dispatch._parse(self.ui,
491 491 args)
492 492 except (error.Abort, error.AmbiguousCommand, error.CommandError,
493 493 error.UnknownCommand):
494 494 cmd = None
495 495 options = {}
496 496 if not cmd or 'pager' not in options:
497 497 self.cresult.write('\0')
498 498 return
499 499
500 500 pagercmd = _setuppagercmd(self.ui, options, cmd)
501 501 if pagercmd:
502 502 # Python's SIGPIPE is SIG_IGN by default. change to SIG_DFL so
503 503 # we can exit if the pipe to the pager is closed
504 504 if util.safehasattr(signal, 'SIGPIPE') and \
505 505 signal.getsignal(signal.SIGPIPE) == signal.SIG_IGN:
506 506 signal.signal(signal.SIGPIPE, signal.SIG_DFL)
507 507 self.cresult.write(pagercmd)
508 508 else:
509 509 self.cresult.write('\0')
510 510
511 511 def setenv(self):
512 512 """Clear and update os.environ
513 513
514 514 Note that not all variables take effect on the running process.
515 515 """
516 516 l = self._readlist()
517 517 try:
518 518 newenv = dict(s.split('=', 1) for s in l)
519 519 except ValueError:
520 520 raise ValueError('unexpected value in setenv request')
521 521 _log('setenv: %r\n' % sorted(newenv.keys()))
522 522 os.environ.clear()
523 523 os.environ.update(newenv)
524 524
525 525 capabilities = commandserver.server.capabilities.copy()
526 526 capabilities.update({'attachio': attachio,
527 527 'chdir': chdir,
528 528 'getpager': getpager,
529 529 'setenv': setenv,
530 530 'setumask': setumask})
531 531
532 532 def _tempaddress(address):
533 533 return '%s.%d.tmp' % (address, os.getpid())
534 534
535 535 def _hashaddress(address, hashstr):
536 536 return '%s-%s' % (address, hashstr)
537 537
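Concretely, with a hypothetical base address, pid and confighash, these helpers produce names like::

    _tempaddress('/tmp/chg/server')
    # -> '/tmp/chg/server.1234.tmp' (for pid 1234)
    _hashaddress('/tmp/chg/server', 'a1b2c3d4e5f6')
    # -> '/tmp/chg/server-a1b2c3d4e5f6'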
538 538 class chgunixservicehandler(object):
539 539 """Set of operations for chg services"""
540 540
541 541 pollinterval = 1 # [sec]
542 542
543 543 def __init__(self, ui):
544 544 self.ui = ui
545 545 self._idletimeout = ui.configint('chgserver', 'idletimeout', 3600)
546 546 self._lastactive = time.time()
547 547
548 548 def bindsocket(self, sock, address):
549 549 self._inithashstate(address)
550 550 self._checkextensions()
551 551 self._bind(sock)
552 552 self._createsymlink()
553 553
554 554 def _inithashstate(self, address):
555 555 self._baseaddress = address
556 556 if self.ui.configbool('chgserver', 'skiphash', False):
557 557 self._hashstate = None
558 558 self._realaddress = address
559 559 return
560 560 self._hashstate = hashstate.fromui(self.ui)
561 561 self._realaddress = _hashaddress(address, self._hashstate.confighash)
562 562
563 563 def _checkextensions(self):
564 564 if not self._hashstate:
565 565 return
566 566 if extensions.notloaded():
567 567 # one or more extensions failed to load. mtimehash becomes
568 568 # meaningless because we do not know the paths of those extensions.
569 569 # set mtimehash to an illegal hash value to invalidate the server.
570 570 self._hashstate.mtimehash = ''
571 571
572 572 def _bind(self, sock):
573 573 # use a unique temp address so we can stat the file and do ownership
574 574 # check later
575 575 tempaddress = _tempaddress(self._realaddress)
576 576 util.bindunixsocket(sock, tempaddress)
577 577 self._socketstat = os.stat(tempaddress)
578 578 # rename will atomically replace the old socket file if it exists. the
579 579 # old server will detect the ownership change and exit.
580 580 util.rename(tempaddress, self._realaddress)
581 581
582 582 def _createsymlink(self):
583 583 if self._baseaddress == self._realaddress:
584 584 return
585 585 tempaddress = _tempaddress(self._baseaddress)
586 586 os.symlink(os.path.basename(self._realaddress), tempaddress)
587 587 util.rename(tempaddress, self._baseaddress)
588 588
589 589 def _issocketowner(self):
590 590 try:
591 591 stat = os.stat(self._realaddress)
592 592 return (stat.st_ino == self._socketstat.st_ino and
593 593 stat.st_mtime == self._socketstat.st_mtime)
594 594 except OSError:
595 595 return False
596 596
597 597 def unlinksocket(self, address):
598 598 if not self._issocketowner():
599 599 return
600 600 # it is possible to have a race condition here that we may
601 601 # remove another server's socket file. but that's okay
602 602 # since that server will detect and exit automatically and
603 603 # the client will start a new server on demand.
604 604 try:
605 605 os.unlink(self._realaddress)
606 606 except OSError as exc:
607 607 if exc.errno != errno.ENOENT:
608 608 raise
609 609
610 610 def printbanner(self, address):
611 611 # no "listening at" message should be printed to simulate hg behavior
612 612 pass
613 613
614 614 def shouldexit(self):
615 615 if not self._issocketowner():
616 616 self.ui.debug('%s is not owned, exiting.\n' % self._realaddress)
617 617 return True
618 618 if time.time() - self._lastactive > self._idletimeout:
619 619 self.ui.debug('being idle too long. exiting.\n')
620 620 return True
621 621 return False
622 622
623 623 def newconnection(self):
624 624 self._lastactive = time.time()
625 625
626 626 def createcmdserver(self, repo, conn, fin, fout):
627 627 return chgcmdserver(self.ui, repo, fin, fout, conn,
628 628 self._hashstate, self._baseaddress)
629 629
630 630 def chgunixservice(ui, repo, opts):
631 631 if repo:
632 632 # one chgserver can serve multiple repos. drop repo information
633 633 ui.setconfig('bundle', 'mainreporoot', '', 'repo')
634 634 h = chgunixservicehandler(ui)
635 635 return commandserver.unixforkingservice(ui, repo=None, opts=opts, handler=h)
636 636
637 637 def uisetup(ui):
638 638 commandserver._servicemap['chgunix'] = chgunixservice
639 639
640 640 # CHGINTERNALMARK is temporarily set by chg client to detect if chg will
641 641 # start another chg. drop it to avoid possible side effects.
642 642 if 'CHGINTERNALMARK' in os.environ:
643 643 del os.environ['CHGINTERNALMARK']
@@ -1,175 +1,175 @@ pager.py
1 1 # pager.py - display output using a pager
2 2 #
3 3 # Copyright 2008 David Soria Parra <dsp@php.net>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7 #
8 8 # To load the extension, add it to your configuration file:
9 9 #
10 10 # [extensions]
11 11 # pager =
12 12 #
13 13 # Run 'hg help pager' to get info on configuration.
14 14
15 15 '''browse command output with an external pager
16 16
17 17 To set the pager that should be used, set the pager.pager configuration option::
18 18
19 19 [pager]
20 20 pager = less -FRX
21 21
22 22 If no pager is set, the pager extension uses the environment variable
23 23 $PAGER. If neither pager.pager nor $PAGER is set, no pager is used.
24 24
25 25 You can disable the pager for certain commands by adding them to the
26 26 pager.ignore list::
27 27
28 28 [pager]
29 29 ignore = version, help, update
30 30
31 31 You can also enable the pager only for certain commands using
32 32 pager.attend. Below is the default list of commands to be paged::
33 33
34 34 [pager]
35 35 attend = annotate, cat, diff, export, glog, log, qdiff
36 36
37 37 Setting pager.attend to an empty value will cause all commands to be
38 38 paged.
39 39
40 40 If pager.attend is present, pager.ignore will be ignored.
41 41
42 42 Lastly, you can enable and disable paging for individual commands with
43 43 the attend-<command> option. This setting takes precedence over
44 44 existing attend and ignore options and defaults::
45 45
46 46 [pager]
47 47 attend-cat = false
48 48
49 49 To ignore global commands like :hg:`version` or :hg:`help`, you have
50 50 to specify them in your user configuration file.
51 51
52 52 To control whether the pager is used at all for an individual command,
53 53 you can use --pager=<value>::
54 54
55 55 - use as needed: `auto`.
56 56 - require the pager: `yes` or `on`.
57 57 - suppress the pager: `no` or `off` (any unrecognized value
58 58 will also work).
59 59
60 60 '''
61 61 from __future__ import absolute_import
62 62
63 63 import atexit
64 64 import os
65 65 import signal
66 66 import subprocess
67 67 import sys
68 68
69 69 from mercurial.i18n import _
70 70 from mercurial import (
71 71 cmdutil,
72 72 commands,
73 73 dispatch,
74 74 extensions,
75 75 util,
76 76 )
77 77
78 78 # Note for extension authors: ONLY specify testedwith = 'ships-with-hg-core' for
79 79 # extensions which SHIP WITH MERCURIAL. Non-mainline extensions should
80 80 # be specifying the version(s) of Mercurial they are tested with, or
81 81 # leave the attribute unspecified.
82 82 testedwith = 'ships-with-hg-core'
83 83
84 84 def _runpager(ui, p):
85 85 pager = subprocess.Popen(p, shell=True, bufsize=-1,
86 86 close_fds=util.closefds, stdin=subprocess.PIPE,
87 stdout=sys.stdout, stderr=sys.stderr)
87 stdout=util.stdout, stderr=util.stderr)
88 88
89 89 # back up original file objects and descriptors
90 90 olduifout = ui.fout
91 oldstdout = sys.stdout
92 stdoutfd = os.dup(sys.stdout.fileno())
93 stderrfd = os.dup(sys.stderr.fileno())
91 oldstdout = util.stdout
92 stdoutfd = os.dup(util.stdout.fileno())
93 stderrfd = os.dup(util.stderr.fileno())
94 94
95 95 # create new line-buffered stdout so that output can show up immediately
96 ui.fout = sys.stdout = newstdout = os.fdopen(sys.stdout.fileno(), 'wb', 1)
97 os.dup2(pager.stdin.fileno(), sys.stdout.fileno())
98 if ui._isatty(sys.stderr):
99 os.dup2(pager.stdin.fileno(), sys.stderr.fileno())
96 ui.fout = util.stdout = newstdout = os.fdopen(util.stdout.fileno(), 'wb', 1)
97 os.dup2(pager.stdin.fileno(), util.stdout.fileno())
98 if ui._isatty(util.stderr):
99 os.dup2(pager.stdin.fileno(), util.stderr.fileno())
100 100
101 101 @atexit.register
102 102 def killpager():
103 103 if util.safehasattr(signal, "SIGINT"):
104 104 signal.signal(signal.SIGINT, signal.SIG_IGN)
105 105 pager.stdin.close()
106 106 ui.fout = olduifout
107 sys.stdout = oldstdout
107 util.stdout = oldstdout
108 108 # close new stdout while it's associated with pager; otherwise stdout
109 109 # fd would be closed when newstdout is deleted
110 110 newstdout.close()
111 111 # restore original fds: stdout is open again
112 os.dup2(stdoutfd, sys.stdout.fileno())
113 os.dup2(stderrfd, sys.stderr.fileno())
112 os.dup2(stdoutfd, util.stdout.fileno())
113 os.dup2(stderrfd, util.stderr.fileno())
114 114 pager.wait()
115 115
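The core trick in _runpager is plain file-descriptor plumbing: duplicate the pager's stdin pipe onto fd 1 so that everything written to stdout reaches the pager, then restore the saved descriptor afterwards. A standalone sketch of the pattern (not the extension's exact code)::

    import os
    import subprocess
    import sys

    pager = subprocess.Popen('less -FRX', shell=True, stdin=subprocess.PIPE)
    savedout = os.dup(1)                  # keep the real stdout
    os.dup2(pager.stdin.fileno(), 1)      # fd 1 now feeds the pager
    print('this line goes through the pager')
    sys.stdout.flush()
    os.dup2(savedout, 1)                  # put the terminal back
    os.close(savedout)
    pager.stdin.close()                   # EOF lets the pager exit
    pager.wait()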
116 116 def uisetup(ui):
117 117 if '--debugger' in sys.argv or not ui.formatted():
118 118 return
119 119
120 120 # chg has its own pager implementation
121 121 argv = sys.argv[:]
122 122 if 'chgunix' in dispatch._earlygetopt(['--cmdserver'], argv):
123 123 return
124 124
125 125 def pagecmd(orig, ui, options, cmd, cmdfunc):
126 126 p = ui.config("pager", "pager", os.environ.get("PAGER"))
127 127 usepager = False
128 128 always = util.parsebool(options['pager'])
129 129 auto = options['pager'] == 'auto'
130 130
131 131 if not p:
132 132 pass
133 133 elif always:
134 134 usepager = True
135 135 elif not auto:
136 136 usepager = False
137 137 else:
138 138 attend = ui.configlist('pager', 'attend', attended)
139 139 ignore = ui.configlist('pager', 'ignore')
140 140 cmds, _ = cmdutil.findcmd(cmd, commands.table)
141 141
142 142 for cmd in cmds:
143 143 var = 'attend-%s' % cmd
144 144 if ui.config('pager', var):
145 145 usepager = ui.configbool('pager', var)
146 146 break
147 147 if (cmd in attend or
148 148 (cmd not in ignore and not attend)):
149 149 usepager = True
150 150 break
151 151
152 152 setattr(ui, 'pageractive', usepager)
153 153
154 154 if usepager:
155 155 ui.setconfig('ui', 'formatted', ui.formatted(), 'pager')
156 156 ui.setconfig('ui', 'interactive', False, 'pager')
157 157 if util.safehasattr(signal, "SIGPIPE"):
158 158 signal.signal(signal.SIGPIPE, signal.SIG_DFL)
159 159 _runpager(ui, p)
160 160 return orig(ui, options, cmd, cmdfunc)
161 161
162 162 # Wrap dispatch._runcommand after color is loaded so color can see
163 163 # ui.pageractive. Otherwise, if we loaded first, color's wrapped
164 164 # dispatch._runcommand would run without having access to ui.pageractive.
165 165 def afterloaded(loaded):
166 166 extensions.wrapfunction(dispatch, '_runcommand', pagecmd)
167 167 extensions.afterloaded('color', afterloaded)
168 168
169 169 def extsetup(ui):
170 170 commands.globalopts.append(
171 171 ('', 'pager', 'auto',
172 172 _("when to paginate (boolean, always, auto, or never)"),
173 173 _('TYPE')))
174 174
175 175 attended = ['annotate', 'cat', 'diff', 'export', 'glog', 'log', 'qdiff']
@@ -1,3577 +1,3577 @@ cmdutil.py
1 1 # cmdutil.py - help for command processing in mercurial
2 2 #
3 3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 from __future__ import absolute_import
9 9
10 10 import errno
11 11 import os
12 12 import re
13 13 import sys
14 14 import tempfile
15 15
16 16 from .i18n import _
17 17 from .node import (
18 18 bin,
19 19 hex,
20 20 nullid,
21 21 nullrev,
22 22 short,
23 23 )
24 24
25 25 from . import (
26 26 bookmarks,
27 27 changelog,
28 28 copies,
29 29 crecord as crecordmod,
30 30 encoding,
31 31 error,
32 32 formatter,
33 33 graphmod,
34 34 lock as lockmod,
35 35 match as matchmod,
36 36 obsolete,
37 37 patch,
38 38 pathutil,
39 39 phases,
40 40 repair,
41 41 revlog,
42 42 revset,
43 43 scmutil,
44 44 templatekw,
45 45 templater,
46 46 util,
47 47 )
48 48 stringio = util.stringio
49 49
50 50 def ishunk(x):
51 51 hunkclasses = (crecordmod.uihunk, patch.recordhunk)
52 52 return isinstance(x, hunkclasses)
53 53
54 54 def newandmodified(chunks, originalchunks):
55 55 newlyaddedandmodifiedfiles = set()
56 56 for chunk in chunks:
57 57 if ishunk(chunk) and chunk.header.isnewfile() and chunk not in \
58 58 originalchunks:
59 59 newlyaddedandmodifiedfiles.add(chunk.header.filename())
60 60 return newlyaddedandmodifiedfiles
61 61
62 62 def parsealiases(cmd):
63 63 return cmd.lstrip("^").split("|")
64 64
65 65 def setupwrapcolorwrite(ui):
66 66 # wrap ui.write so diff output can be labeled/colorized
67 67 def wrapwrite(orig, *args, **kw):
68 68 label = kw.pop('label', '')
69 69 for chunk, l in patch.difflabel(lambda: args):
70 70 orig(chunk, label=label + l)
71 71
72 72 oldwrite = ui.write
73 73 def wrap(*args, **kwargs):
74 74 return wrapwrite(oldwrite, *args, **kwargs)
75 75 setattr(ui, 'write', wrap)
76 76 return oldwrite
77 77
78 78 def filterchunks(ui, originalhunks, usecurses, testfile, operation=None):
79 79 if usecurses:
80 80 if testfile:
81 81 recordfn = crecordmod.testdecorator(testfile,
82 82 crecordmod.testchunkselector)
83 83 else:
84 84 recordfn = crecordmod.chunkselector
85 85
86 86 return crecordmod.filterpatch(ui, originalhunks, recordfn)
87 87
88 88 else:
89 89 return patch.filterpatch(ui, originalhunks, operation)
90 90
91 91 def recordfilter(ui, originalhunks, operation=None):
92 92 """ Prompts the user to filter the originalhunks and return a list of
93 93 selected hunks.
94 94 *operation* is used for to build ui messages to indicate the user what
95 95 kind of filtering they are doing: reverting, committing, shelving, etc.
96 96 (see patch.filterpatch).
97 97 """
98 98 usecurses = crecordmod.checkcurses(ui)
99 99 testfile = ui.config('experimental', 'crecordtest', None)
100 100 oldwrite = setupwrapcolorwrite(ui)
101 101 try:
102 102 newchunks, newopts = filterchunks(ui, originalhunks, usecurses,
103 103 testfile, operation)
104 104 finally:
105 105 ui.write = oldwrite
106 106 return newchunks, newopts
107 107
108 108 def dorecord(ui, repo, commitfunc, cmdsuggest, backupall,
109 109 filterfn, *pats, **opts):
110 110 from . import merge as mergemod
111 111 if not ui.interactive():
112 112 if cmdsuggest:
113 113 msg = _('running non-interactively, use %s instead') % cmdsuggest
114 114 else:
115 115 msg = _('running non-interactively')
116 116 raise error.Abort(msg)
117 117
118 118 # make sure username is set before going interactive
119 119 if not opts.get('user'):
120 120 ui.username() # raise exception, username not provided
121 121
122 122 def recordfunc(ui, repo, message, match, opts):
123 123 """This is generic record driver.
124 124
125 125 Its job is to interactively filter local changes, and
126 126 accordingly prepare working directory into a state in which the
127 127 job can be delegated to a non-interactive commit command such as
128 128 'commit' or 'qrefresh'.
129 129
130 130 After the actual job is done by non-interactive command, the
131 131 working directory is restored to its original state.
132 132
133 133 In the end we'll record interesting changes, and everything else
134 134 will be left in place, so the user can continue working.
135 135 """
136 136
137 137 checkunfinished(repo, commit=True)
138 138 wctx = repo[None]
139 139 merge = len(wctx.parents()) > 1
140 140 if merge:
141 141 raise error.Abort(_('cannot partially commit a merge '
142 142 '(use "hg commit" instead)'))
143 143
144 144 def fail(f, msg):
145 145 raise error.Abort('%s: %s' % (f, msg))
146 146
147 147 force = opts.get('force')
148 148 if not force:
149 149 vdirs = []
150 150 match.explicitdir = vdirs.append
151 151 match.bad = fail
152 152
153 153 status = repo.status(match=match)
154 154 if not force:
155 155 repo.checkcommitpatterns(wctx, vdirs, match, status, fail)
156 156 diffopts = patch.difffeatureopts(ui, opts=opts, whitespace=True)
157 157 diffopts.nodates = True
158 158 diffopts.git = True
159 159 diffopts.showfunc = True
160 160 originaldiff = patch.diff(repo, changes=status, opts=diffopts)
161 161 originalchunks = patch.parsepatch(originaldiff)
162 162
163 163 # 1. filter patch, since we are intending to apply a subset of it
164 164 try:
165 165 chunks, newopts = filterfn(ui, originalchunks)
166 166 except patch.PatchError as err:
167 167 raise error.Abort(_('error parsing patch: %s') % err)
168 168 opts.update(newopts)
169 169
170 170 # We need to keep a backup of files that have been newly added and
171 171 # modified during the recording process because there is a previous
172 172 # version without the edit in the workdir
173 173 newlyaddedandmodifiedfiles = newandmodified(chunks, originalchunks)
174 174 contenders = set()
175 175 for h in chunks:
176 176 try:
177 177 contenders.update(set(h.files()))
178 178 except AttributeError:
179 179 pass
180 180
181 181 changed = status.modified + status.added + status.removed
182 182 newfiles = [f for f in changed if f in contenders]
183 183 if not newfiles:
184 184 ui.status(_('no changes to record\n'))
185 185 return 0
186 186
187 187 modified = set(status.modified)
188 188
189 189 # 2. backup changed files, so we can restore them in the end
190 190
191 191 if backupall:
192 192 tobackup = changed
193 193 else:
194 194 tobackup = [f for f in newfiles if f in modified or f in \
195 195 newlyaddedandmodifiedfiles]
196 196 backups = {}
197 197 if tobackup:
198 198 backupdir = repo.join('record-backups')
199 199 try:
200 200 os.mkdir(backupdir)
201 201 except OSError as err:
202 202 if err.errno != errno.EEXIST:
203 203 raise
204 204 try:
205 205 # backup continues
206 206 for f in tobackup:
207 207 fd, tmpname = tempfile.mkstemp(prefix=f.replace('/', '_')+'.',
208 208 dir=backupdir)
209 209 os.close(fd)
210 210 ui.debug('backup %r as %r\n' % (f, tmpname))
211 211 util.copyfile(repo.wjoin(f), tmpname, copystat=True)
212 212 backups[f] = tmpname
213 213
214 214 fp = stringio()
215 215 for c in chunks:
216 216 fname = c.filename()
217 217 if fname in backups:
218 218 c.write(fp)
219 219 dopatch = fp.tell()
220 220 fp.seek(0)
221 221
222 222 # 2.5 optionally review / modify patch in text editor
223 223 if opts.get('review', False):
224 224 patchtext = (crecordmod.diffhelptext
225 225 + crecordmod.patchhelptext
226 226 + fp.read())
227 227 reviewedpatch = ui.edit(patchtext, "",
228 228 extra={"suffix": ".diff"})
229 229 fp.truncate(0)
230 230 fp.write(reviewedpatch)
231 231 fp.seek(0)
232 232
233 233 [os.unlink(repo.wjoin(c)) for c in newlyaddedandmodifiedfiles]
234 234 # 3a. apply filtered patch to clean repo (clean)
235 235 if backups:
236 236 # Equivalent to hg.revert
237 237 m = scmutil.matchfiles(repo, backups.keys())
238 238 mergemod.update(repo, repo.dirstate.p1(),
239 239 False, True, matcher=m)
240 240
241 241 # 3b. (apply)
242 242 if dopatch:
243 243 try:
244 244 ui.debug('applying patch\n')
245 245 ui.debug(fp.getvalue())
246 246 patch.internalpatch(ui, repo, fp, 1, eolmode=None)
247 247 except patch.PatchError as err:
248 248 raise error.Abort(str(err))
249 249 del fp
250 250
251 251 # 4. We prepared working directory according to filtered
252 252 # patch. Now is the time to delegate the job to
253 253 # commit/qrefresh or the like!
254 254
255 255 # Make all of the pathnames absolute.
256 256 newfiles = [repo.wjoin(nf) for nf in newfiles]
257 257 return commitfunc(ui, repo, *newfiles, **opts)
258 258 finally:
259 259 # 5. finally restore backed-up files
260 260 try:
261 261 dirstate = repo.dirstate
262 262 for realname, tmpname in backups.iteritems():
263 263 ui.debug('restoring %r to %r\n' % (tmpname, realname))
264 264
265 265 if dirstate[realname] == 'n':
266 266 # without normallookup, restoring timestamp
267 267 # may cause partially committed files
268 268 # to be treated as unmodified
269 269 dirstate.normallookup(realname)
270 270
271 271 # copystat=True here and above is a hack to trick any
272 272 # editors that have f open into thinking we haven't modified them.
273 273 #
274 274 # Also note that this is racy, as an editor could notice the
275 275 # file's mtime before we've finished writing it.
276 276 util.copyfile(tmpname, repo.wjoin(realname), copystat=True)
277 277 os.unlink(tmpname)
278 278 if tobackup:
279 279 os.rmdir(backupdir)
280 280 except OSError:
281 281 pass
282 282
283 283 def recordinwlock(ui, repo, message, match, opts):
284 284 with repo.wlock():
285 285 return recordfunc(ui, repo, message, match, opts)
286 286
287 287 return commit(ui, repo, recordinwlock, pats, opts)
288 288
289 289 def findpossible(cmd, table, strict=False):
290 290 """
291 291 Return cmd -> (aliases, command table entry)
292 292 for each matching command.
293 293 Return debug commands (or their aliases) only if no normal command matches.
294 294 """
295 295 choice = {}
296 296 debugchoice = {}
297 297
298 298 if cmd in table:
299 299 # short-circuit exact matches, "log" alias beats "^log|history"
300 300 keys = [cmd]
301 301 else:
302 302 keys = table.keys()
303 303
304 304 allcmds = []
305 305 for e in keys:
306 306 aliases = parsealiases(e)
307 307 allcmds.extend(aliases)
308 308 found = None
309 309 if cmd in aliases:
310 310 found = cmd
311 311 elif not strict:
312 312 for a in aliases:
313 313 if a.startswith(cmd):
314 314 found = a
315 315 break
316 316 if found is not None:
317 317 if aliases[0].startswith("debug") or found.startswith("debug"):
318 318 debugchoice[found] = (aliases, table[e])
319 319 else:
320 320 choice[found] = (aliases, table[e])
321 321
322 322 if not choice and debugchoice:
323 323 choice = debugchoice
324 324
325 325 return choice, allcmds
326 326
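For example, with a tiny hypothetical command table, a unique prefix resolves to the full name and debug commands are only offered when nothing else matches::

    table = {'^commit|ci': 1, 'status': 2, 'debugstate': 3}
    findpossible('st', table)[0].keys()       # ['status']
    findpossible('ci', table)[0].keys()       # ['ci'], an exact alias match
    findpossible('debugst', table)[0].keys()  # ['debugstate']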
327 327 def findcmd(cmd, table, strict=True):
328 328 """Return (aliases, command table entry) for command string."""
329 329 choice, allcmds = findpossible(cmd, table, strict)
330 330
331 331 if cmd in choice:
332 332 return choice[cmd]
333 333
334 334 if len(choice) > 1:
335 335 clist = choice.keys()
336 336 clist.sort()
337 337 raise error.AmbiguousCommand(cmd, clist)
338 338
339 339 if choice:
340 340 return choice.values()[0]
341 341
342 342 raise error.UnknownCommand(cmd, allcmds)
343 343
344 344 def findrepo(p):
345 345 while not os.path.isdir(os.path.join(p, ".hg")):
346 346 oldp, p = p, os.path.dirname(p)
347 347 if p == oldp:
348 348 return None
349 349
350 350 return p
351 351
352 352 def bailifchanged(repo, merge=True):
353 353 if merge and repo.dirstate.p2() != nullid:
354 354 raise error.Abort(_('outstanding uncommitted merge'))
355 355 modified, added, removed, deleted = repo.status()[:4]
356 356 if modified or added or removed or deleted:
357 357 raise error.Abort(_('uncommitted changes'))
358 358 ctx = repo[None]
359 359 for s in sorted(ctx.substate):
360 360 ctx.sub(s).bailifchanged()
361 361
362 362 def logmessage(ui, opts):
363 363 """ get the log message according to -m and -l option """
364 364 message = opts.get('message')
365 365 logfile = opts.get('logfile')
366 366
367 367 if message and logfile:
368 368 raise error.Abort(_('options --message and --logfile are mutually '
369 369 'exclusive'))
370 370 if not message and logfile:
371 371 try:
372 372 if logfile == '-':
373 373 message = ui.fin.read()
374 374 else:
375 375 message = '\n'.join(util.readfile(logfile).splitlines())
376 376 except IOError as inst:
377 377 raise error.Abort(_("can't read commit message '%s': %s") %
378 378 (logfile, inst.strerror))
379 379 return message
380 380
381 381 def mergeeditform(ctxorbool, baseformname):
382 382 """return appropriate editform name (referencing a committemplate)
383 383
384 384 'ctxorbool' is either a ctx to be committed, or a bool indicating
385 385 whether a merge is being committed.
386 386
387 387 This returns baseformname with '.merge' appended if it is a merge,
388 388 otherwise '.normal' is appended.
389 389 """
390 390 if isinstance(ctxorbool, bool):
391 391 if ctxorbool:
392 392 return baseformname + ".merge"
393 393 elif 1 < len(ctxorbool.parents()):
394 394 return baseformname + ".merge"
395 395
396 396 return baseformname + ".normal"
397 397
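For instance::

    mergeeditform(True, 'commit')    # -> 'commit.merge'
    mergeeditform(False, 'commit')   # -> 'commit.normal'
    # with a ctx instead of a bool, len(ctx.parents()) decides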
398 398 def getcommiteditor(edit=False, finishdesc=None, extramsg=None,
399 399 editform='', **opts):
400 400 """get appropriate commit message editor according to '--edit' option
401 401
402 402 'finishdesc' is a function to be called with edited commit message
403 403 (= 'description' of the new changeset) just after editing, but
404 404 before checking empty-ness. It should return actual text to be
405 405 stored into history. This allows to change description before
406 406 storing.
407 407
408 408 'extramsg' is an extra message to be shown in the editor instead of
409 409 the 'Leave message empty to abort commit' line. The 'HG: ' prefix
410 410 and EOL are automatically added.
411 411
412 412 'editform' is a dot-separated list of names, to distinguish
413 413 the purpose of commit text editing.
414 414
415 415 'getcommiteditor' returns 'commitforceeditor' regardless of
416 416 'edit', if one of 'finishdesc' or 'extramsg' is specified, because
417 417 they are specific to usage in MQ.
418 418 """
419 419 if edit or finishdesc or extramsg:
420 420 return lambda r, c, s: commitforceeditor(r, c, s,
421 421 finishdesc=finishdesc,
422 422 extramsg=extramsg,
423 423 editform=editform)
424 424 elif editform:
425 425 return lambda r, c, s: commiteditor(r, c, s, editform=editform)
426 426 else:
427 427 return commiteditor
428 428
429 429 def loglimit(opts):
430 430 """get the log limit according to option -l/--limit"""
431 431 limit = opts.get('limit')
432 432 if limit:
433 433 try:
434 434 limit = int(limit)
435 435 except ValueError:
436 436 raise error.Abort(_('limit must be a positive integer'))
437 437 if limit <= 0:
438 438 raise error.Abort(_('limit must be positive'))
439 439 else:
440 440 limit = None
441 441 return limit
442 442
443 443 def makefilename(repo, pat, node, desc=None,
444 444 total=None, seqno=None, revwidth=None, pathname=None):
445 445 node_expander = {
446 446 'H': lambda: hex(node),
447 447 'R': lambda: str(repo.changelog.rev(node)),
448 448 'h': lambda: short(node),
449 449 'm': lambda: re.sub('[^\w]', '_', str(desc))
450 450 }
451 451 expander = {
452 452 '%': lambda: '%',
453 453 'b': lambda: os.path.basename(repo.root),
454 454 }
455 455
456 456 try:
457 457 if node:
458 458 expander.update(node_expander)
459 459 if node:
460 460 expander['r'] = (lambda:
461 461 str(repo.changelog.rev(node)).zfill(revwidth or 0))
462 462 if total is not None:
463 463 expander['N'] = lambda: str(total)
464 464 if seqno is not None:
465 465 expander['n'] = lambda: str(seqno)
466 466 if total is not None and seqno is not None:
467 467 expander['n'] = lambda: str(seqno).zfill(len(str(total)))
468 468 if pathname is not None:
469 469 expander['s'] = lambda: os.path.basename(pathname)
470 470 expander['d'] = lambda: os.path.dirname(pathname) or '.'
471 471 expander['p'] = lambda: pathname
472 472
473 473 newname = []
474 474 patlen = len(pat)
475 475 i = 0
476 476 while i < patlen:
477 477 c = pat[i]
478 478 if c == '%':
479 479 i += 1
480 480 c = pat[i]
481 481 c = expander[c]()
482 482 newname.append(c)
483 483 i += 1
484 484 return ''.join(newname)
485 485 except KeyError as inst:
486 486 raise error.Abort(_("invalid format spec '%%%s' in output filename") %
487 487 inst.args[0])
488 488
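As a usage sketch, assuming a hypothetical repo whose root basename is 'myrepo' and a node at revision 42::

    makefilename(repo, '%b-r%r-%n-of-%N.patch', node,
                 seqno=7, total=12, revwidth=4)
    # -> 'myrepo-r0042-07-of-12.patch'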
489 489 class _unclosablefile(object):
490 490 def __init__(self, fp):
491 491 self._fp = fp
492 492
493 493 def close(self):
494 494 pass
495 495
496 496 def __iter__(self):
497 497 return iter(self._fp)
498 498
499 499 def __getattr__(self, attr):
500 500 return getattr(self._fp, attr)
501 501
502 502 def __enter__(self):
503 503 return self
504 504
505 505 def __exit__(self, exc_type, exc_value, exc_tb):
506 506 pass
507 507
508 508 def makefileobj(repo, pat, node=None, desc=None, total=None,
509 509 seqno=None, revwidth=None, mode='wb', modemap=None,
510 510 pathname=None):
511 511
512 512 writable = mode not in ('r', 'rb')
513 513
514 514 if not pat or pat == '-':
515 515 if writable:
516 516 fp = repo.ui.fout
517 517 else:
518 518 fp = repo.ui.fin
519 519 return _unclosablefile(fp)
520 520 if util.safehasattr(pat, 'write') and writable:
521 521 return pat
522 522 if util.safehasattr(pat, 'read') and 'r' in mode:
523 523 return pat
524 524 fn = makefilename(repo, pat, node, desc, total, seqno, revwidth, pathname)
525 525 if modemap is not None:
526 526 mode = modemap.get(fn, mode)
527 527 if mode == 'wb':
528 528 modemap[fn] = 'ab'
529 529 return open(fn, mode)
530 530
531 531 def openrevlog(repo, cmd, file_, opts):
532 532 """opens the changelog, manifest, a filelog or a given revlog"""
533 533 cl = opts['changelog']
534 534 mf = opts['manifest']
535 535 dir = opts['dir']
536 536 msg = None
537 537 if cl and mf:
538 538 msg = _('cannot specify --changelog and --manifest at the same time')
539 539 elif cl and dir:
540 540 msg = _('cannot specify --changelog and --dir at the same time')
541 541 elif cl or mf or dir:
542 542 if file_:
543 543 msg = _('cannot specify filename with --changelog or --manifest')
544 544 elif not repo:
545 545 msg = _('cannot specify --changelog or --manifest or --dir '
546 546 'without a repository')
547 547 if msg:
548 548 raise error.Abort(msg)
549 549
550 550 r = None
551 551 if repo:
552 552 if cl:
553 553 r = repo.unfiltered().changelog
554 554 elif dir:
555 555 if 'treemanifest' not in repo.requirements:
556 556 raise error.Abort(_("--dir can only be used on repos with "
557 557 "treemanifest enabled"))
558 558 dirlog = repo.manifestlog._revlog.dirlog(dir)
559 559 if len(dirlog):
560 560 r = dirlog
561 561 elif mf:
562 562 r = repo.manifestlog._revlog
563 563 elif file_:
564 564 filelog = repo.file(file_)
565 565 if len(filelog):
566 566 r = filelog
567 567 if not r:
568 568 if not file_:
569 569 raise error.CommandError(cmd, _('invalid arguments'))
570 570 if not os.path.isfile(file_):
571 571 raise error.Abort(_("revlog '%s' not found") % file_)
572 572 r = revlog.revlog(scmutil.opener(os.getcwd(), audit=False),
573 573 file_[:-2] + ".i")
574 574 return r
575 575
576 576 def copy(ui, repo, pats, opts, rename=False):
577 577 # called with the repo lock held
578 578 #
579 579 # hgsep => pathname that uses "/" to separate directories
580 580 # ossep => pathname that uses os.sep to separate directories
581 581 cwd = repo.getcwd()
582 582 targets = {}
583 583 after = opts.get("after")
584 584 dryrun = opts.get("dry_run")
585 585 wctx = repo[None]
586 586
587 587 def walkpat(pat):
588 588 srcs = []
589 589 if after:
590 590 badstates = '?'
591 591 else:
592 592 badstates = '?r'
593 593 m = scmutil.match(repo[None], [pat], opts, globbed=True)
594 594 for abs in repo.walk(m):
595 595 state = repo.dirstate[abs]
596 596 rel = m.rel(abs)
597 597 exact = m.exact(abs)
598 598 if state in badstates:
599 599 if exact and state == '?':
600 600 ui.warn(_('%s: not copying - file is not managed\n') % rel)
601 601 if exact and state == 'r':
602 602 ui.warn(_('%s: not copying - file has been marked for'
603 603 ' remove\n') % rel)
604 604 continue
605 605 # abs: hgsep
606 606 # rel: ossep
607 607 srcs.append((abs, rel, exact))
608 608 return srcs
609 609
610 610 # abssrc: hgsep
611 611 # relsrc: ossep
612 612 # otarget: ossep
613 613 def copyfile(abssrc, relsrc, otarget, exact):
614 614 abstarget = pathutil.canonpath(repo.root, cwd, otarget)
615 615 if '/' in abstarget:
616 616 # We cannot normalize abstarget itself, as this would prevent
617 617 # case-only renames, like a => A.
618 618 abspath, absname = abstarget.rsplit('/', 1)
619 619 abstarget = repo.dirstate.normalize(abspath) + '/' + absname
620 620 reltarget = repo.pathto(abstarget, cwd)
621 621 target = repo.wjoin(abstarget)
622 622 src = repo.wjoin(abssrc)
623 623 state = repo.dirstate[abstarget]
624 624
625 625 scmutil.checkportable(ui, abstarget)
626 626
627 627 # check for collisions
628 628 prevsrc = targets.get(abstarget)
629 629 if prevsrc is not None:
630 630 ui.warn(_('%s: not overwriting - %s collides with %s\n') %
631 631 (reltarget, repo.pathto(abssrc, cwd),
632 632 repo.pathto(prevsrc, cwd)))
633 633 return
634 634
635 635 # check for overwrites
636 636 exists = os.path.lexists(target)
637 637 samefile = False
638 638 if exists and abssrc != abstarget:
639 639 if (repo.dirstate.normalize(abssrc) ==
640 640 repo.dirstate.normalize(abstarget)):
641 641 if not rename:
642 642 ui.warn(_("%s: can't copy - same file\n") % reltarget)
643 643 return
644 644 exists = False
645 645 samefile = True
646 646
647 647 if not after and exists or after and state in 'mn':
648 648 if not opts['force']:
649 649 if state in 'mn':
650 650 msg = _('%s: not overwriting - file already committed\n')
651 651 if after:
652 652 flags = '--after --force'
653 653 else:
654 654 flags = '--force'
655 655 if rename:
656 656 hint = _('(hg rename %s to replace the file by '
657 657 'recording a rename)\n') % flags
658 658 else:
659 659 hint = _('(hg copy %s to replace the file by '
660 660 'recording a copy)\n') % flags
661 661 else:
662 662 msg = _('%s: not overwriting - file exists\n')
663 663 if rename:
664 664 hint = _('(hg rename --after to record the rename)\n')
665 665 else:
666 666 hint = _('(hg copy --after to record the copy)\n')
667 667 ui.warn(msg % reltarget)
668 668 ui.warn(hint)
669 669 return
670 670
671 671 if after:
672 672 if not exists:
673 673 if rename:
674 674 ui.warn(_('%s: not recording move - %s does not exist\n') %
675 675 (relsrc, reltarget))
676 676 else:
677 677 ui.warn(_('%s: not recording copy - %s does not exist\n') %
678 678 (relsrc, reltarget))
679 679 return
680 680 elif not dryrun:
681 681 try:
682 682 if exists:
683 683 os.unlink(target)
684 684 targetdir = os.path.dirname(target) or '.'
685 685 if not os.path.isdir(targetdir):
686 686 os.makedirs(targetdir)
687 687 if samefile:
688 688 tmp = target + "~hgrename"
689 689 os.rename(src, tmp)
690 690 os.rename(tmp, target)
691 691 else:
692 692 util.copyfile(src, target)
693 693 srcexists = True
694 694 except IOError as inst:
695 695 if inst.errno == errno.ENOENT:
696 696 ui.warn(_('%s: deleted in working directory\n') % relsrc)
697 697 srcexists = False
698 698 else:
699 699 ui.warn(_('%s: cannot copy - %s\n') %
700 700 (relsrc, inst.strerror))
701 701 return True # report a failure
702 702
703 703 if ui.verbose or not exact:
704 704 if rename:
705 705 ui.status(_('moving %s to %s\n') % (relsrc, reltarget))
706 706 else:
707 707 ui.status(_('copying %s to %s\n') % (relsrc, reltarget))
708 708
709 709 targets[abstarget] = abssrc
710 710
711 711 # fix up dirstate
712 712 scmutil.dirstatecopy(ui, repo, wctx, abssrc, abstarget,
713 713 dryrun=dryrun, cwd=cwd)
714 714 if rename and not dryrun:
715 715 if not after and srcexists and not samefile:
716 716 util.unlinkpath(repo.wjoin(abssrc))
717 717 wctx.forget([abssrc])
718 718
719 719 # pat: ossep
720 720 # dest ossep
721 721 # srcs: list of (hgsep, hgsep, ossep, bool)
722 722 # return: function that takes hgsep and returns ossep
723 723 def targetpathfn(pat, dest, srcs):
724 724 if os.path.isdir(pat):
725 725 abspfx = pathutil.canonpath(repo.root, cwd, pat)
726 726 abspfx = util.localpath(abspfx)
727 727 if destdirexists:
728 728 striplen = len(os.path.split(abspfx)[0])
729 729 else:
730 730 striplen = len(abspfx)
731 731 if striplen:
732 732 striplen += len(os.sep)
733 733 res = lambda p: os.path.join(dest, util.localpath(p)[striplen:])
734 734 elif destdirexists:
735 735 res = lambda p: os.path.join(dest,
736 736 os.path.basename(util.localpath(p)))
737 737 else:
738 738 res = lambda p: dest
739 739 return res
740 740
741 741 # pat: ossep
742 742 # dest ossep
743 743 # srcs: list of (hgsep, hgsep, ossep, bool)
744 744 # return: function that takes hgsep and returns ossep
745 745 def targetpathafterfn(pat, dest, srcs):
746 746 if matchmod.patkind(pat):
747 747 # a mercurial pattern
748 748 res = lambda p: os.path.join(dest,
749 749 os.path.basename(util.localpath(p)))
750 750 else:
751 751 abspfx = pathutil.canonpath(repo.root, cwd, pat)
752 752 if len(abspfx) < len(srcs[0][0]):
753 753 # A directory. Either the target path contains the last
754 754 # component of the source path or it does not.
755 755 def evalpath(striplen):
756 756 score = 0
757 757 for s in srcs:
758 758 t = os.path.join(dest, util.localpath(s[0])[striplen:])
759 759 if os.path.lexists(t):
760 760 score += 1
761 761 return score
762 762
763 763 abspfx = util.localpath(abspfx)
764 764 striplen = len(abspfx)
765 765 if striplen:
766 766 striplen += len(os.sep)
767 767 if os.path.isdir(os.path.join(dest, os.path.split(abspfx)[1])):
768 768 score = evalpath(striplen)
769 769 striplen1 = len(os.path.split(abspfx)[0])
770 770 if striplen1:
771 771 striplen1 += len(os.sep)
772 772 if evalpath(striplen1) > score:
773 773 striplen = striplen1
774 774 res = lambda p: os.path.join(dest,
775 775 util.localpath(p)[striplen:])
776 776 else:
777 777 # a file
778 778 if destdirexists:
779 779 res = lambda p: os.path.join(dest,
780 780 os.path.basename(util.localpath(p)))
781 781 else:
782 782 res = lambda p: dest
783 783 return res
784 784
785 785 pats = scmutil.expandpats(pats)
786 786 if not pats:
787 787 raise error.Abort(_('no source or destination specified'))
788 788 if len(pats) == 1:
789 789 raise error.Abort(_('no destination specified'))
790 790 dest = pats.pop()
791 791 destdirexists = os.path.isdir(dest) and not os.path.islink(dest)
792 792 if not destdirexists:
793 793 if len(pats) > 1 or matchmod.patkind(pats[0]):
794 794 raise error.Abort(_('with multiple sources, destination must be an '
795 795 'existing directory'))
796 796 if util.endswithsep(dest):
797 797 raise error.Abort(_('destination %s is not a directory') % dest)
798 798
799 799 tfn = targetpathfn
800 800 if after:
801 801 tfn = targetpathafterfn
802 802 copylist = []
803 803 for pat in pats:
804 804 srcs = walkpat(pat)
805 805 if not srcs:
806 806 continue
807 807 copylist.append((tfn(pat, dest, srcs), srcs))
808 808 if not copylist:
809 809 raise error.Abort(_('no files to copy'))
810 810
811 811 errors = 0
812 812 for targetpath, srcs in copylist:
813 813 for abssrc, relsrc, exact in srcs:
814 814 if copyfile(abssrc, relsrc, targetpath(abssrc), exact):
815 815 errors += 1
816 816
817 817 if errors:
818 818 ui.warn(_('(consider using --after)\n'))
819 819
820 820 return errors != 0
821 821
822 822 def service(opts, parentfn=None, initfn=None, runfn=None, logfile=None,
823 823 runargs=None, appendpid=False):
824 824 '''Run a command as a service.'''
825 825
826 826 def writepid(pid):
827 827 if opts['pid_file']:
828 828 if appendpid:
829 829 mode = 'a'
830 830 else:
831 831 mode = 'w'
832 832 fp = open(opts['pid_file'], mode)
833 833 fp.write(str(pid) + '\n')
834 834 fp.close()
835 835
836 836 if opts['daemon'] and not opts['daemon_postexec']:
837 837 # Signal child process startup with file removal
838 838 lockfd, lockpath = tempfile.mkstemp(prefix='hg-service-')
839 839 os.close(lockfd)
840 840 try:
841 841 if not runargs:
842 842 runargs = util.hgcmd() + sys.argv[1:]
843 843 runargs.append('--daemon-postexec=unlink:%s' % lockpath)
844 844 # Don't pass --cwd to the child process, because we've already
845 845 # changed directory.
846 846 for i in xrange(1, len(runargs)):
847 847 if runargs[i].startswith('--cwd='):
848 848 del runargs[i]
849 849 break
850 850 elif runargs[i].startswith('--cwd'):
851 851 del runargs[i:i + 2]
852 852 break
853 853 def condfn():
854 854 return not os.path.exists(lockpath)
855 855 pid = util.rundetached(runargs, condfn)
856 856 if pid < 0:
857 857 raise error.Abort(_('child process failed to start'))
858 858 writepid(pid)
859 859 finally:
860 860 try:
861 861 os.unlink(lockpath)
862 862 except OSError as e:
863 863 if e.errno != errno.ENOENT:
864 864 raise
865 865 if parentfn:
866 866 return parentfn(pid)
867 867 else:
868 868 return
869 869
870 870 if initfn:
871 871 initfn()
872 872
873 873 if not opts['daemon']:
874 874 writepid(util.getpid())
875 875
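# each --daemon-postexec instruction is 'unlink:PATH', 'chdir:DIR' or 'none';
# e.g. the parent process above appends '--daemon-postexec=unlink:LOCKPATH' so
# the detached child signals readiness by removing the startup lock file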
876 876 if opts['daemon_postexec']:
877 877 try:
878 878 os.setsid()
879 879 except AttributeError:
880 880 pass
881 881 for inst in opts['daemon_postexec']:
882 882 if inst.startswith('unlink:'):
883 883 lockpath = inst[7:]
884 884 os.unlink(lockpath)
885 885 elif inst.startswith('chdir:'):
886 886 os.chdir(inst[6:])
887 887 elif inst != 'none':
888 888 raise error.Abort(_('invalid value for --daemon-postexec: %s')
889 889 % inst)
890 890 util.hidewindow()
891 sys.stdout.flush()
892 sys.stderr.flush()
891 util.stdout.flush()
892 util.stderr.flush()
893 893
894 894 nullfd = os.open(os.devnull, os.O_RDWR)
895 895 logfilefd = nullfd
896 896 if logfile:
897 897 logfilefd = os.open(logfile, os.O_RDWR | os.O_CREAT | os.O_APPEND)
898 898 os.dup2(nullfd, 0)
899 899 os.dup2(logfilefd, 1)
900 900 os.dup2(logfilefd, 2)
901 901 if nullfd not in (0, 1, 2):
902 902 os.close(nullfd)
903 903 if logfile and logfilefd not in (0, 1, 2):
904 904 os.close(logfilefd)
905 905
906 906 if runfn:
907 907 return runfn()
908 908
909 909 ## facility to let extensions process additional data into an import patch
910 910 # list of identifiers to be executed in order
911 911 extrapreimport = [] # run before commit
912 912 extrapostimport = [] # run after commit
913 913 # mapping from identifier to actual import function
914 914 #
915 915 # 'preimport' functions are run before the commit is made and are provided
916 916 # the following arguments:
917 917 # - repo: the localrepository instance,
918 918 # - patchdata: data extracted from the patch header (cf. mercurial.patch.patchheadermap),
919 919 # - extra: the future extra dictionary of the changeset, please mutate it,
920 920 # - opts: the import options.
921 921 # XXX ideally, we would just pass a ctx ready to be committed; that would allow
922 922 # mutation of the in-memory commit and more. Feel free to rework the code to get
923 923 # there.
924 924 extrapreimportmap = {}
925 925 # 'postimport' functions are run after the commit is made and are provided
926 926 # the following argument:
927 927 # - ctx: the changectx created by import.
928 928 extrapostimportmap = {}
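# Example registration from a hypothetical extension (a sketch; the names are
# illustrative only, matching the hook signatures described above):
#
#     def _recordimporter(repo, patchdata, extra, opts):
#         # mutate 'extra' so the changeset records who imported it
#         extra['importer'] = repo.ui.username()
#
#     extrapreimport.append('importer')
#     extrapreimportmap['importer'] = _recordimporter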
929 929
930 930 def tryimportone(ui, repo, hunk, parents, opts, msgs, updatefunc):
931 931 """Utility function used by commands.import to import a single patch
932 932
933 933 This function is explicitly defined here to help the evolve extension
934 934 wrap this part of the import logic.
935 935
936 936 The API is currently a bit ugly because it is a simple code translation from
937 937 the import command. Feel free to make it better.
938 938
939 939 :hunk: a patch (as a binary string)
940 940 :parents: nodes that will be parent of the created commit
941 941 :opts: the full dict of options passed to the import command
942 942 :msgs: list to save the commit message to
943 943 (used in case we need to save it when failing)
944 944 :updatefunc: a function that updates a repo to a given node
945 945 updatefunc(<repo>, <node>)
946 946 """
947 947 # avoid cycle context -> subrepo -> cmdutil
948 948 from . import context
949 949 extractdata = patch.extract(ui, hunk)
950 950 tmpname = extractdata.get('filename')
951 951 message = extractdata.get('message')
952 952 user = opts.get('user') or extractdata.get('user')
953 953 date = opts.get('date') or extractdata.get('date')
954 954 branch = extractdata.get('branch')
955 955 nodeid = extractdata.get('nodeid')
956 956 p1 = extractdata.get('p1')
957 957 p2 = extractdata.get('p2')
958 958
959 959 nocommit = opts.get('no_commit')
960 960 importbranch = opts.get('import_branch')
961 961 update = not opts.get('bypass')
962 962 strip = opts["strip"]
963 963 prefix = opts["prefix"]
964 964 sim = float(opts.get('similarity') or 0)
965 965 if not tmpname:
966 966 return (None, None, False)
967 967
968 968 rejects = False
969 969
970 970 try:
971 971 cmdline_message = logmessage(ui, opts)
972 972 if cmdline_message:
973 973 # pick up the cmdline msg
974 974 message = cmdline_message
975 975 elif message:
976 976 # pick up the patch msg
977 977 message = message.strip()
978 978 else:
979 979 # launch the editor
980 980 message = None
981 981 ui.debug('message:\n%s\n' % message)
982 982
983 983 if len(parents) == 1:
984 984 parents.append(repo[nullid])
985 985 if opts.get('exact'):
986 986 if not nodeid or not p1:
987 987 raise error.Abort(_('not a Mercurial patch'))
988 988 p1 = repo[p1]
989 989 p2 = repo[p2 or nullid]
990 990 elif p2:
991 991 try:
992 992 p1 = repo[p1]
993 993 p2 = repo[p2]
994 994 # Without any options, consider p2 only if the
995 995 # patch is being applied on top of the recorded
996 996 # first parent.
997 997 if p1 != parents[0]:
998 998 p1 = parents[0]
999 999 p2 = repo[nullid]
1000 1000 except error.RepoError:
1001 1001 p1, p2 = parents
1002 1002 if p2.node() == nullid:
1003 1003 ui.warn(_("warning: importing the patch as a normal revision\n"
1004 1004 "(use --exact to import the patch as a merge)\n"))
1005 1005 else:
1006 1006 p1, p2 = parents
1007 1007
1008 1008 n = None
1009 1009 if update:
1010 1010 if p1 != parents[0]:
1011 1011 updatefunc(repo, p1.node())
1012 1012 if p2 != parents[1]:
1013 1013 repo.setparents(p1.node(), p2.node())
1014 1014
1015 1015 if opts.get('exact') or importbranch:
1016 1016 repo.dirstate.setbranch(branch or 'default')
1017 1017
1018 1018 partial = opts.get('partial', False)
1019 1019 files = set()
1020 1020 try:
1021 1021 patch.patch(ui, repo, tmpname, strip=strip, prefix=prefix,
1022 1022 files=files, eolmode=None, similarity=sim / 100.0)
1023 1023 except patch.PatchError as e:
1024 1024 if not partial:
1025 1025 raise error.Abort(str(e))
1026 1026 if partial:
1027 1027 rejects = True
1028 1028
1029 1029 files = list(files)
1030 1030 if nocommit:
1031 1031 if message:
1032 1032 msgs.append(message)
1033 1033 else:
1034 1034 if opts.get('exact') or p2:
1035 1035 # If you got here, you either used --force and know what
1036 1036 # you are doing, or used --exact or a merge patch while
1037 1037 # being updated to its first parent.
1038 1038 m = None
1039 1039 else:
1040 1040 m = scmutil.matchfiles(repo, files or [])
1041 1041 editform = mergeeditform(repo[None], 'import.normal')
1042 1042 if opts.get('exact'):
1043 1043 editor = None
1044 1044 else:
1045 1045 editor = getcommiteditor(editform=editform, **opts)
1046 1046 allowemptyback = repo.ui.backupconfig('ui', 'allowemptycommit')
1047 1047 extra = {}
1048 1048 for idfunc in extrapreimport:
1049 1049 extrapreimportmap[idfunc](repo, extractdata, extra, opts)
1050 1050 try:
1051 1051 if partial:
1052 1052 repo.ui.setconfig('ui', 'allowemptycommit', True)
1053 1053 n = repo.commit(message, user,
1054 1054 date, match=m,
1055 1055 editor=editor, extra=extra)
1056 1056 for idfunc in extrapostimport:
1057 1057 extrapostimportmap[idfunc](repo[n])
1058 1058 finally:
1059 1059 repo.ui.restoreconfig(allowemptyback)
1060 1060 else:
1061 1061 if opts.get('exact') or importbranch:
1062 1062 branch = branch or 'default'
1063 1063 else:
1064 1064 branch = p1.branch()
1065 1065 store = patch.filestore()
1066 1066 try:
1067 1067 files = set()
1068 1068 try:
1069 1069 patch.patchrepo(ui, repo, p1, store, tmpname, strip, prefix,
1070 1070 files, eolmode=None)
1071 1071 except patch.PatchError as e:
1072 1072 raise error.Abort(str(e))
1073 1073 if opts.get('exact'):
1074 1074 editor = None
1075 1075 else:
1076 1076 editor = getcommiteditor(editform='import.bypass')
1077 1077 memctx = context.makememctx(repo, (p1.node(), p2.node()),
1078 1078 message,
1079 1079 user,
1080 1080 date,
1081 1081 branch, files, store,
1082 1082 editor=editor)
1083 1083 n = memctx.commit()
1084 1084 finally:
1085 1085 store.close()
1086 1086 if opts.get('exact') and nocommit:
1087 1087 # --exact with --no-commit is still useful in that it does merge
1088 1088 # and branch bits
1089 1089 ui.warn(_("warning: can't check exact import with --no-commit\n"))
1090 1090 elif opts.get('exact') and hex(n) != nodeid:
1091 1091 raise error.Abort(_('patch is damaged or loses information'))
1092 1092 msg = _('applied to working directory')
1093 1093 if n:
1094 1094 # i18n: refers to a short changeset id
1095 1095 msg = _('created %s') % short(n)
1096 1096 return (msg, n, rejects)
1097 1097 finally:
1098 1098 os.unlink(tmpname)
1099 1099
1100 1100 # facility to let extensions include additional data in an exported patch
1101 1101 # list of identifiers to be executed in order
1102 1102 extraexport = []
1103 1103 # mapping from identifier to actual export function
1104 1104 # each function has to return a string to be added to the header, or None;
1105 1105 # it is given two arguments (sequencenumber, changectx)
1106 1106 extraexportmap = {}
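# Example registration from a hypothetical extension (a sketch; 'topic' is
# illustrative only):
#
#     def _topicheader(seqno, ctx):
#         topic = ctx.extra().get('topic')
#         return 'Topic %s' % topic if topic else None  # None emits no header
#
#     extraexport.append('topic')
#     extraexportmap['topic'] = _topicheader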
1107 1107
1108 1108 def export(repo, revs, template='hg-%h.patch', fp=None, switch_parent=False,
1109 1109 opts=None, match=None):
1110 1110 '''export changesets as hg patches.'''
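# the default file name template 'hg-%h.patch' is expanded by makefileobj();
# e.g. '%h' becomes the short changeset hash and '%r' the zero-padded revision
# number (see 'hg help export' for the full list of format specifiers)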
1111 1111
1112 1112 total = len(revs)
1113 1113 revwidth = max([len(str(rev)) for rev in revs])
1114 1114 filemode = {}
1115 1115
1116 1116 def single(rev, seqno, fp):
1117 1117 ctx = repo[rev]
1118 1118 node = ctx.node()
1119 1119 parents = [p.node() for p in ctx.parents() if p]
1120 1120 branch = ctx.branch()
1121 1121 if switch_parent:
1122 1122 parents.reverse()
1123 1123
1124 1124 if parents:
1125 1125 prev = parents[0]
1126 1126 else:
1127 1127 prev = nullid
1128 1128
1129 1129 shouldclose = False
1130 1130 if not fp and len(template) > 0:
1131 1131 desc_lines = ctx.description().rstrip().split('\n')
1132 1132 desc = desc_lines[0] # Commit always has a first line.
1133 1133 fp = makefileobj(repo, template, node, desc=desc, total=total,
1134 1134 seqno=seqno, revwidth=revwidth, mode='wb',
1135 1135 modemap=filemode)
1136 1136 shouldclose = True
1137 1137 if fp and not getattr(fp, 'name', '<unnamed>').startswith('<'):
1138 1138 repo.ui.note("%s\n" % fp.name)
1139 1139
1140 1140 if not fp:
1141 1141 write = repo.ui.write
1142 1142 else:
1143 1143 def write(s, **kw):
1144 1144 fp.write(s)
1145 1145
1146 1146 write("# HG changeset patch\n")
1147 1147 write("# User %s\n" % ctx.user())
1148 1148 write("# Date %d %d\n" % ctx.date())
1149 1149 write("# %s\n" % util.datestr(ctx.date()))
1150 1150 if branch and branch != 'default':
1151 1151 write("# Branch %s\n" % branch)
1152 1152 write("# Node ID %s\n" % hex(node))
1153 1153 write("# Parent %s\n" % hex(prev))
1154 1154 if len(parents) > 1:
1155 1155 write("# Parent %s\n" % hex(parents[1]))
1156 1156
1157 1157 for headerid in extraexport:
1158 1158 header = extraexportmap[headerid](seqno, ctx)
1159 1159 if header is not None:
1160 1160 write('# %s\n' % header)
1161 1161 write(ctx.description().rstrip())
1162 1162 write("\n\n")
1163 1163
1164 1164 for chunk, label in patch.diffui(repo, prev, node, match, opts=opts):
1165 1165 write(chunk, label=label)
1166 1166
1167 1167 if shouldclose:
1168 1168 fp.close()
1169 1169
1170 1170 for seqno, rev in enumerate(revs):
1171 1171 single(rev, seqno + 1, fp)
1172 1172
1173 1173 def diffordiffstat(ui, repo, diffopts, node1, node2, match,
1174 1174 changes=None, stat=False, fp=None, prefix='',
1175 1175 root='', listsubrepos=False):
1176 1176 '''show diff or diffstat.'''
1177 1177 if fp is None:
1178 1178 write = ui.write
1179 1179 else:
1180 1180 def write(s, **kw):
1181 1181 fp.write(s)
1182 1182
1183 1183 if root:
1184 1184 relroot = pathutil.canonpath(repo.root, repo.getcwd(), root)
1185 1185 else:
1186 1186 relroot = ''
1187 1187 if relroot != '':
1188 1188 # XXX relative roots currently don't work if the root is within a
1189 1189 # subrepo
1190 1190 uirelroot = match.uipath(relroot)
1191 1191 relroot += '/'
1192 1192 for matchroot in match.files():
1193 1193 if not matchroot.startswith(relroot):
1194 1194 ui.warn(_('warning: %s not inside relative root %s\n') % (
1195 1195 match.uipath(matchroot), uirelroot))
1196 1196
1197 1197 if stat:
1198 1198 diffopts = diffopts.copy(context=0)
1199 1199 width = 80
1200 1200 if not ui.plain():
1201 1201 width = ui.termwidth()
1202 1202 chunks = patch.diff(repo, node1, node2, match, changes, diffopts,
1203 1203 prefix=prefix, relroot=relroot)
1204 1204 for chunk, label in patch.diffstatui(util.iterlines(chunks),
1205 1205 width=width):
1206 1206 write(chunk, label=label)
1207 1207 else:
1208 1208 for chunk, label in patch.diffui(repo, node1, node2, match,
1209 1209 changes, diffopts, prefix=prefix,
1210 1210 relroot=relroot):
1211 1211 write(chunk, label=label)
1212 1212
1213 1213 if listsubrepos:
1214 1214 ctx1 = repo[node1]
1215 1215 ctx2 = repo[node2]
1216 1216 for subpath, sub in scmutil.itersubrepos(ctx1, ctx2):
1217 1217 tempnode2 = node2
1218 1218 try:
1219 1219 if node2 is not None:
1220 1220 tempnode2 = ctx2.substate[subpath][1]
1221 1221 except KeyError:
1222 1222 # A subrepo that existed in node1 was deleted between node1 and
1223 1223 # node2 (inclusive). Thus, ctx2's substate won't contain that
1224 1224 # subpath. The best we can do is to ignore it.
1225 1225 tempnode2 = None
1226 1226 submatch = matchmod.subdirmatcher(subpath, match)
1227 1227 sub.diff(ui, diffopts, tempnode2, submatch, changes=changes,
1228 1228 stat=stat, fp=fp, prefix=prefix)
1229 1229
1230 1230 class changeset_printer(object):
1231 1231 '''show changeset information when templating is not requested.'''
1232 1232
1233 1233 def __init__(self, ui, repo, matchfn, diffopts, buffered):
1234 1234 self.ui = ui
1235 1235 self.repo = repo
1236 1236 self.buffered = buffered
1237 1237 self.matchfn = matchfn
1238 1238 self.diffopts = diffopts
1239 1239 self.header = {}
1240 1240 self.hunk = {}
1241 1241 self.lastheader = None
1242 1242 self.footer = None
1243 1243
1244 1244 def flush(self, ctx):
1245 1245 rev = ctx.rev()
1246 1246 if rev in self.header:
1247 1247 h = self.header[rev]
1248 1248 if h != self.lastheader:
1249 1249 self.lastheader = h
1250 1250 self.ui.write(h)
1251 1251 del self.header[rev]
1252 1252 if rev in self.hunk:
1253 1253 self.ui.write(self.hunk[rev])
1254 1254 del self.hunk[rev]
1255 1255 return 1
1256 1256 return 0
1257 1257
1258 1258 def close(self):
1259 1259 if self.footer:
1260 1260 self.ui.write(self.footer)
1261 1261
1262 1262 def show(self, ctx, copies=None, matchfn=None, **props):
1263 1263 if self.buffered:
1264 1264 self.ui.pushbuffer(labeled=True)
1265 1265 self._show(ctx, copies, matchfn, props)
1266 1266 self.hunk[ctx.rev()] = self.ui.popbuffer()
1267 1267 else:
1268 1268 self._show(ctx, copies, matchfn, props)
1269 1269
1270 1270 def _show(self, ctx, copies, matchfn, props):
1271 1271 '''show a single changeset or file revision'''
1272 1272 changenode = ctx.node()
1273 1273 rev = ctx.rev()
1274 1274 if self.ui.debugflag:
1275 1275 hexfunc = hex
1276 1276 else:
1277 1277 hexfunc = short
1278 1278 # as of now, wctx.node() and wctx.rev() return None, but we want to
1279 1279 # show the same values as {node} and {rev} templatekw
1280 1280 revnode = (scmutil.intrev(rev), hexfunc(bin(ctx.hex())))
1281 1281
1282 1282 if self.ui.quiet:
1283 1283 self.ui.write("%d:%s\n" % revnode, label='log.node')
1284 1284 return
1285 1285
1286 1286 date = util.datestr(ctx.date())
1287 1287
1288 1288 # i18n: column positioning for "hg log"
1289 1289 self.ui.write(_("changeset: %d:%s\n") % revnode,
1290 1290 label='log.changeset changeset.%s' % ctx.phasestr())
1291 1291
1292 1292 # branches are shown before any other names due to backwards
1293 1293 # compatibility
1294 1294 branch = ctx.branch()
1295 1295 # don't show the default branch name
1296 1296 if branch != 'default':
1297 1297 # i18n: column positioning for "hg log"
1298 1298 self.ui.write(_("branch: %s\n") % branch,
1299 1299 label='log.branch')
1300 1300
1301 1301 for nsname, ns in self.repo.names.iteritems():
1302 1302 # branches has special logic already handled above, so here we just
1303 1303 # skip it
1304 1304 if nsname == 'branches':
1305 1305 continue
1306 1306 # we will use the templatename as the color name since those two
1307 1307 # should be the same
1308 1308 for name in ns.names(self.repo, changenode):
1309 1309 self.ui.write(ns.logfmt % name,
1310 1310 label='log.%s' % ns.colorname)
1311 1311 if self.ui.debugflag:
1312 1312 # i18n: column positioning for "hg log"
1313 1313 self.ui.write(_("phase: %s\n") % ctx.phasestr(),
1314 1314 label='log.phase')
1315 1315 for pctx in scmutil.meaningfulparents(self.repo, ctx):
1316 1316 label = 'log.parent changeset.%s' % pctx.phasestr()
1317 1317 # i18n: column positioning for "hg log"
1318 1318 self.ui.write(_("parent: %d:%s\n")
1319 1319 % (pctx.rev(), hexfunc(pctx.node())),
1320 1320 label=label)
1321 1321
1322 1322 if self.ui.debugflag and rev is not None:
1323 1323 mnode = ctx.manifestnode()
1324 1324 # i18n: column positioning for "hg log"
1325 1325 self.ui.write(_("manifest: %d:%s\n") %
1326 1326 (self.repo.manifestlog._revlog.rev(mnode),
1327 1327 hex(mnode)),
1328 1328 label='ui.debug log.manifest')
1329 1329 # i18n: column positioning for "hg log"
1330 1330 self.ui.write(_("user: %s\n") % ctx.user(),
1331 1331 label='log.user')
1332 1332 # i18n: column positioning for "hg log"
1333 1333 self.ui.write(_("date: %s\n") % date,
1334 1334 label='log.date')
1335 1335
1336 1336 if self.ui.debugflag:
1337 1337 files = ctx.p1().status(ctx)[:3]
1338 1338 for key, value in zip([# i18n: column positioning for "hg log"
1339 1339 _("files:"),
1340 1340 # i18n: column positioning for "hg log"
1341 1341 _("files+:"),
1342 1342 # i18n: column positioning for "hg log"
1343 1343 _("files-:")], files):
1344 1344 if value:
1345 1345 self.ui.write("%-12s %s\n" % (key, " ".join(value)),
1346 1346 label='ui.debug log.files')
1347 1347 elif ctx.files() and self.ui.verbose:
1348 1348 # i18n: column positioning for "hg log"
1349 1349 self.ui.write(_("files: %s\n") % " ".join(ctx.files()),
1350 1350 label='ui.note log.files')
1351 1351 if copies and self.ui.verbose:
1352 1352 copies = ['%s (%s)' % c for c in copies]
1353 1353 # i18n: column positioning for "hg log"
1354 1354 self.ui.write(_("copies: %s\n") % ' '.join(copies),
1355 1355 label='ui.note log.copies')
1356 1356
1357 1357 extra = ctx.extra()
1358 1358 if extra and self.ui.debugflag:
1359 1359 for key, value in sorted(extra.items()):
1360 1360 # i18n: column positioning for "hg log"
1361 1361 self.ui.write(_("extra: %s=%s\n")
1362 1362 % (key, value.encode('string_escape')),
1363 1363 label='ui.debug log.extra')
1364 1364
1365 1365 description = ctx.description().strip()
1366 1366 if description:
1367 1367 if self.ui.verbose:
1368 1368 self.ui.write(_("description:\n"),
1369 1369 label='ui.note log.description')
1370 1370 self.ui.write(description,
1371 1371 label='ui.note log.description')
1372 1372 self.ui.write("\n\n")
1373 1373 else:
1374 1374 # i18n: column positioning for "hg log"
1375 1375 self.ui.write(_("summary: %s\n") %
1376 1376 description.splitlines()[0],
1377 1377 label='log.summary')
1378 1378 self.ui.write("\n")
1379 1379
1380 1380 self.showpatch(ctx, matchfn)
1381 1381
1382 1382 def showpatch(self, ctx, matchfn):
1383 1383 if not matchfn:
1384 1384 matchfn = self.matchfn
1385 1385 if matchfn:
1386 1386 stat = self.diffopts.get('stat')
1387 1387 diff = self.diffopts.get('patch')
1388 1388 diffopts = patch.diffallopts(self.ui, self.diffopts)
1389 1389 node = ctx.node()
1390 1390 prev = ctx.p1().node()
1391 1391 if stat:
1392 1392 diffordiffstat(self.ui, self.repo, diffopts, prev, node,
1393 1393 match=matchfn, stat=True)
1394 1394 if diff:
1395 1395 if stat:
1396 1396 self.ui.write("\n")
1397 1397 diffordiffstat(self.ui, self.repo, diffopts, prev, node,
1398 1398 match=matchfn, stat=False)
1399 1399 self.ui.write("\n")
1400 1400
1401 1401 class jsonchangeset(changeset_printer):
1402 1402 '''format changeset information.'''
1403 1403
1404 1404 def __init__(self, ui, repo, matchfn, diffopts, buffered):
1405 1405 changeset_printer.__init__(self, ui, repo, matchfn, diffopts, buffered)
1406 1406 self.cache = {}
1407 1407 self._first = True
1408 1408
1409 1409 def close(self):
1410 1410 if not self._first:
1411 1411 self.ui.write("\n]\n")
1412 1412 else:
1413 1413 self.ui.write("[]\n")
1414 1414
1415 1415 def _show(self, ctx, copies, matchfn, props):
1416 1416 '''show a single changeset or file revision'''
1417 1417 rev = ctx.rev()
1418 1418 if rev is None:
1419 1419 jrev = jnode = 'null'
1420 1420 else:
1421 1421 jrev = str(rev)
1422 1422 jnode = '"%s"' % hex(ctx.node())
1423 1423 j = encoding.jsonescape
1424 1424
1425 1425 if self._first:
1426 1426 self.ui.write("[\n {")
1427 1427 self._first = False
1428 1428 else:
1429 1429 self.ui.write(",\n {")
1430 1430
1431 1431 if self.ui.quiet:
1432 1432 self.ui.write(('\n "rev": %s') % jrev)
1433 1433 self.ui.write((',\n "node": %s') % jnode)
1434 1434 self.ui.write('\n }')
1435 1435 return
1436 1436
1437 1437 self.ui.write(('\n "rev": %s') % jrev)
1438 1438 self.ui.write((',\n "node": %s') % jnode)
1439 1439 self.ui.write((',\n "branch": "%s"') % j(ctx.branch()))
1440 1440 self.ui.write((',\n "phase": "%s"') % ctx.phasestr())
1441 1441 self.ui.write((',\n "user": "%s"') % j(ctx.user()))
1442 1442 self.ui.write((',\n "date": [%d, %d]') % ctx.date())
1443 1443 self.ui.write((',\n "desc": "%s"') % j(ctx.description()))
1444 1444
1445 1445 self.ui.write((',\n "bookmarks": [%s]') %
1446 1446 ", ".join('"%s"' % j(b) for b in ctx.bookmarks()))
1447 1447 self.ui.write((',\n "tags": [%s]') %
1448 1448 ", ".join('"%s"' % j(t) for t in ctx.tags()))
1449 1449 self.ui.write((',\n "parents": [%s]') %
1450 1450 ", ".join('"%s"' % c.hex() for c in ctx.parents()))
1451 1451
1452 1452 if self.ui.debugflag:
1453 1453 if rev is None:
1454 1454 jmanifestnode = 'null'
1455 1455 else:
1456 1456 jmanifestnode = '"%s"' % hex(ctx.manifestnode())
1457 1457 self.ui.write((',\n "manifest": %s') % jmanifestnode)
1458 1458
1459 1459 self.ui.write((',\n "extra": {%s}') %
1460 1460 ", ".join('"%s": "%s"' % (j(k), j(v))
1461 1461 for k, v in ctx.extra().items()))
1462 1462
1463 1463 files = ctx.p1().status(ctx)
1464 1464 self.ui.write((',\n "modified": [%s]') %
1465 1465 ", ".join('"%s"' % j(f) for f in files[0]))
1466 1466 self.ui.write((',\n "added": [%s]') %
1467 1467 ", ".join('"%s"' % j(f) for f in files[1]))
1468 1468 self.ui.write((',\n "removed": [%s]') %
1469 1469 ", ".join('"%s"' % j(f) for f in files[2]))
1470 1470
1471 1471 elif self.ui.verbose:
1472 1472 self.ui.write((',\n "files": [%s]') %
1473 1473 ", ".join('"%s"' % j(f) for f in ctx.files()))
1474 1474
1475 1475 if copies:
1476 1476 self.ui.write((',\n "copies": {%s}') %
1477 1477 ", ".join('"%s": "%s"' % (j(k), j(v))
1478 1478 for k, v in copies))
1479 1479
1480 1480 matchfn = self.matchfn
1481 1481 if matchfn:
1482 1482 stat = self.diffopts.get('stat')
1483 1483 diff = self.diffopts.get('patch')
1484 1484 diffopts = patch.difffeatureopts(self.ui, self.diffopts, git=True)
1485 1485 node, prev = ctx.node(), ctx.p1().node()
1486 1486 if stat:
1487 1487 self.ui.pushbuffer()
1488 1488 diffordiffstat(self.ui, self.repo, diffopts, prev, node,
1489 1489 match=matchfn, stat=True)
1490 1490 self.ui.write((',\n "diffstat": "%s"')
1491 1491 % j(self.ui.popbuffer()))
1492 1492 if diff:
1493 1493 self.ui.pushbuffer()
1494 1494 diffordiffstat(self.ui, self.repo, diffopts, prev, node,
1495 1495 match=matchfn, stat=False)
1496 1496 self.ui.write((',\n "diff": "%s"') % j(self.ui.popbuffer()))
1497 1497
1498 1498 self.ui.write("\n }")
1499 1499
1500 1500 class changeset_templater(changeset_printer):
1501 1501 '''format changeset information.'''
1502 1502
1503 1503 def __init__(self, ui, repo, matchfn, diffopts, tmpl, mapfile, buffered):
1504 1504 changeset_printer.__init__(self, ui, repo, matchfn, diffopts, buffered)
1505 1505 formatnode = (lambda x: x) if ui.debugflag else (lambda x: x[:12])
1506 1506 filters = {'formatnode': formatnode}
1507 1507 defaulttempl = {
1508 1508 'parent': '{rev}:{node|formatnode} ',
1509 1509 'manifest': '{rev}:{node|formatnode}',
1510 1510 'file_copy': '{name} ({source})',
1511 1511 'extra': '{key}={value|stringescape}'
1512 1512 }
1513 1513 # filecopy is preserved for compatibility reasons
1514 1514 defaulttempl['filecopy'] = defaulttempl['file_copy']
1515 1515 assert not (tmpl and mapfile)
1516 1516 if mapfile:
1517 1517 self.t = templater.templater.frommapfile(mapfile, filters=filters,
1518 1518 cache=defaulttempl)
1519 1519 else:
1520 1520 self.t = formatter.maketemplater(ui, 'changeset', tmpl,
1521 1521 filters=filters,
1522 1522 cache=defaulttempl)
1523 1523
1524 1524 self.cache = {}
1525 1525
1526 1526 # find correct templates for current mode
1527 1527 tmplmodes = [
1528 1528 (True, None),
1529 1529 (self.ui.verbose, 'verbose'),
1530 1530 (self.ui.quiet, 'quiet'),
1531 1531 (self.ui.debugflag, 'debug'),
1532 1532 ]
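# e.g. in --verbose mode a 'changeset_verbose' template from the map file
# takes precedence over the plain 'changeset' one when both are defined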
1533 1533
1534 1534 self._parts = {'header': '', 'footer': '', 'changeset': 'changeset',
1535 1535 'docheader': '', 'docfooter': ''}
1536 1536 for mode, postfix in tmplmodes:
1537 1537 for t in self._parts:
1538 1538 cur = t
1539 1539 if postfix:
1540 1540 cur += "_" + postfix
1541 1541 if mode and cur in self.t:
1542 1542 self._parts[t] = cur
1543 1543
1544 1544 if self._parts['docheader']:
1545 1545 self.ui.write(templater.stringify(self.t(self._parts['docheader'])))
1546 1546
1547 1547 def close(self):
1548 1548 if self._parts['docfooter']:
1549 1549 if not self.footer:
1550 1550 self.footer = ""
1551 1551 self.footer += templater.stringify(self.t(self._parts['docfooter']))
1552 1552 return super(changeset_templater, self).close()
1553 1553
1554 1554 def _show(self, ctx, copies, matchfn, props):
1555 1555 '''show a single changeset or file revision'''
1556 1556 props = props.copy()
1557 1557 props.update(templatekw.keywords)
1558 1558 props['templ'] = self.t
1559 1559 props['ctx'] = ctx
1560 1560 props['repo'] = self.repo
1561 1561 props['ui'] = self.repo.ui
1562 1562 props['revcache'] = {'copies': copies}
1563 1563 props['cache'] = self.cache
1564 1564
1565 1565 # write header
1566 1566 if self._parts['header']:
1567 1567 h = templater.stringify(self.t(self._parts['header'], **props))
1568 1568 if self.buffered:
1569 1569 self.header[ctx.rev()] = h
1570 1570 else:
1571 1571 if self.lastheader != h:
1572 1572 self.lastheader = h
1573 1573 self.ui.write(h)
1574 1574
1575 1575 # write changeset metadata, then patch if requested
1576 1576 key = self._parts['changeset']
1577 1577 self.ui.write(templater.stringify(self.t(key, **props)))
1578 1578 self.showpatch(ctx, matchfn)
1579 1579
1580 1580 if self._parts['footer']:
1581 1581 if not self.footer:
1582 1582 self.footer = templater.stringify(
1583 1583 self.t(self._parts['footer'], **props))
1584 1584
1585 1585 def gettemplate(ui, tmpl, style):
1586 1586 """
1587 1587 Find the template matching the given template spec or style.
1588 1588 """
1589 1589
1590 1590 # ui settings
1591 1591 if not tmpl and not style: # templates are stronger than styles
1592 1592 tmpl = ui.config('ui', 'logtemplate')
1593 1593 if tmpl:
1594 1594 return templater.unquotestring(tmpl), None
1595 1595 else:
1596 1596 style = util.expandpath(ui.config('ui', 'style', ''))
1597 1597
1598 1598 if not tmpl and style:
1599 1599 mapfile = style
1600 1600 if not os.path.split(mapfile)[0]:
1601 1601 mapname = (templater.templatepath('map-cmdline.' + mapfile)
1602 1602 or templater.templatepath(mapfile))
1603 1603 if mapname:
1604 1604 mapfile = mapname
1605 1605 return None, mapfile
1606 1606
1607 1607 if not tmpl:
1608 1608 return None, None
1609 1609
1610 1610 return formatter.lookuptemplate(ui, 'changeset', tmpl)
1611 1611
1612 1612 def show_changeset(ui, repo, opts, buffered=False):
1613 1613 """show one changeset using template or regular display.
1614 1614
1615 1615 Display format will be the first non-empty hit of:
1616 1616 1. option 'template'
1617 1617 2. option 'style'
1618 1618 3. [ui] setting 'logtemplate'
1619 1619 4. [ui] setting 'style'
1620 1620 If all of these values are either unset or the empty string,
1621 1621 regular display via changeset_printer() is done.
1622 1622 """
1623 1623 # options
1624 1624 matchfn = None
1625 1625 if opts.get('patch') or opts.get('stat'):
1626 1626 matchfn = scmutil.matchall(repo)
1627 1627
1628 1628 if opts.get('template') == 'json':
1629 1629 return jsonchangeset(ui, repo, matchfn, opts, buffered)
1630 1630
1631 1631 tmpl, mapfile = gettemplate(ui, opts.get('template'), opts.get('style'))
1632 1632
1633 1633 if not tmpl and not mapfile:
1634 1634 return changeset_printer(ui, repo, matchfn, opts, buffered)
1635 1635
1636 1636 return changeset_templater(ui, repo, matchfn, opts, tmpl, mapfile, buffered)
1637 1637
1638 1638 def showmarker(fm, marker, index=None):
1639 1639 """utility function to display obsolescence marker in a readable way
1640 1640
1641 1641 To be used by debug function."""
1642 1642 if index is not None:
1643 1643 fm.write('index', '%i ', index)
1644 1644 fm.write('precnode', '%s ', hex(marker.precnode()))
1645 1645 succs = marker.succnodes()
1646 1646 fm.condwrite(succs, 'succnodes', '%s ',
1647 1647 fm.formatlist(map(hex, succs), name='node'))
1648 1648 fm.write('flag', '%X ', marker.flags())
1649 1649 parents = marker.parentnodes()
1650 1650 if parents is not None:
1651 1651 fm.write('parentnodes', '{%s} ',
1652 1652 fm.formatlist(map(hex, parents), name='node', sep=', '))
1653 1653 fm.write('date', '(%s) ', fm.formatdate(marker.date()))
1654 1654 meta = marker.metadata().copy()
1655 1655 meta.pop('date', None)
1656 1656 fm.write('metadata', '{%s}', fm.formatdict(meta, fmt='%r: %r', sep=', '))
1657 1657 fm.plain('\n')
1658 1658
1659 1659 def finddate(ui, repo, date):
1660 1660 """Find the tipmost changeset that matches the given date spec"""
1661 1661
1662 1662 df = util.matchdate(date)
1663 1663 m = scmutil.matchall(repo)
1664 1664 results = {}
1665 1665
1666 1666 def prep(ctx, fns):
1667 1667 d = ctx.date()
1668 1668 if df(d[0]):
1669 1669 results[ctx.rev()] = d
1670 1670
1671 1671 for ctx in walkchangerevs(repo, m, {'rev': None}, prep):
1672 1672 rev = ctx.rev()
1673 1673 if rev in results:
1674 1674 ui.status(_("found revision %s from %s\n") %
1675 1675 (rev, util.datestr(results[rev])))
1676 1676 return str(rev)
1677 1677
1678 1678 raise error.Abort(_("revision matching date not found"))
1679 1679
1680 1680 def increasingwindows(windowsize=8, sizelimit=512):
1681 1681 while True:
1682 1682 yield windowsize
1683 1683 if windowsize < sizelimit:
1684 1684 windowsize *= 2
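# with the defaults this yields 8, 16, 32, ..., 512, 512, 512, ...; e.g.:
#
#     >>> from itertools import islice
#     >>> list(islice(increasingwindows(), 9))
#     [8, 16, 32, 64, 128, 256, 512, 512, 512]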
1685 1685
1686 1686 class FileWalkError(Exception):
1687 1687 pass
1688 1688
1689 1689 def walkfilerevs(repo, match, follow, revs, fncache):
1690 1690 '''Walks the file history for the matched files.
1691 1691
1692 1692 Returns the changeset revs that are involved in the file history.
1693 1693
1694 1694 Throws FileWalkError if the file history can't be walked using
1695 1695 filelogs alone.
1696 1696 '''
1697 1697 wanted = set()
1698 1698 copies = []
1699 1699 minrev, maxrev = min(revs), max(revs)
1700 1700 def filerevgen(filelog, last):
1701 1701 """
1702 1702 Only files, no patterns. Check the history of each file.
1703 1703
1704 1704 Examines filelog entries within the minrev..maxrev linkrev range and
1705 1705 returns an iterator yielding (linkrev, parentlinkrevs, copied)
1706 1706 tuples in backwards order.
1707 1707 """
1708 1708 cl_count = len(repo)
1709 1709 revs = []
1710 1710 for j in xrange(0, last + 1):
1711 1711 linkrev = filelog.linkrev(j)
1712 1712 if linkrev < minrev:
1713 1713 continue
1714 1714 # only yield revs for which we have the changelog; it can
1715 1715 # happen while doing "hg log" during a pull or commit
1716 1716 if linkrev >= cl_count:
1717 1717 break
1718 1718
1719 1719 parentlinkrevs = []
1720 1720 for p in filelog.parentrevs(j):
1721 1721 if p != nullrev:
1722 1722 parentlinkrevs.append(filelog.linkrev(p))
1723 1723 n = filelog.node(j)
1724 1724 revs.append((linkrev, parentlinkrevs,
1725 1725 follow and filelog.renamed(n)))
1726 1726
1727 1727 return reversed(revs)
1728 1728 def iterfiles():
1729 1729 pctx = repo['.']
1730 1730 for filename in match.files():
1731 1731 if follow:
1732 1732 if filename not in pctx:
1733 1733 raise error.Abort(_('cannot follow file not in parent '
1734 1734 'revision: "%s"') % filename)
1735 1735 yield filename, pctx[filename].filenode()
1736 1736 else:
1737 1737 yield filename, None
1738 1738 for filename_node in copies:
1739 1739 yield filename_node
1740 1740
1741 1741 for file_, node in iterfiles():
1742 1742 filelog = repo.file(file_)
1743 1743 if not len(filelog):
1744 1744 if node is None:
1745 1745 # A zero count may be a directory or deleted file, so
1746 1746 # try to find matching entries on the slow path.
1747 1747 if follow:
1748 1748 raise error.Abort(
1749 1749 _('cannot follow nonexistent file: "%s"') % file_)
1750 1750 raise FileWalkError("Cannot walk via filelog")
1751 1751 else:
1752 1752 continue
1753 1753
1754 1754 if node is None:
1755 1755 last = len(filelog) - 1
1756 1756 else:
1757 1757 last = filelog.rev(node)
1758 1758
1759 1759 # keep track of all ancestors of the file
1760 1760 ancestors = set([filelog.linkrev(last)])
1761 1761
1762 1762 # iterate from latest to oldest revision
1763 1763 for rev, flparentlinkrevs, copied in filerevgen(filelog, last):
1764 1764 if not follow:
1765 1765 if rev > maxrev:
1766 1766 continue
1767 1767 else:
1768 1768 # Note that last might not be the first interesting
1769 1769 # rev to us:
1770 1770 # if the file has been changed after maxrev, we'll
1771 1771 # have linkrev(last) > maxrev, and we still need
1772 1772 # to explore the file graph
1773 1773 if rev not in ancestors:
1774 1774 continue
1775 1775 # XXX insert 1327 fix here
1776 1776 if flparentlinkrevs:
1777 1777 ancestors.update(flparentlinkrevs)
1778 1778
1779 1779 fncache.setdefault(rev, []).append(file_)
1780 1780 wanted.add(rev)
1781 1781 if copied:
1782 1782 copies.append(copied)
1783 1783
1784 1784 return wanted
1785 1785
1786 1786 class _followfilter(object):
1787 1787 def __init__(self, repo, onlyfirst=False):
1788 1788 self.repo = repo
1789 1789 self.startrev = nullrev
1790 1790 self.roots = set()
1791 1791 self.onlyfirst = onlyfirst
1792 1792
1793 1793 def match(self, rev):
1794 1794 def realparents(rev):
1795 1795 if self.onlyfirst:
1796 1796 return self.repo.changelog.parentrevs(rev)[0:1]
1797 1797 else:
1798 1798 return filter(lambda x: x != nullrev,
1799 1799 self.repo.changelog.parentrevs(rev))
1800 1800
1801 1801 if self.startrev == nullrev:
1802 1802 self.startrev = rev
1803 1803 return True
1804 1804
1805 1805 if rev > self.startrev:
1806 1806 # forward: all descendants
1807 1807 if not self.roots:
1808 1808 self.roots.add(self.startrev)
1809 1809 for parent in realparents(rev):
1810 1810 if parent in self.roots:
1811 1811 self.roots.add(rev)
1812 1812 return True
1813 1813 else:
1814 1814 # backwards: all parents
1815 1815 if not self.roots:
1816 1816 self.roots.update(realparents(self.startrev))
1817 1817 if rev in self.roots:
1818 1818 self.roots.remove(rev)
1819 1819 self.roots.update(realparents(rev))
1820 1820 return True
1821 1821
1822 1822 return False
1823 1823
1824 1824 def walkchangerevs(repo, match, opts, prepare):
1825 1825 '''Iterate over files and the revs in which they changed.
1826 1826
1827 1827 Callers most commonly need to iterate backwards over the history
1828 1828 in which they are interested. Doing so has awful (quadratic-looking)
1829 1829 performance, so we use iterators in a "windowed" way.
1830 1830
1831 1831 We walk a window of revisions in the desired order. Within the
1832 1832 window, we first walk forwards to gather data, then in the desired
1833 1833 order (usually backwards) to display it.
1834 1834
1835 1835 This function returns an iterator yielding contexts. Before
1836 1836 yielding each context, the iterator will first call the prepare
1837 1837 function on each context in the window in forward order.'''
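# A typical caller (cf. finddate() above) looks like:
#
#     def prep(ctx, fns):
#         ...  # gather data for ctx while walking forwards
#
#     for ctx in walkchangerevs(repo, match, {'rev': None}, prep):
#         ...  # display ctx in the requested order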
1838 1838
1839 1839 follow = opts.get('follow') or opts.get('follow_first')
1840 1840 revs = _logrevs(repo, opts)
1841 1841 if not revs:
1842 1842 return []
1843 1843 wanted = set()
1844 1844 slowpath = match.anypats() or ((match.isexact() or match.prefix()) and
1845 1845 opts.get('removed'))
1846 1846 fncache = {}
1847 1847 change = repo.changectx
1848 1848
1849 1849 # First step is to fill wanted, the set of revisions that we want to yield.
1850 1850 # When it does not induce extra cost, we also fill fncache for revisions in
1851 1851 # wanted: a cache of filenames that were changed (ctx.files()) and that
1852 1852 # match the file filtering conditions.
1853 1853
1854 1854 if match.always():
1855 1855 # No files, no patterns. Display all revs.
1856 1856 wanted = revs
1857 1857 elif not slowpath:
1858 1858 # We only have to read through the filelog to find wanted revisions
1859 1859
1860 1860 try:
1861 1861 wanted = walkfilerevs(repo, match, follow, revs, fncache)
1862 1862 except FileWalkError:
1863 1863 slowpath = True
1864 1864
1865 1865 # We decided to fall back to the slowpath because at least one
1866 1866 # of the paths was not a file. Check to see if at least one of them
1867 1867 # existed in history; otherwise, simply return
1868 1868 for path in match.files():
1869 1869 if path == '.' or path in repo.store:
1870 1870 break
1871 1871 else:
1872 1872 return []
1873 1873
1874 1874 if slowpath:
1875 1875 # We have to read the changelog to match filenames against
1876 1876 # changed files
1877 1877
1878 1878 if follow:
1879 1879 raise error.Abort(_('can only follow copies/renames for explicit '
1880 1880 'filenames'))
1881 1881
1882 1882 # The slow path checks files modified in every changeset.
1883 1883 # This is really slow on large repos, so compute the set lazily.
1884 1884 class lazywantedset(object):
1885 1885 def __init__(self):
1886 1886 self.set = set()
1887 1887 self.revs = set(revs)
1888 1888
1889 1889 # No need to worry about locality here because it will be accessed
1890 1890 # in the same order as the increasing window below.
1891 1891 def __contains__(self, value):
1892 1892 if value in self.set:
1893 1893 return True
1894 1894 elif value not in self.revs:
1895 1895 return False
1896 1896 else:
1897 1897 self.revs.discard(value)
1898 1898 ctx = change(value)
1899 1899 matches = filter(match, ctx.files())
1900 1900 if matches:
1901 1901 fncache[value] = matches
1902 1902 self.set.add(value)
1903 1903 return True
1904 1904 return False
1905 1905
1906 1906 def discard(self, value):
1907 1907 self.revs.discard(value)
1908 1908 self.set.discard(value)
1909 1909
1910 1910 wanted = lazywantedset()
1911 1911
1912 1912 # it might be worthwhile to do this in the iterator if the rev range
1913 1913 # is descending and the prune args are all within that range
1914 1914 for rev in opts.get('prune', ()):
1915 1915 rev = repo[rev].rev()
1916 1916 ff = _followfilter(repo)
1917 1917 stop = min(revs[0], revs[-1])
1918 1918 for x in xrange(rev, stop - 1, -1):
1919 1919 if ff.match(x):
1920 1920 wanted = wanted - [x]
1921 1921
1922 1922 # Now that wanted is correctly initialized, we can iterate over the
1923 1923 # revision range, yielding only revisions in wanted.
1924 1924 def iterate():
1925 1925 if follow and match.always():
1926 1926 ff = _followfilter(repo, onlyfirst=opts.get('follow_first'))
1927 1927 def want(rev):
1928 1928 return ff.match(rev) and rev in wanted
1929 1929 else:
1930 1930 def want(rev):
1931 1931 return rev in wanted
1932 1932
1933 1933 it = iter(revs)
1934 1934 stopiteration = False
1935 1935 for windowsize in increasingwindows():
1936 1936 nrevs = []
1937 1937 for i in xrange(windowsize):
1938 1938 rev = next(it, None)
1939 1939 if rev is None:
1940 1940 stopiteration = True
1941 1941 break
1942 1942 elif want(rev):
1943 1943 nrevs.append(rev)
1944 1944 for rev in sorted(nrevs):
1945 1945 fns = fncache.get(rev)
1946 1946 ctx = change(rev)
1947 1947 if not fns:
1948 1948 def fns_generator():
1949 1949 for f in ctx.files():
1950 1950 if match(f):
1951 1951 yield f
1952 1952 fns = fns_generator()
1953 1953 prepare(ctx, fns)
1954 1954 for rev in nrevs:
1955 1955 yield change(rev)
1956 1956
1957 1957 if stopiteration:
1958 1958 break
1959 1959
1960 1960 return iterate()
1961 1961
1962 1962 def _makefollowlogfilematcher(repo, files, followfirst):
1963 1963 # When displaying a revision with --patch --follow FILE, we have
1964 1964 # to know which file of the revision must be diffed. With
1965 1965 # --follow, we want the names of the ancestors of FILE in the
1966 1966 # revision, stored in "fcache". "fcache" is populated by
1967 1967 # reproducing the graph traversal already done by --follow revset
1968 1968 # and relating revs to file names (which is not "correct" but
1969 1969 # good enough).
1970 1970 fcache = {}
1971 1971 fcacheready = [False]
1972 1972 pctx = repo['.']
1973 1973
1974 1974 def populate():
1975 1975 for fn in files:
1976 1976 fctx = pctx[fn]
1977 1977 fcache.setdefault(fctx.introrev(), set()).add(fctx.path())
1978 1978 for c in fctx.ancestors(followfirst=followfirst):
1979 1979 fcache.setdefault(c.rev(), set()).add(c.path())
1980 1980
1981 1981 def filematcher(rev):
1982 1982 if not fcacheready[0]:
1983 1983 # Lazy initialization
1984 1984 fcacheready[0] = True
1985 1985 populate()
1986 1986 return scmutil.matchfiles(repo, fcache.get(rev, []))
1987 1987
1988 1988 return filematcher
1989 1989
1990 1990 def _makenofollowlogfilematcher(repo, pats, opts):
1991 1991 '''hook for extensions to override the filematcher for non-follow cases'''
1992 1992 return None
1993 1993
1994 1994 def _makelogrevset(repo, pats, opts, revs):
1995 1995 """Return (expr, filematcher) where expr is a revset string built
1996 1996 from log options and file patterns, or None. If --stat or --patch
1997 1997 are not passed, filematcher is None. Otherwise it is a callable
1998 1998 taking a revision number and returning a match object filtering
1999 1999 the files to be detailed when displaying the revision.
2000 2000 """
2001 2001 opt2revset = {
2002 2002 'no_merges': ('not merge()', None),
2003 2003 'only_merges': ('merge()', None),
2004 2004 '_ancestors': ('ancestors(%(val)s)', None),
2005 2005 '_fancestors': ('_firstancestors(%(val)s)', None),
2006 2006 '_descendants': ('descendants(%(val)s)', None),
2007 2007 '_fdescendants': ('_firstdescendants(%(val)s)', None),
2008 2008 '_matchfiles': ('_matchfiles(%(val)s)', None),
2009 2009 'date': ('date(%(val)r)', None),
2010 2010 'branch': ('branch(%(val)r)', ' or '),
2011 2011 '_patslog': ('filelog(%(val)r)', ' or '),
2012 2012 '_patsfollow': ('follow(%(val)r)', ' or '),
2013 2013 '_patsfollowfirst': ('_followfirst(%(val)r)', ' or '),
2014 2014 'keyword': ('keyword(%(val)r)', ' or '),
2015 2015 'prune': ('not (%(val)r or ancestors(%(val)r))', ' and '),
2016 2016 'user': ('user(%(val)r)', ' or '),
2017 2017 }
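# e.g. (a sketch) opts like {'user': ['alice', 'bob'], 'keyword': ['fix']}
# contribute "(user('alice') or user('bob'))" and "(keyword('fix'))", which
# the loop at the end of this function joins into
# "((keyword('fix')) and (user('alice') or user('bob')))"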
2018 2018
2019 2019 opts = dict(opts)
2020 2020 # follow or not follow?
2021 2021 follow = opts.get('follow') or opts.get('follow_first')
2022 2022 if opts.get('follow_first'):
2023 2023 followfirst = 1
2024 2024 else:
2025 2025 followfirst = 0
2026 2026 # --follow with FILE behavior depends on revs...
2027 2027 it = iter(revs)
2028 2028 startrev = next(it)
2029 2029 followdescendants = startrev < next(it, startrev)
2030 2030
2031 2031 # branch and only_branch are really aliases and must be handled at
2032 2032 # the same time
2033 2033 opts['branch'] = opts.get('branch', []) + opts.get('only_branch', [])
2034 2034 opts['branch'] = [repo.lookupbranch(b) for b in opts['branch']]
2035 2035 # pats/include/exclude are passed to match.match() directly in
2036 2036 # _matchfiles() revset but walkchangerevs() builds its matcher with
2037 2037 # scmutil.match(). The difference is input pats are globbed on
2038 2038 # platforms without shell expansion (Windows).
2039 2039 wctx = repo[None]
2040 2040 match, pats = scmutil.matchandpats(wctx, pats, opts)
2041 2041 slowpath = match.anypats() or ((match.isexact() or match.prefix()) and
2042 2042 opts.get('removed'))
2043 2043 if not slowpath:
2044 2044 for f in match.files():
2045 2045 if follow and f not in wctx:
2046 2046 # If the file exists, it may be a directory, so let it
2047 2047 # take the slow path.
2048 2048 if os.path.exists(repo.wjoin(f)):
2049 2049 slowpath = True
2050 2050 continue
2051 2051 else:
2052 2052 raise error.Abort(_('cannot follow file not in parent '
2053 2053 'revision: "%s"') % f)
2054 2054 filelog = repo.file(f)
2055 2055 if not filelog:
2056 2056 # A zero count may be a directory or deleted file, so
2057 2057 # try to find matching entries on the slow path.
2058 2058 if follow:
2059 2059 raise error.Abort(
2060 2060 _('cannot follow nonexistent file: "%s"') % f)
2061 2061 slowpath = True
2062 2062
2063 2063 # We decided to fall back to the slowpath because at least one
2064 2064 # of the paths was not a file. Check to see if at least one of them
2065 2065 # existed in history; in that case, we'll continue down the
2066 2066 # slowpath; otherwise, we can turn off the slowpath
2067 2067 if slowpath:
2068 2068 for path in match.files():
2069 2069 if path == '.' or path in repo.store:
2070 2070 break
2071 2071 else:
2072 2072 slowpath = False
2073 2073
2074 2074 fpats = ('_patsfollow', '_patsfollowfirst')
2075 2075 fnopats = (('_ancestors', '_fancestors'),
2076 2076 ('_descendants', '_fdescendants'))
2077 2077 if slowpath:
2078 2078 # See walkchangerevs() slow path.
2079 2079 #
2080 2080 # pats/include/exclude cannot be represented as separate
2081 2081 # revset expressions as their filtering logic applies at file
2082 2082 # level. For instance "-I a -X a" matches a revision touching
2083 2083 # "a" and "b" while "file(a) and not file(b)" does
2084 2084 # not. Besides, filesets are evaluated against the working
2085 2085 # directory.
2086 2086 matchargs = ['r:', 'd:relpath']
2087 2087 for p in pats:
2088 2088 matchargs.append('p:' + p)
2089 2089 for p in opts.get('include', []):
2090 2090 matchargs.append('i:' + p)
2091 2091 for p in opts.get('exclude', []):
2092 2092 matchargs.append('x:' + p)
2093 2093 matchargs = ','.join(('%r' % p) for p in matchargs)
2094 2094 opts['_matchfiles'] = matchargs
2095 2095 if follow:
2096 2096 opts[fnopats[0][followfirst]] = '.'
2097 2097 else:
2098 2098 if follow:
2099 2099 if pats:
2100 2100 # follow() revset interprets its file argument as a
2101 2101 # manifest entry, so use match.files(), not pats.
2102 2102 opts[fpats[followfirst]] = list(match.files())
2103 2103 else:
2104 2104 op = fnopats[followdescendants][followfirst]
2105 2105 opts[op] = 'rev(%d)' % startrev
2106 2106 else:
2107 2107 opts['_patslog'] = list(pats)
2108 2108
2109 2109 filematcher = None
2110 2110 if opts.get('patch') or opts.get('stat'):
2111 2111 # When following files, track renames via a special matcher.
2112 2112 # If we're forced to take the slowpath, it means we're following
2113 2113 # at least one pattern/directory, so don't bother with rename tracking.
2114 2114 if follow and not match.always() and not slowpath:
2115 2115 # _makefollowlogfilematcher expects its files argument to be
2116 2116 # relative to the repo root, so use match.files(), not pats.
2117 2117 filematcher = _makefollowlogfilematcher(repo, match.files(),
2118 2118 followfirst)
2119 2119 else:
2120 2120 filematcher = _makenofollowlogfilematcher(repo, pats, opts)
2121 2121 if filematcher is None:
2122 2122 filematcher = lambda rev: match
2123 2123
2124 2124 expr = []
2125 2125 for op, val in sorted(opts.iteritems()):
2126 2126 if not val:
2127 2127 continue
2128 2128 if op not in opt2revset:
2129 2129 continue
2130 2130 revop, andor = opt2revset[op]
2131 2131 if '%(val)' not in revop:
2132 2132 expr.append(revop)
2133 2133 else:
2134 2134 if not isinstance(val, list):
2135 2135 e = revop % {'val': val}
2136 2136 else:
2137 2137 e = '(' + andor.join((revop % {'val': v}) for v in val) + ')'
2138 2138 expr.append(e)
2139 2139
2140 2140 if expr:
2141 2141 expr = '(' + ' and '.join(expr) + ')'
2142 2142 else:
2143 2143 expr = None
2144 2144 return expr, filematcher
2145 2145
2146 2146 def _logrevs(repo, opts):
2147 2147 # Default --rev value depends on --follow but --follow behavior
2148 2148 # depends on revisions resolved from --rev...
2149 2149 follow = opts.get('follow') or opts.get('follow_first')
2150 2150 if opts.get('rev'):
2151 2151 revs = scmutil.revrange(repo, opts['rev'])
2152 2152 elif follow and repo.dirstate.p1() == nullid:
2153 2153 revs = revset.baseset()
2154 2154 elif follow:
2155 2155 revs = repo.revs('reverse(:.)')
2156 2156 else:
2157 2157 revs = revset.spanset(repo)
2158 2158 revs.reverse()
2159 2159 return revs
2160 2160
2161 2161 def getgraphlogrevs(repo, pats, opts):
2162 2162 """Return (revs, expr, filematcher) where revs is an iterable of
2163 2163 revision numbers, expr is a revset string built from log options
2164 2164 and file patterns or None, and is used to filter 'revs'. If --stat or
2165 2165 --patch are not passed, filematcher is None. Otherwise it is a
2166 2166 callable taking a revision number and returning a match object
2167 2167 filtering the files to be detailed when displaying the revision.
2168 2168 """
2169 2169 limit = loglimit(opts)
2170 2170 revs = _logrevs(repo, opts)
2171 2171 if not revs:
2172 2172 return revset.baseset(), None, None
2173 2173 expr, filematcher = _makelogrevset(repo, pats, opts, revs)
2174 2174 if opts.get('rev'):
2175 2175 # User-specified revs might be unsorted, but don't sort before
2176 2176 # _makelogrevset because it might depend on the order of revs
2177 2177 if not (revs.isdescending() or revs.istopo()):
2178 2178 revs.sort(reverse=True)
2179 2179 if expr:
2180 2180 matcher = revset.match(repo.ui, expr, order=revset.followorder)
2181 2181 revs = matcher(repo, revs)
2182 2182 if limit is not None:
2183 2183 limitedrevs = []
2184 2184 for idx, rev in enumerate(revs):
2185 2185 if idx >= limit:
2186 2186 break
2187 2187 limitedrevs.append(rev)
2188 2188 revs = revset.baseset(limitedrevs)
2189 2189
2190 2190 return revs, expr, filematcher
2191 2191
2192 2192 def getlogrevs(repo, pats, opts):
2193 2193 """Return (revs, expr, filematcher) where revs is an iterable of
2194 2194 revision numbers, expr is a revset string built from log options
2195 2195 and file patterns or None, and is used to filter 'revs'. If --stat or
2196 2196 --patch are not passed, filematcher is None. Otherwise it is a
2197 2197 callable taking a revision number and returning a match object
2198 2198 filtering the files to be detailed when displaying the revision.
2199 2199 """
2200 2200 limit = loglimit(opts)
2201 2201 revs = _logrevs(repo, opts)
2202 2202 if not revs:
2203 2203 return revset.baseset([]), None, None
2204 2204 expr, filematcher = _makelogrevset(repo, pats, opts, revs)
2205 2205 if expr:
2206 2206 matcher = revset.match(repo.ui, expr, order=revset.followorder)
2207 2207 revs = matcher(repo, revs)
2208 2208 if limit is not None:
2209 2209 limitedrevs = []
2210 2210 for idx, r in enumerate(revs):
2211 2211 if limit <= idx:
2212 2212 break
2213 2213 limitedrevs.append(r)
2214 2214 revs = revset.baseset(limitedrevs)
2215 2215
2216 2216 return revs, expr, filematcher
2217 2217
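# The per-changeset node symbol used by displaygraph() below can be overridden
# via the experimental ui.graphnodetemplate config; e.g. (a sketch):
#
#     [ui]
#     graphnodetemplate = {if(bookmarks, '*', graphnode)}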
2218 2218 def _graphnodeformatter(ui, displayer):
2219 2219 spec = ui.config('ui', 'graphnodetemplate')
2220 2220 if not spec:
2221 2221 return templatekw.showgraphnode # fast path for "{graphnode}"
2222 2222
2223 2223 templ = formatter.gettemplater(ui, 'graphnode', spec)
2224 2224 cache = {}
2225 2225 if isinstance(displayer, changeset_templater):
2226 2226 cache = displayer.cache # reuse cache of slow templates
2227 2227 props = templatekw.keywords.copy()
2228 2228 props['templ'] = templ
2229 2229 props['cache'] = cache
2230 2230 def formatnode(repo, ctx):
2231 2231 props['ctx'] = ctx
2232 2232 props['repo'] = repo
2233 2233 props['ui'] = repo.ui
2234 2234 props['revcache'] = {}
2235 2235 return templater.stringify(templ('graphnode', **props))
2236 2236 return formatnode
2237 2237
2238 2238 def displaygraph(ui, repo, dag, displayer, edgefn, getrenamed=None,
2239 2239 filematcher=None):
2240 2240 formatnode = _graphnodeformatter(ui, displayer)
2241 2241 state = graphmod.asciistate()
2242 2242 styles = state['styles']
2243 2243
2244 2244 # only set graph styling if HGPLAIN is not set.
2245 2245 if ui.plain('graph'):
2246 2246 # set all edge styles to |, the default pre-3.8 behaviour
2247 2247 styles.update(dict.fromkeys(styles, '|'))
2248 2248 else:
2249 2249 edgetypes = {
2250 2250 'parent': graphmod.PARENT,
2251 2251 'grandparent': graphmod.GRANDPARENT,
2252 2252 'missing': graphmod.MISSINGPARENT
2253 2253 }
2254 2254 for name, key in edgetypes.items():
2255 2255 # experimental config: experimental.graphstyle.*
2256 2256 styles[key] = ui.config('experimental', 'graphstyle.%s' % name,
2257 2257 styles[key])
2258 2258 if not styles[key]:
2259 2259 styles[key] = None
2260 2260
2261 2261 # experimental config: experimental.graphshorten
2262 2262 state['graphshorten'] = ui.configbool('experimental', 'graphshorten')
2263 2263
2264 2264 for rev, type, ctx, parents in dag:
2265 2265 char = formatnode(repo, ctx)
2266 2266 copies = None
2267 2267 if getrenamed and ctx.rev():
2268 2268 copies = []
2269 2269 for fn in ctx.files():
2270 2270 rename = getrenamed(fn, ctx.rev())
2271 2271 if rename:
2272 2272 copies.append((fn, rename[0]))
2273 2273 revmatchfn = None
2274 2274 if filematcher is not None:
2275 2275 revmatchfn = filematcher(ctx.rev())
2276 2276 displayer.show(ctx, copies=copies, matchfn=revmatchfn)
2277 2277 lines = displayer.hunk.pop(rev).split('\n')
2278 2278 if not lines[-1]:
2279 2279 del lines[-1]
2280 2280 displayer.flush(ctx)
2281 2281 edges = edgefn(type, char, lines, state, rev, parents)
2282 2282 for type, char, lines, coldata in edges:
2283 2283 graphmod.ascii(ui, state, type, char, lines, coldata)
2284 2284 displayer.close()
2285 2285
2286 2286 def graphlog(ui, repo, *pats, **opts):
2287 2287 # Parameters are identical to those of the log command
2288 2288 revs, expr, filematcher = getgraphlogrevs(repo, pats, opts)
2289 2289 revdag = graphmod.dagwalker(repo, revs)
2290 2290
2291 2291 getrenamed = None
2292 2292 if opts.get('copies'):
2293 2293 endrev = None
2294 2294 if opts.get('rev'):
2295 2295 endrev = scmutil.revrange(repo, opts.get('rev')).max() + 1
2296 2296 getrenamed = templatekw.getrenamedfn(repo, endrev=endrev)
2297 2297 displayer = show_changeset(ui, repo, opts, buffered=True)
2298 2298 displaygraph(ui, repo, revdag, displayer, graphmod.asciiedges, getrenamed,
2299 2299 filematcher)
2300 2300
2301 2301 def checkunsupportedgraphflags(pats, opts):
2302 2302 for op in ["newest_first"]:
2303 2303 if op in opts and opts[op]:
2304 2304 raise error.Abort(_("-G/--graph option is incompatible with --%s")
2305 2305 % op.replace("_", "-"))
2306 2306
2307 2307 def graphrevs(repo, nodes, opts):
2308 2308 limit = loglimit(opts)
2309 2309 nodes.reverse()
2310 2310 if limit is not None:
2311 2311 nodes = nodes[:limit]
2312 2312 return graphmod.nodes(repo, nodes)
2313 2313
2314 2314 def add(ui, repo, match, prefix, explicitonly, **opts):
2315 2315 join = lambda f: os.path.join(prefix, f)
2316 2316 bad = []
2317 2317
2318 2318 badfn = lambda x, y: bad.append(x) or match.bad(x, y)
2319 2319 names = []
2320 2320 wctx = repo[None]
2321 2321 cca = None
2322 2322 abort, warn = scmutil.checkportabilityalert(ui)
2323 2323 if abort or warn:
2324 2324 cca = scmutil.casecollisionauditor(ui, abort, repo.dirstate)
2325 2325
2326 2326 badmatch = matchmod.badmatch(match, badfn)
2327 2327 dirstate = repo.dirstate
2328 2328 # We don't want to just call wctx.walk here, since it would return a lot of
2329 2329 # clean files, which we aren't interested in, and walking them takes time.
2330 2330 for f in sorted(dirstate.walk(badmatch, sorted(wctx.substate),
2331 2331 True, False, full=False)):
2332 2332 exact = match.exact(f)
2333 2333 if exact or not explicitonly and f not in wctx and repo.wvfs.lexists(f):
2334 2334 if cca:
2335 2335 cca(f)
2336 2336 names.append(f)
2337 2337 if ui.verbose or not exact:
2338 2338 ui.status(_('adding %s\n') % match.rel(f))
2339 2339
2340 2340 for subpath in sorted(wctx.substate):
2341 2341 sub = wctx.sub(subpath)
2342 2342 try:
2343 2343 submatch = matchmod.subdirmatcher(subpath, match)
2344 2344 if opts.get('subrepos'):
2345 2345 bad.extend(sub.add(ui, submatch, prefix, False, **opts))
2346 2346 else:
2347 2347 bad.extend(sub.add(ui, submatch, prefix, True, **opts))
2348 2348 except error.LookupError:
2349 2349 ui.status(_("skipping missing subrepository: %s\n")
2350 2350 % join(subpath))
2351 2351
2352 2352 if not opts.get('dry_run'):
2353 2353 rejected = wctx.add(names, prefix)
2354 2354 bad.extend(f for f in rejected if f in match.files())
2355 2355 return bad
2356 2356
2357 2357 def forget(ui, repo, match, prefix, explicitonly):
2358 2358 join = lambda f: os.path.join(prefix, f)
2359 2359 bad = []
2360 2360 badfn = lambda x, y: bad.append(x) or match.bad(x, y)
2361 2361 wctx = repo[None]
2362 2362 forgot = []
2363 2363
2364 2364 s = repo.status(match=matchmod.badmatch(match, badfn), clean=True)
2365 2365 forget = sorted(s[0] + s[1] + s[3] + s[6])
2366 2366 if explicitonly:
2367 2367 forget = [f for f in forget if match.exact(f)]
2368 2368
2369 2369 for subpath in sorted(wctx.substate):
2370 2370 sub = wctx.sub(subpath)
2371 2371 try:
2372 2372 submatch = matchmod.subdirmatcher(subpath, match)
2373 2373 subbad, subforgot = sub.forget(submatch, prefix)
2374 2374 bad.extend([subpath + '/' + f for f in subbad])
2375 2375 forgot.extend([subpath + '/' + f for f in subforgot])
2376 2376 except error.LookupError:
2377 2377 ui.status(_("skipping missing subrepository: %s\n")
2378 2378 % join(subpath))
2379 2379
2380 2380 if not explicitonly:
2381 2381 for f in match.files():
2382 2382 if f not in repo.dirstate and not repo.wvfs.isdir(f):
2383 2383 if f not in forgot:
2384 2384 if repo.wvfs.exists(f):
2385 2385 # Don't complain if the exact case match wasn't given.
2386 2386 # But don't do this until after checking 'forgot', so
2387 2387 # that subrepo files aren't normalized, and this op is
2388 2388 # purely from data cached by the status walk above.
2389 2389 if repo.dirstate.normalize(f) in repo.dirstate:
2390 2390 continue
2391 2391 ui.warn(_('not removing %s: '
2392 2392 'file is already untracked\n')
2393 2393 % match.rel(f))
2394 2394 bad.append(f)
2395 2395
2396 2396 for f in forget:
2397 2397 if ui.verbose or not match.exact(f):
2398 2398 ui.status(_('removing %s\n') % match.rel(f))
2399 2399
2400 2400 rejected = wctx.forget(forget, prefix)
2401 2401 bad.extend(f for f in rejected if f in match.files())
2402 2402 forgot.extend(f for f in forget if f not in rejected)
2403 2403 return bad, forgot
2404 2404
2405 2405 def files(ui, ctx, m, fm, fmt, subrepos):
2406 2406 rev = ctx.rev()
2407 2407 ret = 1
2408 2408 ds = ctx.repo().dirstate
2409 2409
2410 2410 for f in ctx.matches(m):
2411 2411 if rev is None and ds[f] == 'r':
2412 2412 continue
2413 2413 fm.startitem()
2414 2414 if ui.verbose:
2415 2415 fc = ctx[f]
2416 2416 fm.write('size flags', '% 10d % 1s ', fc.size(), fc.flags())
2417 2417 fm.data(abspath=f)
2418 2418 fm.write('path', fmt, m.rel(f))
2419 2419 ret = 0
2420 2420
2421 2421 for subpath in sorted(ctx.substate):
2422 2422 submatch = matchmod.subdirmatcher(subpath, m)
2423 2423 if (subrepos or m.exact(subpath) or any(submatch.files())):
2424 2424 sub = ctx.sub(subpath)
2425 2425 try:
2426 2426 recurse = m.exact(subpath) or subrepos
2427 2427 if sub.printfiles(ui, submatch, fm, fmt, recurse) == 0:
2428 2428 ret = 0
2429 2429 except error.LookupError:
2430 2430 ui.status(_("skipping missing subrepository: %s\n")
2431 2431 % m.abs(subpath))
2432 2432
2433 2433 return ret
2434 2434
2435 2435 def remove(ui, repo, m, prefix, after, force, subrepos, warnings=None):
2436 2436 join = lambda f: os.path.join(prefix, f)
2437 2437 ret = 0
2438 2438 s = repo.status(match=m, clean=True)
2439 2439 modified, added, deleted, clean = s[0], s[1], s[3], s[6]
2440 2440
2441 2441 wctx = repo[None]
2442 2442
2443 2443 if warnings is None:
2444 2444 warnings = []
2445 2445 warn = True
2446 2446 else:
2447 2447 warn = False
2448 2448
2449 2449 subs = sorted(wctx.substate)
2450 2450 total = len(subs)
2451 2451 count = 0
2452 2452 for subpath in subs:
2453 2453 count += 1
2454 2454 submatch = matchmod.subdirmatcher(subpath, m)
2455 2455 if subrepos or m.exact(subpath) or any(submatch.files()):
2456 2456 ui.progress(_('searching'), count, total=total, unit=_('subrepos'))
2457 2457 sub = wctx.sub(subpath)
2458 2458 try:
2459 2459 if sub.removefiles(submatch, prefix, after, force, subrepos,
2460 2460 warnings):
2461 2461 ret = 1
2462 2462 except error.LookupError:
2463 2463 warnings.append(_("skipping missing subrepository: %s\n")
2464 2464 % join(subpath))
2465 2465 ui.progress(_('searching'), None)
2466 2466
2467 2467 # warn about failure to delete explicit files/dirs
2468 2468 deleteddirs = util.dirs(deleted)
2469 2469 files = m.files()
2470 2470 total = len(files)
2471 2471 count = 0
2472 2472 for f in files:
2473 2473 def insubrepo():
2474 2474 for subpath in wctx.substate:
2475 2475 if f.startswith(subpath + '/'):
2476 2476 return True
2477 2477 return False
2478 2478
2479 2479 count += 1
2480 2480 ui.progress(_('deleting'), count, total=total, unit=_('files'))
2481 2481 isdir = f in deleteddirs or wctx.hasdir(f)
2482 2482 if (f in repo.dirstate or isdir or f == '.'
2483 2483 or insubrepo() or f in subs):
2484 2484 continue
2485 2485
2486 2486 if repo.wvfs.exists(f):
2487 2487 if repo.wvfs.isdir(f):
2488 2488 warnings.append(_('not removing %s: no tracked files\n')
2489 2489 % m.rel(f))
2490 2490 else:
2491 2491 warnings.append(_('not removing %s: file is untracked\n')
2492 2492 % m.rel(f))
2493 2493 # missing files will generate a warning elsewhere
2494 2494 ret = 1
2495 2495 ui.progress(_('deleting'), None)
2496 2496
2497 2497 if force:
2498 2498 list = modified + deleted + clean + added
2499 2499 elif after:
2500 2500 list = deleted
2501 2501 remaining = modified + added + clean
2502 2502 total = len(remaining)
2503 2503 count = 0
2504 2504 for f in remaining:
2505 2505 count += 1
2506 2506 ui.progress(_('skipping'), count, total=total, unit=_('files'))
2507 2507 warnings.append(_('not removing %s: file still exists\n')
2508 2508 % m.rel(f))
2509 2509 ret = 1
2510 2510 ui.progress(_('skipping'), None)
2511 2511 else:
2512 2512 list = deleted + clean
2513 2513 total = len(modified) + len(added)
2514 2514 count = 0
2515 2515 for f in modified:
2516 2516 count += 1
2517 2517 ui.progress(_('skipping'), count, total=total, unit=_('files'))
2518 2518 warnings.append(_('not removing %s: file is modified (use -f'
2519 2519 ' to force removal)\n') % m.rel(f))
2520 2520 ret = 1
2521 2521 for f in added:
2522 2522 count += 1
2523 2523 ui.progress(_('skipping'), count, total=total, unit=_('files'))
2524 2524 warnings.append(_("not removing %s: file has been marked for add"
2525 2525 " (use 'hg forget' to undo add)\n") % m.rel(f))
2526 2526 ret = 1
2527 2527 ui.progress(_('skipping'), None)
2528 2528
2529 2529 list = sorted(list)
2530 2530 total = len(list)
2531 2531 count = 0
2532 2532 for f in list:
2533 2533 count += 1
2534 2534 if ui.verbose or not m.exact(f):
2535 2535 ui.progress(_('deleting'), count, total=total, unit=_('files'))
2536 2536 ui.status(_('removing %s\n') % m.rel(f))
2537 2537 ui.progress(_('deleting'), None)
2538 2538
2539 2539 with repo.wlock():
2540 2540 if not after:
2541 2541 for f in list:
2542 2542 if f in added:
2543 2543 continue # we never unlink added files on remove
2544 2544 util.unlinkpath(repo.wjoin(f), ignoremissing=True)
2545 2545 repo[None].forget(list)
2546 2546
2547 2547 if warn:
2548 2548 for warning in warnings:
2549 2549 ui.warn(warning)
2550 2550
2551 2551 return ret
2552 2552
2553 2553 def cat(ui, repo, ctx, matcher, prefix, **opts):
2554 2554 err = 1
2555 2555
2556 2556 def write(path):
2557 2557 fp = makefileobj(repo, opts.get('output'), ctx.node(),
2558 2558 pathname=os.path.join(prefix, path))
2559 2559 data = ctx[path].data()
2560 2560 if opts.get('decode'):
2561 2561 data = repo.wwritedata(path, data)
2562 2562 fp.write(data)
2563 2563 fp.close()
2564 2564
2565 2565 # Automation often uses hg cat on single files, so special case it
2566 2566 # for performance to avoid the cost of parsing the manifest.
2567 2567 if len(matcher.files()) == 1 and not matcher.anypats():
2568 2568 file = matcher.files()[0]
2569 2569 mfl = repo.manifestlog
2570 2570 mfnode = ctx.manifestnode()
2571 2571 try:
2572 2572 if mfnode and mfl[mfnode].find(file)[0]:
2573 2573 write(file)
2574 2574 return 0
2575 2575 except KeyError:
2576 2576 pass
2577 2577
2578 2578 for abs in ctx.walk(matcher):
2579 2579 write(abs)
2580 2580 err = 0
2581 2581
2582 2582 for subpath in sorted(ctx.substate):
2583 2583 sub = ctx.sub(subpath)
2584 2584 try:
2585 2585 submatch = matchmod.subdirmatcher(subpath, matcher)
2586 2586
2587 2587 if not sub.cat(submatch, os.path.join(prefix, sub._path),
2588 2588 **opts):
2589 2589 err = 0
2590 2590 except error.RepoLookupError:
2591 2591 ui.status(_("skipping missing subrepository: %s\n")
2592 2592 % os.path.join(prefix, subpath))
2593 2593
2594 2594 return err
2595 2595
2596 2596 def commit(ui, repo, commitfunc, pats, opts):
2597 2597 '''commit the specified files or all outstanding changes'''
2598 2598 date = opts.get('date')
2599 2599 if date:
2600 2600 opts['date'] = util.parsedate(date)
2601 2601 message = logmessage(ui, opts)
2602 2602 matcher = scmutil.match(repo[None], pats, opts)
2603 2603
2604 2604 # extract addremove carefully -- this function can be called from a command
2605 2605 # that doesn't support addremove
2606 2606 if opts.get('addremove'):
2607 2607 if scmutil.addremove(repo, matcher, "", opts) != 0:
2608 2608 raise error.Abort(
2609 2609 _("failed to mark all new/missing files as added/removed"))
2610 2610
2611 2611 return commitfunc(ui, repo, message, matcher, opts)
2612 2612
2613 2613 def samefile(f, ctx1, ctx2):
2614 2614 if f in ctx1.manifest():
2615 2615 a = ctx1.filectx(f)
2616 2616 if f in ctx2.manifest():
2617 2617 b = ctx2.filectx(f)
2618 2618 return (not a.cmp(b)
2619 2619 and a.flags() == b.flags())
2620 2620 else:
2621 2621 return False
2622 2622 else:
2623 2623 return f not in ctx2.manifest()
2624 2624
2625 2625 def amend(ui, repo, commitfunc, old, extra, pats, opts):
2626 2626 # avoid cycle context -> subrepo -> cmdutil
2627 2627 from . import context
2628 2628
2629 2629 # amend will reuse the existing user if not specified, but the obsolete
2630 2630 # marker creation requires that the current user's name is specified.
2631 2631 if obsolete.isenabled(repo, obsolete.createmarkersopt):
2632 2632 ui.username() # raise exception if username not set
2633 2633
2634 2634 ui.note(_('amending changeset %s\n') % old)
2635 2635 base = old.p1()
2636 2636 createmarkers = obsolete.isenabled(repo, obsolete.createmarkersopt)
2637 2637
2638 2638 wlock = lock = newid = None
2639 2639 try:
2640 2640 wlock = repo.wlock()
2641 2641 lock = repo.lock()
2642 2642 with repo.transaction('amend') as tr:
2643 2643 # See if we got a message from -m or -l, if not, open the editor
2644 2644 # with the message of the changeset to amend
2645 2645 message = logmessage(ui, opts)
2646 2646 # ensure logfile does not conflict with later enforcement of the
2647 2647 # message. potential logfile content has been processed by
2648 2648 # `logmessage` anyway.
2649 2649 opts.pop('logfile')
2650 2650 # First, do a regular commit to record all changes in the working
2651 2651 # directory (if there are any)
2652 2652 ui.callhooks = False
2653 2653 activebookmark = repo._bookmarks.active
2654 2654 try:
2655 2655 repo._bookmarks.active = None
2656 2656 opts['message'] = 'temporary amend commit for %s' % old
2657 2657 node = commit(ui, repo, commitfunc, pats, opts)
2658 2658 finally:
2659 2659 repo._bookmarks.active = activebookmark
2660 2660 repo._bookmarks.recordchange(tr)
2661 2661 ui.callhooks = True
2662 2662 ctx = repo[node]
2663 2663
2664 2664 # Participating changesets:
2665 2665 #
2666 2666 # node/ctx o - new (intermediate) commit that contains changes
2667 2667 # | from working dir to go into amending commit
2668 2668 # | (or a workingctx if there were no changes)
2669 2669 # |
2670 2670 # old o - changeset to amend
2671 2671 # |
2672 2672 # base o - parent of amending changeset
2673 2673
2674 2674 # Update extra dict from amended commit (e.g. to preserve graft
2675 2675 # source)
2676 2676 extra.update(old.extra())
2677 2677
2678 2678 # Also update it from the intermediate commit or from the wctx
2679 2679 extra.update(ctx.extra())
2680 2680
2681 2681 if len(old.parents()) > 1:
2682 2682 # ctx.files() isn't reliable for merges, so fall back to the
2683 2683 # slower repo.status() method
2684 2684 files = set([fn for st in repo.status(base, old)[:3]
2685 2685 for fn in st])
2686 2686 else:
2687 2687 files = set(old.files())
2688 2688
2689 2689 # Second, we use either the commit we just did, or, if there were no
2690 2690 # changes, the parent of the working directory as the version of the
2691 2691 # files in the final amend commit
2692 2692 if node:
2693 2693 ui.note(_('copying changeset %s to %s\n') % (ctx, base))
2694 2694
2695 2695 user = ctx.user()
2696 2696 date = ctx.date()
2697 2697 # Recompute copies (avoid recording a -> b -> a)
2698 2698 copied = copies.pathcopies(base, ctx)
2699 2699 if old.p2().node() != nullid: # old.p2 is a bound method; the bare name was always true
2700 2700 copied.update(copies.pathcopies(old.p2(), ctx))
2701 2701
2702 2702 # Prune files which were reverted by the updates: if old
2703 2703 # introduced file X and our intermediate commit, node,
2704 2704 # renamed that file, then those two files are the same and
2705 2705 # we can discard X from our list of files. Likewise if X
2706 2706 # was deleted, it's no longer relevant
2707 2707 files.update(ctx.files())
2708 2708 files = [f for f in files if not samefile(f, ctx, base)]
2709 2709
2710 2710 def filectxfn(repo, ctx_, path):
2711 2711 try:
2712 2712 fctx = ctx[path]
2713 2713 flags = fctx.flags()
2714 2714 mctx = context.memfilectx(repo,
2715 2715 fctx.path(), fctx.data(),
2716 2716 islink='l' in flags,
2717 2717 isexec='x' in flags,
2718 2718 copied=copied.get(path))
2719 2719 return mctx
2720 2720 except KeyError:
2721 2721 return None
2722 2722 else:
2723 2723 ui.note(_('copying changeset %s to %s\n') % (old, base))
2724 2724
2725 2725 # Use version of files as in the old cset
2726 2726 def filectxfn(repo, ctx_, path):
2727 2727 try:
2728 2728 return old.filectx(path)
2729 2729 except KeyError:
2730 2730 return None
2731 2731
2732 2732 user = opts.get('user') or old.user()
2733 2733 date = opts.get('date') or old.date()
2734 2734 editform = mergeeditform(old, 'commit.amend')
2735 2735 editor = getcommiteditor(editform=editform, **opts)
2736 2736 if not message:
2737 2737 editor = getcommiteditor(edit=True, editform=editform)
2738 2738 message = old.description()
2739 2739
2740 2740 pureextra = extra.copy()
2741 2741 extra['amend_source'] = old.hex()
2742 2742
2743 2743 new = context.memctx(repo,
2744 2744 parents=[base.node(), old.p2().node()],
2745 2745 text=message,
2746 2746 files=files,
2747 2747 filectxfn=filectxfn,
2748 2748 user=user,
2749 2749 date=date,
2750 2750 extra=extra,
2751 2751 editor=editor)
2752 2752
2753 2753 newdesc = changelog.stripdesc(new.description())
2754 2754 if ((not node)
2755 2755 and newdesc == old.description()
2756 2756 and user == old.user()
2757 2757 and date == old.date()
2758 2758 and pureextra == old.extra()):
2759 2759 # nothing changed. continuing here would create a new node
2760 2760 # anyway because of the amend_source noise.
2761 2761 #
2762 2762 # This is not what we expect from amend.
2763 2763 return old.node()
2764 2764
2765 2765 ph = repo.ui.config('phases', 'new-commit', phases.draft)
2766 2766 try:
2767 2767 if opts.get('secret'):
2768 2768 commitphase = 'secret'
2769 2769 else:
2770 2770 commitphase = old.phase()
2771 2771 repo.ui.setconfig('phases', 'new-commit', commitphase, 'amend')
2772 2772 newid = repo.commitctx(new)
2773 2773 finally:
2774 2774 repo.ui.setconfig('phases', 'new-commit', ph, 'amend')
2775 2775 if newid != old.node():
2776 2776 # Reroute the working copy parent to the new changeset
2777 2777 repo.setparents(newid, nullid)
2778 2778
2779 2779 # Move bookmarks from old parent to amend commit
2780 2780 bms = repo.nodebookmarks(old.node())
2781 2781 if bms:
2782 2782 marks = repo._bookmarks
2783 2783 for bm in bms:
2784 2784 ui.debug('moving bookmark %r from %s to %s\n' %
2785 2785 (bm, old.hex(), hex(newid)))
2786 2786 marks[bm] = newid
2787 2787 marks.recordchange(tr)
2788 2788 # commit the whole amend process
2789 2789 if createmarkers:
2790 2790 # mark the new changeset as successor of the rewritten one
2791 2791 new = repo[newid]
2792 2792 obs = [(old, (new,))]
2793 2793 if node:
2794 2794 obs.append((ctx, ()))
2795 2795
2796 2796 obsolete.createmarkers(repo, obs)
2797 2797 if not createmarkers and newid != old.node():
2798 2798 # Strip the intermediate commit (if there was one) and the amended
2799 2799 # commit
2800 2800 if node:
2801 2801 ui.note(_('stripping intermediate changeset %s\n') % ctx)
2802 2802 ui.note(_('stripping amended changeset %s\n') % old)
2803 2803 repair.strip(ui, repo, old.node(), topic='amend-backup')
2804 2804 finally:
2805 2805 lockmod.release(lock, wlock)
2806 2806 return newid
2807 2807
2808 2808 def commiteditor(repo, ctx, subs, editform=''):
2809 2809 if ctx.description():
2810 2810 return ctx.description()
2811 2811 return commitforceeditor(repo, ctx, subs, editform=editform,
2812 2812 unchangedmessagedetection=True)
2813 2813
2814 2814 def commitforceeditor(repo, ctx, subs, finishdesc=None, extramsg=None,
2815 2815 editform='', unchangedmessagedetection=False):
2816 2816 if not extramsg:
2817 2817 extramsg = _("Leave message empty to abort commit.")
2818 2818
2819 2819 forms = [e for e in editform.split('.') if e]
2820 2820 forms.insert(0, 'changeset')
2821 2821 templatetext = None
2822 2822 while forms:
2823 2823 tmpl = repo.ui.config('committemplate', '.'.join(forms))
2824 2824 if tmpl:
2825 2825 templatetext = committext = buildcommittemplate(
2826 2826 repo, ctx, subs, extramsg, tmpl)
2827 2827 break
2828 2828 forms.pop()
2829 2829 else:
2830 2830 committext = buildcommittext(repo, ctx, subs, extramsg)
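# e.g. for editform 'commit.amend' the loop above tries the config keys
# 'changeset.commit.amend', 'changeset.commit', then 'changeset' from the
# [committemplate] section, most specific first.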
2831 2831
2832 2832 # run editor in the repository root
2833 2833 olddir = os.getcwd()
2834 2834 os.chdir(repo.root)
2835 2835
2836 2836 # make in-memory changes visible to external process
2837 2837 tr = repo.currenttransaction()
2838 2838 repo.dirstate.write(tr)
2839 2839 pending = tr and tr.writepending() and repo.root
2840 2840
2841 2841 editortext = repo.ui.edit(committext, ctx.user(), ctx.extra(),
2842 2842 editform=editform, pending=pending)
2843 2843 text = re.sub("(?m)^HG:.*(\n|$)", "", editortext)
2844 2844 os.chdir(olddir)
2845 2845
2846 2846 if finishdesc:
2847 2847 text = finishdesc(text)
2848 2848 if not text.strip():
2849 2849 raise error.Abort(_("empty commit message"))
2850 2850 if unchangedmessagedetection and editortext == templatetext:
2851 2851 raise error.Abort(_("commit message unchanged"))
2852 2852
2853 2853 return text
2854 2854
2855 2855 def buildcommittemplate(repo, ctx, subs, extramsg, tmpl):
2856 2856 ui = repo.ui
2857 2857 tmpl, mapfile = gettemplate(ui, tmpl, None)
2858 2858
2859 2859 t = changeset_templater(ui, repo, None, {}, tmpl, mapfile, False)
2860 2860
2861 2861 for k, v in repo.ui.configitems('committemplate'):
2862 2862 if k != 'changeset':
2863 2863 t.t.cache[k] = v
2864 2864
2865 2865 if not extramsg:
2866 2866 extramsg = '' # ensure that extramsg is string
2867 2867
2868 2868 ui.pushbuffer()
2869 2869 t.show(ctx, extramsg=extramsg)
2870 2870 return ui.popbuffer()
2871 2871
2872 2872 def hgprefix(msg):
2873 2873 return "\n".join(["HG: %s" % a for a in msg.split("\n") if a])
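# e.g. (illustrative) hgprefix("user: alice\nbranch 'default'") returns
# "HG: user: alice\nHG: branch 'default'"; empty segments are dropped.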
2874 2874
2875 2875 def buildcommittext(repo, ctx, subs, extramsg):
2876 2876 edittext = []
2877 2877 modified, added, removed = ctx.modified(), ctx.added(), ctx.removed()
2878 2878 if ctx.description():
2879 2879 edittext.append(ctx.description())
2880 2880 edittext.append("")
2881 2881 edittext.append("") # Empty line between message and comments.
2882 2882 edittext.append(hgprefix(_("Enter commit message."
2883 2883 " Lines beginning with 'HG:' are removed.")))
2884 2884 edittext.append(hgprefix(extramsg))
2885 2885 edittext.append("HG: --")
2886 2886 edittext.append(hgprefix(_("user: %s") % ctx.user()))
2887 2887 if ctx.p2():
2888 2888 edittext.append(hgprefix(_("branch merge")))
2889 2889 if ctx.branch():
2890 2890 edittext.append(hgprefix(_("branch '%s'") % ctx.branch()))
2891 2891 if bookmarks.isactivewdirparent(repo):
2892 2892 edittext.append(hgprefix(_("bookmark '%s'") % repo._activebookmark))
2893 2893 edittext.extend([hgprefix(_("subrepo %s") % s) for s in subs])
2894 2894 edittext.extend([hgprefix(_("added %s") % f) for f in added])
2895 2895 edittext.extend([hgprefix(_("changed %s") % f) for f in modified])
2896 2896 edittext.extend([hgprefix(_("removed %s") % f) for f in removed])
2897 2897 if not added and not modified and not removed:
2898 2898 edittext.append(hgprefix(_("no files changed")))
2899 2899 edittext.append("")
2900 2900
2901 2901 return "\n".join(edittext)
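# Illustrative tail of the editor text built above ('alice' and 'foo.py'
# are made-up values; the description and two blank lines come first):
#
#     HG: Enter commit message. Lines beginning with 'HG:' are removed.
#     HG: Leave message empty to abort commit.
#     HG: --
#     HG: user: alice
#     HG: branch 'default'
#     HG: changed foo.py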
2902 2902
2903 2903 def commitstatus(repo, node, branch, bheads=None, opts=None):
2904 2904 if opts is None:
2905 2905 opts = {}
2906 2906 ctx = repo[node]
2907 2907 parents = ctx.parents()
2908 2908
2909 2909 if (not opts.get('amend') and bheads and node not in bheads and not
2910 2910 [x for x in parents if x.node() in bheads and x.branch() == branch]):
2911 2911 repo.ui.status(_('created new head\n'))
2912 2912 # The message is not printed for initial roots. For the other
2913 2913 # changesets, it is printed in the following situations:
2914 2914 #
2915 2915 # Par column: for the 2 parents with ...
2916 2916 # N: null or no parent
2917 2917 # B: parent is on another named branch
2918 2918 # C: parent is a regular non head changeset
2919 2919 # H: parent was a branch head of the current branch
2920 2920 # Msg column: whether we print "created new head" message
2921 2921 # In the following, it is assumed that there already exists some
2922 2922 # initial branch heads of the current branch, otherwise nothing is
2923 2923 # printed anyway.
2924 2924 #
2925 2925 # Par Msg Comment
2926 2926 # N N y additional topo root
2927 2927 #
2928 2928 # B N y additional branch root
2929 2929 # C N y additional topo head
2930 2930 # H N n usual case
2931 2931 #
2932 2932 # B B y weird additional branch root
2933 2933 # C B y branch merge
2934 2934 # H B n merge with named branch
2935 2935 #
2936 2936 # C C y additional head from merge
2937 2937 # C H n merge with a head
2938 2938 #
2939 2939 # H H n head merge: head count decreases
2940 2940
2941 2941 if not opts.get('close_branch'):
2942 2942 for r in parents:
2943 2943 if r.closesbranch() and r.branch() == branch:
2944 2944 repo.ui.status(_('reopening closed branch head %d\n') % r)
2945 2945
2946 2946 if repo.ui.debugflag:
2947 2947 repo.ui.write(_('committed changeset %d:%s\n') % (int(ctx), ctx.hex()))
2948 2948 elif repo.ui.verbose:
2949 2949 repo.ui.write(_('committed changeset %d:%s\n') % (int(ctx), ctx))
2950 2950
2951 2951 def postcommitstatus(repo, pats, opts):
2952 2952 return repo.status(match=scmutil.match(repo[None], pats, opts))
2953 2953
2954 2954 def revert(ui, repo, ctx, parents, *pats, **opts):
2955 2955 parent, p2 = parents
2956 2956 node = ctx.node()
2957 2957
2958 2958 mf = ctx.manifest()
2959 2959 if node == p2:
2960 2960 parent = p2
2961 2961
2962 2962 # need all matching names in dirstate and manifest of target rev,
2963 2963 # so have to walk both. do not print errors if files exist in one
2964 2964 # but not other. in both cases, filesets should be evaluated against
2965 2965 # workingctx to get consistent result (issue4497). this means 'set:**'
2966 2966 # cannot be used to select missing files from target rev.
2967 2967
2968 2968 # `names` is a mapping for all elements in working copy and target revision
2969 2969 # The mapping is in the form:
2970 2970 # <abs path in repo> -> (<path from CWD>, <exactly specified by matcher?>)
2971 2971 names = {}
2972 2972
2973 2973 with repo.wlock():
2974 2974 ## filling of the `names` mapping
2975 2975 # walk dirstate to fill `names`
2976 2976
2977 2977 interactive = opts.get('interactive', False)
2978 2978 wctx = repo[None]
2979 2979 m = scmutil.match(wctx, pats, opts)
2980 2980
2981 2981 # we'll need this later
2982 2982 targetsubs = sorted(s for s in wctx.substate if m(s))
2983 2983
2984 2984 if not m.always():
2985 2985 for abs in repo.walk(matchmod.badmatch(m, lambda x, y: False)):
2986 2986 names[abs] = m.rel(abs), m.exact(abs)
2987 2987
2988 2988 # walk target manifest to fill `names`
2989 2989
2990 2990 def badfn(path, msg):
2991 2991 if path in names:
2992 2992 return
2993 2993 if path in ctx.substate:
2994 2994 return
2995 2995 path_ = path + '/'
2996 2996 for f in names:
2997 2997 if f.startswith(path_):
2998 2998 return
2999 2999 ui.warn("%s: %s\n" % (m.rel(path), msg))
3000 3000
3001 3001 for abs in ctx.walk(matchmod.badmatch(m, badfn)):
3002 3002 if abs not in names:
3003 3003 names[abs] = m.rel(abs), m.exact(abs)
3004 3004
3005 3005 # Find status of all files in `names`.
3006 3006 m = scmutil.matchfiles(repo, names)
3007 3007
3008 3008 changes = repo.status(node1=node, match=m,
3009 3009 unknown=True, ignored=True, clean=True)
3010 3010 else:
3011 3011 changes = repo.status(node1=node, match=m)
3012 3012 for kind in changes:
3013 3013 for abs in kind:
3014 3014 names[abs] = m.rel(abs), m.exact(abs)
3015 3015
3016 3016 m = scmutil.matchfiles(repo, names)
3017 3017
3018 3018 modified = set(changes.modified)
3019 3019 added = set(changes.added)
3020 3020 removed = set(changes.removed)
3021 3021 _deleted = set(changes.deleted)
3022 3022 unknown = set(changes.unknown)
3023 3023 unknown.update(changes.ignored)
3024 3024 clean = set(changes.clean)
3025 3025 modadded = set()
3026 3026
3027 3027 # split between files known in target manifest and the others
3028 3028 smf = set(mf)
3029 3029
3030 3030 # determine the exact nature of the deleted files
3031 3031 deladded = _deleted - smf
3032 3032 deleted = _deleted - deladded
3033 3033
3034 3034 # We need to account for the state of the file in the dirstate,
3035 3035 # even when we revert against something else than parent. This will
3036 3036 # slightly alter the behavior of revert (doing back up or not, delete
3037 3037 # or just forget etc).
3038 3038 if parent == node:
3039 3039 dsmodified = modified
3040 3040 dsadded = added
3041 3041 dsremoved = removed
3042 3042 # store all local modifications, useful later for rename detection
3043 3043 localchanges = dsmodified | dsadded
3044 3044 modified, added, removed = set(), set(), set()
3045 3045 else:
3046 3046 changes = repo.status(node1=parent, match=m)
3047 3047 dsmodified = set(changes.modified)
3048 3048 dsadded = set(changes.added)
3049 3049 dsremoved = set(changes.removed)
3050 3050 # store all local modifications, useful later for rename detection
3051 3051 localchanges = dsmodified | dsadded
3052 3052
3053 3053 # only take removes between wc and target into account
3054 3054 clean |= dsremoved - removed
3055 3055 dsremoved &= removed
3056 3056 # distinguish between dirstate removes and the others
3057 3057 removed -= dsremoved
3058 3058
3059 3059 modadded = added & dsmodified
3060 3060 added -= modadded
3061 3061
3062 3062 # tell newly modified files apart.
3063 3063 dsmodified &= modified
3064 3064 dsmodified |= modified & dsadded # dirstate added may need backup
3065 3065 modified -= dsmodified
3066 3066
3067 3067 # We need to wait for some post-processing to update this set
3068 3068 # before making the distinction. The dirstate will be used for
3069 3069 # that purpose.
3070 3070 dsadded = added
3071 3071
3072 3072 # in case of merge, files that are actually added can be reported as
3073 3073 # modified, we need to post process the result
3074 3074 if p2 != nullid:
3075 3075 mergeadd = dsmodified - smf
3076 3076 dsadded |= mergeadd
3077 3077 dsmodified -= mergeadd
3078 3078
3079 3079 # if f is a rename, update `names` to also revert the source
3080 3080 cwd = repo.getcwd()
3081 3081 for f in localchanges:
3082 3082 src = repo.dirstate.copied(f)
3083 3083 # XXX should we check for rename down to target node?
3084 3084 if src and src not in names and repo.dirstate[src] == 'r':
3085 3085 dsremoved.add(src)
3086 3086 names[src] = (repo.pathto(src, cwd), True)
3087 3087
3088 3088 # distinguish between files to forget and the others
3089 3089 added = set()
3090 3090 for abs in dsadded:
3091 3091 if repo.dirstate[abs] != 'a':
3092 3092 added.add(abs)
3093 3093 dsadded -= added
3094 3094
3095 3095 for abs in deladded:
3096 3096 if repo.dirstate[abs] == 'a':
3097 3097 dsadded.add(abs)
3098 3098 deladded -= dsadded
3099 3099
3100 3100 # For files marked as removed, we check if an unknown file is present at
3101 3101 # the same path. If such a file exists, it may need to be backed up.
3102 3102 # Making the distinction at this stage helps have simpler backup
3103 3103 # logic.
3104 3104 removunk = set()
3105 3105 for abs in removed:
3106 3106 target = repo.wjoin(abs)
3107 3107 if os.path.lexists(target):
3108 3108 removunk.add(abs)
3109 3109 removed -= removunk
3110 3110
3111 3111 dsremovunk = set()
3112 3112 for abs in dsremoved:
3113 3113 target = repo.wjoin(abs)
3114 3114 if os.path.lexists(target):
3115 3115 dsremovunk.add(abs)
3116 3116 dsremoved -= dsremovunk
3117 3117
3118 3118 # action to be actually performed by revert
3119 3119 # (<list of files>, <message>) tuple
3120 3120 actions = {'revert': ([], _('reverting %s\n')),
3121 3121 'add': ([], _('adding %s\n')),
3122 3122 'remove': ([], _('removing %s\n')),
3123 3123 'drop': ([], _('removing %s\n')),
3124 3124 'forget': ([], _('forgetting %s\n')),
3125 3125 'undelete': ([], _('undeleting %s\n')),
3126 3126 'noop': (None, _('no changes needed to %s\n')),
3127 3127 'unknown': (None, _('file not managed: %s\n')),
3128 3128 }
3129 3129
3130 3130 # "constant" that convey the backup strategy.
3131 3131 # All set to `discard` if `no-backup` is set do avoid checking
3132 3132 # no_backup lower in the code.
3133 3133 # These values are ordered for comparison purposes
3134 3134 backupinteractive = 3 # do backup if interactively modified
3135 3135 backup = 2 # unconditionally do backup
3136 3136 check = 1 # check if the existing file differs from target
3137 3137 discard = 0 # never do backup
3138 3138 if opts.get('no_backup'):
3139 3139 backupinteractive = backup = check = discard
3140 3140 if interactive:
3141 3141 dsmodifiedbackup = backupinteractive
3142 3142 else:
3143 3143 dsmodifiedbackup = backup
3144 3144 tobackup = set()
3145 3145
3146 3146 backupanddel = actions['remove']
3147 3147 if not opts.get('no_backup'):
3148 3148 backupanddel = actions['drop']
3149 3149
3150 3150 disptable = (
3151 3151 # dispatch table:
3152 3152 # file state
3153 3153 # action
3154 3154 # make backup
3155 3155
3156 3156 ## Sets whose handling will change files on disk
3157 3157 # Modified compared to target, no local change
3158 3158 (modified, actions['revert'], discard),
3159 3159 # Modified compared to target, but local file is deleted
3160 3160 (deleted, actions['revert'], discard),
3161 3161 # Modified compared to target, local change
3162 3162 (dsmodified, actions['revert'], dsmodifiedbackup),
3163 3163 # Added since target
3164 3164 (added, actions['remove'], discard),
3165 3165 # Added in working directory
3166 3166 (dsadded, actions['forget'], discard),
3167 3167 # Added since target, have local modification
3168 3168 (modadded, backupanddel, backup),
3169 3169 # Added since target but file is missing in working directory
3170 3170 (deladded, actions['drop'], discard),
3171 3171 # Removed since target, before working copy parent
3172 3172 (removed, actions['add'], discard),
3173 3173 # Same as `removed` but an unknown file exists at the same path
3174 3174 (removunk, actions['add'], check),
3175 3175 # Removed since target, marked as such in working copy parent
3176 3176 (dsremoved, actions['undelete'], discard),
3177 3177 # Same as `dsremoved` but an unknown file exists at the same path
3178 3178 (dsremovunk, actions['undelete'], check),
3179 3179 ## the following sets do not result in any file changes
3180 3180 # File with no modification
3181 3181 (clean, actions['noop'], discard),
3182 3182 # Existing file, not tracked anywhere
3183 3183 (unknown, actions['unknown'], discard),
3184 3184 )
3185 3185
3186 3186 for abs, (rel, exact) in sorted(names.items()):
3187 3187 # target file to be touched on disk (relative to cwd)
3188 3188 target = repo.wjoin(abs)
3189 3189 # search the entry in the dispatch table.
3190 3190 # if the file is in any of these sets, it was touched in the working
3191 3191 # directory parent and we are sure it needs to be reverted.
3192 3192 for table, (xlist, msg), dobackup in disptable:
3193 3193 if abs not in table:
3194 3194 continue
3195 3195 if xlist is not None:
3196 3196 xlist.append(abs)
3197 3197 if dobackup:
3198 3198 # If in interactive mode, don't automatically create
3199 3199 # .orig files (issue4793)
3200 3200 if dobackup == backupinteractive:
3201 3201 tobackup.add(abs)
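# the next branch backs up unconditionally for `backup`, and for
# `check` only when the file on disk differs from the target revision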
3202 3202 elif (backup <= dobackup or wctx[abs].cmp(ctx[abs])):
3203 3203 bakname = scmutil.origpath(ui, repo, rel)
3204 3204 ui.note(_('saving current version of %s as %s\n') %
3205 3205 (rel, bakname))
3206 3206 if not opts.get('dry_run'):
3207 3207 if interactive:
3208 3208 util.copyfile(target, bakname)
3209 3209 else:
3210 3210 util.rename(target, bakname)
3211 3211 if ui.verbose or not exact:
3212 3212 if not isinstance(msg, basestring):
3213 3213 msg = msg(abs)
3214 3214 ui.status(msg % rel)
3215 3215 elif exact:
3216 3216 ui.warn(msg % rel)
3217 3217 break
3218 3218
3219 3219 if not opts.get('dry_run'):
3220 3220 needdata = ('revert', 'add', 'undelete')
3221 3221 _revertprefetch(repo, ctx, *[actions[name][0] for name in needdata])
3222 3222 _performrevert(repo, parents, ctx, actions, interactive, tobackup)
3223 3223
3224 3224 if targetsubs:
3225 3225 # Revert the subrepos on the revert list
3226 3226 for sub in targetsubs:
3227 3227 try:
3228 3228 wctx.sub(sub).revert(ctx.substate[sub], *pats, **opts)
3229 3229 except KeyError:
3230 3230 raise error.Abort("subrepository '%s' does not exist in %s!"
3231 3231 % (sub, short(ctx.node())))
3232 3232
3233 3233 def _revertprefetch(repo, ctx, *files):
3234 3234 """Let extension changing the storage layer prefetch content"""
3235 3235 pass
3236 3236
3237 3237 def _performrevert(repo, parents, ctx, actions, interactive=False,
3238 3238 tobackup=None):
3239 3239 """function that actually perform all the actions computed for revert
3240 3240
3241 3241 This is an independent function to let extension to plug in and react to
3242 3242 the imminent revert.
3243 3243
3244 3244 Make sure you have the working directory locked when calling this function.
3245 3245 """
3246 3246 parent, p2 = parents
3247 3247 node = ctx.node()
3248 3248 excluded_files = []
3249 3249 matcher_opts = {"exclude": excluded_files}
3250 3250
3251 3251 def checkout(f):
3252 3252 fc = ctx[f]
3253 3253 repo.wwrite(f, fc.data(), fc.flags())
3254 3254
3255 3255 audit_path = pathutil.pathauditor(repo.root)
3256 3256 for f in actions['forget'][0]:
3257 3257 if interactive:
3258 3258 choice = repo.ui.promptchoice(
3259 3259 _("forget added file %s (yn)?$$ &Yes $$ &No") % f)
3262 3262 if choice == 0:
3263 3263 repo.dirstate.drop(f)
3264 3264 else:
3265 3265 excluded_files.append(repo.wjoin(f))
3266 3266 else:
3267 3267 repo.dirstate.drop(f)
3268 3268 for f in actions['remove'][0]:
3269 3269 audit_path(f)
3270 3270 try:
3271 3271 util.unlinkpath(repo.wjoin(f))
3272 3272 except OSError:
3273 3273 pass
3274 3274 repo.dirstate.remove(f)
3275 3275 for f in actions['drop'][0]:
3276 3276 audit_path(f)
3277 3277 repo.dirstate.remove(f)
3278 3278
3279 3279 normal = None
3280 3280 if node == parent:
3281 3281 # We're reverting to our parent. If possible, we'd like status
3282 3282 # to report the file as clean. We have to use normallookup for
3283 3283 # merges to avoid losing information about merged/dirty files.
3284 3284 if p2 != nullid:
3285 3285 normal = repo.dirstate.normallookup
3286 3286 else:
3287 3287 normal = repo.dirstate.normal
3288 3288
3289 3289 newlyaddedandmodifiedfiles = set()
3290 3290 if interactive:
3291 3291 # Prompt the user for changes to revert
3292 3292 torevert = [repo.wjoin(f) for f in actions['revert'][0]]
3293 3293 m = scmutil.match(ctx, torevert, matcher_opts)
3294 3294 diffopts = patch.difffeatureopts(repo.ui, whitespace=True)
3295 3295 diffopts.nodates = True
3296 3296 diffopts.git = True
3297 3297 reversehunks = repo.ui.configbool('experimental',
3298 3298 'revertalternateinteractivemode',
3299 3299 True)
3300 3300 if reversehunks:
3301 3301 diff = patch.diff(repo, ctx.node(), None, m, opts=diffopts)
3302 3302 else:
3303 3303 diff = patch.diff(repo, None, ctx.node(), m, opts=diffopts)
3304 3304 originalchunks = patch.parsepatch(diff)
3305 3305 operation = 'discard' if node == parent else 'revert'
3306 3306
3307 3307 try:
3309 3309 chunks, opts = recordfilter(repo.ui, originalchunks,
3310 3310 operation=operation)
3311 3311 if reversehunks:
3312 3312 chunks = patch.reversehunks(chunks)
3313 3313
3314 3314 except patch.PatchError as err:
3315 3315 raise error.Abort(_('error parsing patch: %s') % err)
3316 3316
3317 3317 newlyaddedandmodifiedfiles = newandmodified(chunks, originalchunks)
3318 3318 if tobackup is None:
3319 3319 tobackup = set()
3320 3320 # Apply changes
3321 3321 fp = stringio()
3322 3322 for c in chunks:
3323 3323 # Create a backup file only if this hunk should be backed up
3324 3324 if ishunk(c) and c.header.filename() in tobackup:
3325 3325 abs = c.header.filename()
3326 3326 target = repo.wjoin(abs)
3327 3327 bakname = scmutil.origpath(repo.ui, repo, m.rel(abs))
3328 3328 util.copyfile(target, bakname)
3329 3329 tobackup.remove(abs)
3330 3330 c.write(fp)
3331 3331 dopatch = fp.tell()
3332 3332 fp.seek(0)
3333 3333 if dopatch:
3334 3334 try:
3335 3335 patch.internalpatch(repo.ui, repo, fp, 1, eolmode=None)
3336 3336 except patch.PatchError as err:
3337 3337 raise error.Abort(str(err))
3338 3338 del fp
3339 3339 else:
3340 3340 for f in actions['revert'][0]:
3341 3341 checkout(f)
3342 3342 if normal:
3343 3343 normal(f)
3344 3344
3345 3345 for f in actions['add'][0]:
3346 3346 # Don't checkout modified files, they are already created by the diff
3347 3347 if f not in newlyaddedandmodifiedfiles:
3348 3348 checkout(f)
3349 3349 repo.dirstate.add(f)
3350 3350
3351 3351 normal = repo.dirstate.normallookup
3352 3352 if node == parent and p2 == nullid:
3353 3353 normal = repo.dirstate.normal
3354 3354 for f in actions['undelete'][0]:
3355 3355 checkout(f)
3356 3356 normal(f)
3357 3357
3358 3358 copied = copies.pathcopies(repo[parent], ctx)
3359 3359
3360 3360 for f in actions['add'][0] + actions['undelete'][0] + actions['revert'][0]:
3361 3361 if f in copied:
3362 3362 repo.dirstate.copy(copied[f], f)
3363 3363
3364 3364 def command(table):
3365 3365 """Returns a function object to be used as a decorator for making commands.
3366 3366
3367 3367 This function receives a command table as its argument. The table should
3368 3368 be a dict.
3369 3369
3370 3370 The returned function can be used as a decorator for adding commands
3371 3371 to that command table. This function accepts multiple arguments to define
3372 3372 a command.
3373 3373
3374 3374 The first argument is the command name.
3375 3375
3376 3376 The options argument is an iterable of tuples defining command arguments.
3377 3377 See ``mercurial.fancyopts.fancyopts()`` for the format of each tuple.
3378 3378
3379 3379 The synopsis argument defines a short, one line summary of how to use the
3380 3380 command. This shows up in the help output.
3381 3381
3382 3382 The norepo argument defines whether the command does not require a
3383 3383 local repository. Most commands operate against a repository, thus the
3384 3384 default is False.
3385 3385
3386 3386 The optionalrepo argument defines whether the command optionally requires
3387 3387 a local repository.
3388 3388
3389 3389 The inferrepo argument defines whether to try to find a repository from the
3390 3390 command line arguments. If True, arguments will be examined for potential
3391 3391 repository locations. See ``findrepo()``. If a repository is found, it
3392 3392 will be used.
3393 3393 """
3394 3394 def cmd(name, options=(), synopsis=None, norepo=False, optionalrepo=False,
3395 3395 inferrepo=False):
3396 3396 def decorator(func):
3397 3397 func.norepo = norepo
3398 3398 func.optionalrepo = optionalrepo
3399 3399 func.inferrepo = inferrepo
3400 3400 if synopsis:
3401 3401 table[name] = func, list(options), synopsis
3402 3402 else:
3403 3403 table[name] = func, list(options)
3404 3404 return func
3405 3405 return decorator
3406 3406
3407 3407 return cmd
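# Illustrative extension usage (not part of this module; 'cmdtable' and
# 'hello' are hypothetical names):
#
#     cmdtable = {}
#     command = cmdutil.command(cmdtable)
#
#     @command('hello', [('g', 'greeting', 'hello', 'greeting to use')],
#              'hg hello [-g TEXT]', norepo=True)
#     def hello(ui, **opts):
#         ui.write('%s, world\n' % opts['greeting'])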
3408 3408
3409 3409 def checkunresolved(ms):
3410 3410 if list(ms.unresolved()):
3411 3411 raise error.Abort(_("unresolved merge conflicts "
3412 3412 "(see 'hg help resolve')"))
3413 3413 if ms.mdstate() != 's' or list(ms.driverresolved()):
3414 3414 raise error.Abort(_('driver-resolved merge conflicts'),
3415 3415 hint=_('run "hg resolve --all" to resolve'))
3416 3416
3417 3417 # a list of (ui, repo, otherpeer, opts, missing) functions called by
3418 3418 # commands.outgoing. "missing" is "missing" of the result of
3419 3419 # "findcommonoutgoing()"
3420 3420 outgoinghooks = util.hooks()
3421 3421
3422 3422 # a list of (ui, repo) functions called by commands.summary
3423 3423 summaryhooks = util.hooks()
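# An extension could register one of these like so (illustrative; 'myext'
# and 'summaryfoo' are hypothetical, util.hooks stores (source, hook) pairs):
#     def summaryfoo(ui, repo):
#         ui.note('foo: everything nominal\n')
#     summaryhooks.add('myext', summaryfoo)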
3424 3424
3425 3425 # a list of (ui, repo, opts, changes) functions called by commands.summary.
3426 3426 #
3427 3427 # functions should return tuple of booleans below, if 'changes' is None:
3428 3428 # (whether-incomings-are-needed, whether-outgoings-are-needed)
3429 3429 #
3430 3430 # otherwise, 'changes' is a tuple of tuples below:
3431 3431 # - (sourceurl, sourcebranch, sourcepeer, incoming)
3432 3432 # - (desturl, destbranch, destpeer, outgoing)
3433 3433 summaryremotehooks = util.hooks()
3434 3434
3435 3435 # A list of state files kept by multistep operations like graft.
3436 3436 # Since graft cannot be aborted, it is considered 'clearable' by update.
3437 3437 # note: bisect is intentionally excluded
3438 3438 # (state file, clearable, allowcommit, error, hint)
3439 3439 unfinishedstates = [
3440 3440 ('graftstate', True, False, _('graft in progress'),
3441 3441 _("use 'hg graft --continue' or 'hg update' to abort")),
3442 3442 ('updatestate', True, False, _('last update was interrupted'),
3443 3443 _("use 'hg update' to get a consistent checkout"))
3444 3444 ]
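# An extension with its own multistep operation could append an entry here
# (illustrative; 'foostate' and 'hg foo' are hypothetical):
#     unfinishedstates.append(
#         ('foostate', True, False, _('foo in progress'),
#          _("use 'hg foo --continue' or 'hg update' to abort")))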
3445 3445
3446 3446 def checkunfinished(repo, commit=False):
3447 3447 '''Look for an unfinished multistep operation, like graft, and abort
3448 3448 if found. It's probably good to check this right before
3449 3449 bailifchanged().
3450 3450 '''
3451 3451 for f, clearable, allowcommit, msg, hint in unfinishedstates:
3452 3452 if commit and allowcommit:
3453 3453 continue
3454 3454 if repo.vfs.exists(f):
3455 3455 raise error.Abort(msg, hint=hint)
3456 3456
3457 3457 def clearunfinished(repo):
3458 3458 '''Check for unfinished operations (as above), and clear the ones
3459 3459 that are clearable.
3460 3460 '''
3461 3461 for f, clearable, allowcommit, msg, hint in unfinishedstates:
3462 3462 if not clearable and repo.vfs.exists(f):
3463 3463 raise error.Abort(msg, hint=hint)
3464 3464 for f, clearable, allowcommit, msg, hint in unfinishedstates:
3465 3465 if clearable and repo.vfs.exists(f):
3466 3466 util.unlink(repo.join(f))
3467 3467
3468 3468 afterresolvedstates = [
3469 3469 ('graftstate',
3470 3470 _('hg graft --continue')),
3471 3471 ]
3472 3472
3473 3473 def howtocontinue(repo):
3474 3474 '''Check for an unfinished operation and return the command to finish
3475 3475 it.
3476 3476
3477 3477 afterresolvedstates tuples define a .hg/{file} and the corresponding
3478 3478 command needed to finish it.
3479 3479
3480 3480 Returns a (msg, warning) tuple. 'msg' is a string and 'warning' is
3481 3481 a boolean.
3482 3482 '''
3483 3483 contmsg = _("continue: %s")
3484 3484 for f, msg in afterresolvedstates:
3485 3485 if repo.vfs.exists(f):
3486 3486 return contmsg % msg, True
3487 3487 workingctx = repo[None]
3488 3488 dirty = any(repo.status()) or any(workingctx.sub(s).dirty()
3489 3489 for s in workingctx.substate)
3490 3490 if dirty:
3491 3491 return contmsg % _("hg commit"), False
3492 3492 return None, None
3493 3493
3494 3494 def checkafterresolved(repo):
3495 3495 '''Inform the user about the next action after completing hg resolve
3496 3496
3497 3497 If there's a matching afterresolvedstates entry, the message is
3498 3498 reported through repo.ui.warn.
3499 3499
3500 3500 Otherwise, it is reported through repo.ui.note.
3501 3501 '''
3502 3502 msg, warning = howtocontinue(repo)
3503 3503 if msg is not None:
3504 3504 if warning:
3505 3505 repo.ui.warn("%s\n" % msg)
3506 3506 else:
3507 3507 repo.ui.note("%s\n" % msg)
3508 3508
3509 3509 def wrongtooltocontinue(repo, task):
3510 3510 '''Raise an abort suggesting how to properly continue if there is an
3511 3511 active task.
3512 3512
3513 3513 Uses howtocontinue() to find the active task.
3514 3514
3515 3515 If there's no unfinished task (only the 'hg commit' suggestion for a
3516 3516 dirty working directory), it does not offer a hint.
3517 3517 '''
3518 3518 after = howtocontinue(repo)
3519 3519 hint = None
3520 3520 if after[1]:
3521 3521 hint = after[0]
3522 3522 raise error.Abort(_('no %s in progress') % task, hint=hint)
3523 3523
3524 3524 class dirstateguard(object):
3525 3525 '''Restore dirstate at unexpected failure.
3526 3526
3527 3527 At the construction, this class does:
3528 3528
3529 3529 - write current ``repo.dirstate`` out, and
3530 3530 - save ``.hg/dirstate`` into the backup file
3531 3531
3532 3532 This restores ``.hg/dirstate`` from backup file, if ``release()``
3533 3533 is invoked before ``close()``.
3534 3534
3535 3535 This just removes the backup file at ``close()`` before ``release()``.
3536 3536 '''
3537 3537
3538 3538 def __init__(self, repo, name):
3539 3539 self._repo = repo
3540 3540 self._active = False
3541 3541 self._closed = False
3542 3542 self._suffix = '.backup.%s.%d' % (name, id(self))
3543 3543 repo.dirstate.savebackup(repo.currenttransaction(), self._suffix)
3544 3544 self._active = True
3545 3545
3546 3546 def __del__(self):
3547 3547 if self._active: # still active
3548 3548 # this may occur, even if this class is used correctly:
3549 3549 # for example, releasing other resources like a transaction
3550 3550 # may raise an exception before ``dirstateguard.release`` in
3551 3551 # ``release(tr, ....)``.
3552 3552 self._abort()
3553 3553
3554 3554 def close(self):
3555 3555 if not self._active: # already inactivated
3556 3556 msg = (_("can't close already inactivated backup: dirstate%s")
3557 3557 % self._suffix)
3558 3558 raise error.Abort(msg)
3559 3559
3560 3560 self._repo.dirstate.clearbackup(self._repo.currenttransaction(),
3561 3561 self._suffix)
3562 3562 self._active = False
3563 3563 self._closed = True
3564 3564
3565 3565 def _abort(self):
3566 3566 self._repo.dirstate.restorebackup(self._repo.currenttransaction(),
3567 3567 self._suffix)
3568 3568 self._active = False
3569 3569
3570 3570 def release(self):
3571 3571 if not self._closed:
3572 3572 if not self._active: # already inactivated
3573 3573 msg = (_("can't release already inactivated backup:"
3574 3574 " dirstate%s")
3575 3575 % self._suffix)
3576 3576 raise error.Abort(msg)
3577 3577 self._abort()
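# Typical dirstateguard usage (illustrative; 'myoperation' is a made-up
# name, the semantics mirror the class docstring above):
#
#     dsguard = dirstateguard(repo, 'myoperation')
#     try:
#         # ... mutate the dirstate ...
#         dsguard.close()    # success: the backup file is just removed
#     finally:
#         dsguard.release()  # restores the backup if close() never ran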
@@ -1,544 +1,543 b''
1 1 # commandserver.py - communicate with Mercurial's API over a pipe
2 2 #
3 3 # Copyright Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 from __future__ import absolute_import
9 9
10 10 import errno
11 11 import gc
12 12 import os
13 13 import random
14 14 import select
15 15 import signal
16 16 import socket
17 17 import struct
18 import sys
19 18 import traceback
20 19
21 20 from .i18n import _
22 21 from . import (
23 22 encoding,
24 23 error,
25 24 util,
26 25 )
27 26
28 27 logfile = None
29 28
30 29 def log(*args):
31 30 if not logfile:
32 31 return
33 32
34 33 for a in args:
35 34 logfile.write(str(a))
36 35
37 36 logfile.flush()
38 37
39 38 class channeledoutput(object):
40 39 """
41 40 Write data to out in the following format:
42 41
43 42 channel identifier (1 byte),
44 43 data length (unsigned int), data
45 44 """
46 45 def __init__(self, out, channel):
47 46 self.out = out
48 47 self.channel = channel
49 48
50 49 @property
51 50 def name(self):
52 51 return '<%c-channel>' % self.channel
53 52
54 53 def write(self, data):
55 54 if not data:
56 55 return
57 56 # single write() to guarantee the same atomicity as the underlying file
58 57 self.out.write(struct.pack('>cI', self.channel, len(data)) + data)
59 58 self.out.flush()
60 59
61 60 def __getattr__(self, attr):
62 61 if attr in ('isatty', 'fileno', 'tell', 'seek'):
63 62 raise AttributeError(attr)
64 63 return getattr(self.out, attr)
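# Illustrative frame (not part of the original source): writing "ok\n" to
# the 'o' channel emits 'o' + struct.pack('>I', 3) + 'ok\n', i.e. the bytes
# 6f 00 00 00 03 6f 6b 0a on the wire.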
65 64
66 65 class channeledinput(object):
67 66 """
68 67 Read data from in_.
69 68
70 69 Requests for input are written to out in the following format:
71 70 channel identifier - 'I' for plain input, 'L' line based (1 byte)
72 71 how many bytes to send at most (unsigned int),
73 72
74 73 The client replies with:
75 74 data length (unsigned int), 0 meaning EOF
76 75 data
77 76 """
78 77
79 78 maxchunksize = 4 * 1024
80 79
81 80 def __init__(self, in_, out, channel):
82 81 self.in_ = in_
83 82 self.out = out
84 83 self.channel = channel
85 84
86 85 @property
87 86 def name(self):
88 87 return '<%c-channel>' % self.channel
89 88
90 89 def read(self, size=-1):
91 90 if size < 0:
92 91 # if we need to consume all the client's input, ask for 4k chunks
93 92 # so the pipe doesn't fill up risking a deadlock
94 93 size = self.maxchunksize
95 94 s = self._read(size, self.channel)
96 95 buf = s
97 96 while s:
98 97 s = self._read(size, self.channel)
99 98 buf += s
100 99
101 100 return buf
102 101 else:
103 102 return self._read(size, self.channel)
104 103
105 104 def _read(self, size, channel):
106 105 if not size:
107 106 return ''
108 107 assert size > 0
109 108
110 109 # tell the client we need at most size bytes
111 110 self.out.write(struct.pack('>cI', channel, size))
112 111 self.out.flush()
113 112
114 113 length = self.in_.read(4)
115 114 length = struct.unpack('>I', length)[0]
116 115 if not length:
117 116 return ''
118 117 else:
119 118 return self.in_.read(length)
120 119
121 120 def readline(self, size=-1):
122 121 if size < 0:
123 122 size = self.maxchunksize
124 123 s = self._read(size, 'L')
125 124 buf = s
126 125 # keep asking for more until there's either no more or
127 126 # we got a full line
128 127 while s and s[-1] != '\n':
129 128 s = self._read(size, 'L')
130 129 buf += s
131 130
132 131 return buf
133 132 else:
134 133 return self._read(size, 'L')
135 134
136 135 def __iter__(self):
137 136 return self
138 137
139 138 def next(self):
140 139 l = self.readline()
141 140 if not l:
142 141 raise StopIteration
143 142 return l
144 143
145 144 def __getattr__(self, attr):
146 145 if attr in ('isatty', 'fileno', 'tell', 'seek'):
147 146 raise AttributeError(attr)
148 147 return getattr(self.in_, attr)
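# Illustrative exchange (not part of the original source): readline() asks
# the client with 'L' + struct.pack('>I', 4096); the client replies with
# struct.pack('>I', 6) + 'hello\n', and a zero length signals EOF.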
149 148
150 149 class server(object):
151 150 """
152 151 Listens for commands on fin, runs them and writes the output on a channel
153 152 based stream to fout.
154 153 """
155 154 def __init__(self, ui, repo, fin, fout):
156 155 self.cwd = os.getcwd()
157 156
158 157 # developer config: cmdserver.log
159 158 logpath = ui.config("cmdserver", "log", None)
160 159 if logpath:
161 160 global logfile
162 161 if logpath == '-':
163 162 # write log on a special 'd' (debug) channel
164 163 logfile = channeledoutput(fout, 'd')
165 164 else:
166 165 logfile = open(logpath, 'a')
167 166
168 167 if repo:
169 168 # the ui here is really the repo ui so take its baseui so we don't
170 169 # end up with its local configuration
171 170 self.ui = repo.baseui
172 171 self.repo = repo
173 172 self.repoui = repo.ui
174 173 else:
175 174 self.ui = ui
176 175 self.repo = self.repoui = None
177 176
178 177 self.cerr = channeledoutput(fout, 'e')
179 178 self.cout = channeledoutput(fout, 'o')
180 179 self.cin = channeledinput(fin, fout, 'I')
181 180 self.cresult = channeledoutput(fout, 'r')
182 181
183 182 self.client = fin
184 183
185 184 def cleanup(self):
186 185 """release and restore resources taken during server session"""
187 186 pass
188 187
189 188 def _read(self, size):
190 189 if not size:
191 190 return ''
192 191
193 192 data = self.client.read(size)
194 193
195 194 # is the other end closed?
196 195 if not data:
197 196 raise EOFError
198 197
199 198 return data
200 199
201 200 def _readstr(self):
202 201 """read a string from the channel
203 202
204 203 format:
205 204 data length (uint32), data
206 205 """
207 206 length = struct.unpack('>I', self._read(4))[0]
208 207 if not length:
209 208 return ''
210 209 return self._read(length)
211 210
212 211 def _readlist(self):
213 212 """read a list of NULL separated strings from the channel"""
214 213 s = self._readstr()
215 214 if s:
216 215 return s.split('\0')
217 216 else:
218 217 return []
219 218
220 219 def runcommand(self):
221 220 """ reads a list of \0 terminated arguments, executes
222 221 and writes the return code to the result channel """
223 222 from . import dispatch # avoid cycle
224 223
225 224 args = self._readlist()
226 225
227 226 # copy the uis so changes (e.g. --config or --verbose) don't
228 227 # persist between requests
229 228 copiedui = self.ui.copy()
230 229 uis = [copiedui]
231 230 if self.repo:
232 231 self.repo.baseui = copiedui
233 232 # clone ui without using ui.copy because this is protected
234 233 repoui = self.repoui.__class__(self.repoui)
235 234 repoui.copy = copiedui.copy # redo copy protection
236 235 uis.append(repoui)
237 236 self.repo.ui = self.repo.dirstate._ui = repoui
238 237 self.repo.invalidateall()
239 238
240 239 for ui in uis:
241 240 ui.resetstate()
242 241 # any kind of interaction must use server channels, but chg may
243 242 # replace channels by fully functional tty files. so nontty is
244 243 # enforced only if cin is a channel.
245 244 if not util.safehasattr(self.cin, 'fileno'):
246 245 ui.setconfig('ui', 'nontty', 'true', 'commandserver')
247 246
248 247 req = dispatch.request(args[:], copiedui, self.repo, self.cin,
249 248 self.cout, self.cerr)
250 249
251 250 ret = (dispatch.dispatch(req) or 0) & 255 # might return None
252 251
253 252 # restore old cwd
254 253 if '--cwd' in args:
255 254 os.chdir(self.cwd)
256 255
257 256 self.cresult.write(struct.pack('>i', int(ret)))
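# Illustrative exchange (values made up): for 'hg log -l 1' the client
# sends the length-prefixed argument string
#     struct.pack('>I', 8) + 'log\x00-l\x00' + '1'
# and, once dispatch returns, the return code goes back on the 'r'
# channel as struct.pack('>i', 0).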
258 257
259 258 def getencoding(self):
260 259 """ writes the current encoding to the result channel """
261 260 self.cresult.write(encoding.encoding)
262 261
263 262 def serveone(self):
264 263 cmd = self.client.readline()[:-1]
265 264 if cmd:
266 265 handler = self.capabilities.get(cmd)
267 266 if handler:
268 267 handler(self)
269 268 else:
270 269 # clients are expected to check what commands are supported by
271 270 # looking at the servers capabilities
272 271 raise error.Abort(_('unknown command %s') % cmd)
273 272
274 273 return cmd != ''
275 274
276 275 capabilities = {'runcommand' : runcommand,
277 276 'getencoding' : getencoding}
278 277
279 278 def serve(self):
280 279 hellomsg = 'capabilities: ' + ' '.join(sorted(self.capabilities))
281 280 hellomsg += '\n'
282 281 hellomsg += 'encoding: ' + encoding.encoding
283 282 hellomsg += '\n'
284 283 hellomsg += 'pid: %d' % util.getpid()
285 284 if util.safehasattr(os, 'getpgid'):
286 285 hellomsg += '\n'
287 286 hellomsg += 'pgid: %d' % os.getpgid(0)
288 287
289 288 # write the hello msg in -one- chunk
290 289 self.cout.write(hellomsg)
291 290
292 291 try:
293 292 while self.serveone():
294 293 pass
295 294 except EOFError:
296 295 # we'll get here if the client disconnected while we were reading
297 296 # its request
298 297 return 1
299 298
300 299 return 0
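For illustration, the hello message assembled above arrives on the 'o' channel as a single chunk shaped like this (all values hypothetical; the pgid line appears only where os.getpgid exists):

    capabilities: getencoding runcommand
    encoding: UTF-8
    pid: 12345
    pgid: 12345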
301 300
302 301 def _protectio(ui):
303 302 """ duplicates streams and redirects originals to null if ui uses stdio """
304 303 ui.flush()
305 304 newfiles = []
306 305 nullfd = os.open(os.devnull, os.O_RDWR)
307 for f, sysf, mode in [(ui.fin, sys.stdin, 'rb'),
308 (ui.fout, sys.stdout, 'wb')]:
306 for f, sysf, mode in [(ui.fin, util.stdin, 'rb'),
307 (ui.fout, util.stdout, 'wb')]:
309 308 if f is sysf:
310 309 newfd = os.dup(f.fileno())
311 310 os.dup2(nullfd, f.fileno())
312 311 f = os.fdopen(newfd, mode)
313 312 newfiles.append(f)
314 313 os.close(nullfd)
315 314 return tuple(newfiles)
316 315
317 316 def _restoreio(ui, fin, fout):
318 317 """ restores streams from duplicated ones """
319 318 ui.flush()
320 319 for f, uif in [(fin, ui.fin), (fout, ui.fout)]:
321 320 if f is not uif:
322 321 os.dup2(f.fileno(), uif.fileno())
323 322 f.close()
324 323
325 324 class pipeservice(object):
326 325 def __init__(self, ui, repo, opts):
327 326 self.ui = ui
328 327 self.repo = repo
329 328
330 329 def init(self):
331 330 pass
332 331
333 332 def run(self):
334 333 ui = self.ui
335 334 # redirect stdio to null device so that broken extensions or in-process
336 335 # hooks will never cause corruption of channel protocol.
337 336 fin, fout = _protectio(ui)
338 337 try:
339 338 sv = server(ui, self.repo, fin, fout)
340 339 return sv.serve()
341 340 finally:
342 341 sv.cleanup()
343 342 _restoreio(ui, fin, fout)
344 343
345 344 def _initworkerprocess():
346 345 # use a different process group from the master process, in order to:
347 346 # 1. make the current process group no longer "orphaned" (because the
348 347 # parent of this process is in a different process group while it
349 348 # remains in the same session)
350 349 # according to POSIX 2.2.2.52, an orphaned process group will ignore
351 350 # terminal-generated stop signals like SIGTSTP (Ctrl+Z), which will
352 351 # cause trouble for things like ncurses.
353 352 # 2. the client can use kill(-pgid, sig) to simulate terminal-generated
354 353 # SIGINT (Ctrl+C) and process-exit-generated SIGHUP. our child
355 354 # processes like ssh will be killed properly, without affecting
356 355 # unrelated processes.
357 356 os.setpgid(0, 0)
358 357 # change the random state, otherwise forked request handlers would
359 358 # have the same state inherited from the parent.
360 359 random.seed()
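A client-side sketch of point 2 above (hypothetical helper, assuming the client parsed pgid from the hello message emitted by serve()):

    import os
    import signal

    def forwardsigint(pgid):
        # a negative pid targets the whole process group, which mimics a
        # terminal-generated Ctrl+C for the worker and its children
        os.kill(-pgid, signal.SIGINT)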
361 360
362 361 def _serverequest(ui, repo, conn, createcmdserver):
363 362 fin = conn.makefile('rb')
364 363 fout = conn.makefile('wb')
365 364 sv = None
366 365 try:
367 366 sv = createcmdserver(repo, conn, fin, fout)
368 367 try:
369 368 sv.serve()
370 369 # handle exceptions that may be raised by the command server. most
371 370 # known exceptions are caught by dispatch.
372 371 except error.Abort as inst:
373 372 ui.warn(_('abort: %s\n') % inst)
374 373 except IOError as inst:
375 374 if inst.errno != errno.EPIPE:
376 375 raise
377 376 except KeyboardInterrupt:
378 377 pass
379 378 finally:
380 379 sv.cleanup()
381 380 except: # re-raises
382 381 # also write traceback to error channel. otherwise client cannot
383 382 # see it because it is written to server's stderr by default.
384 383 if sv:
385 384 cerr = sv.cerr
386 385 else:
387 386 cerr = channeledoutput(fout, 'e')
388 387 traceback.print_exc(file=cerr)
389 388 raise
390 389 finally:
391 390 fin.close()
392 391 try:
393 392 fout.close() # implicit flush() may cause another EPIPE
394 393 except IOError as inst:
395 394 if inst.errno != errno.EPIPE:
396 395 raise
397 396
398 397 class unixservicehandler(object):
399 398 """Set of pluggable operations for unix-mode services
400 399
401 400 Almost all methods except for createcmdserver() are called in the main
402 401 process. You can't pass mutable resources back from createcmdserver().
403 402 """
404 403
405 404 pollinterval = None
406 405
407 406 def __init__(self, ui):
408 407 self.ui = ui
409 408
410 409 def bindsocket(self, sock, address):
411 410 util.bindunixsocket(sock, address)
412 411
413 412 def unlinksocket(self, address):
414 413 os.unlink(address)
415 414
416 415 def printbanner(self, address):
417 416 self.ui.status(_('listening at %s\n') % address)
418 417 self.ui.flush() # avoid buffering of status message
419 418
420 419 def shouldexit(self):
421 420 """True if server should shut down; checked per pollinterval"""
422 421 return False
423 422
424 423 def newconnection(self):
425 424 """Called when main process notices new connection"""
426 425 pass
427 426
428 427 def createcmdserver(self, repo, conn, fin, fout):
429 428 """Create new command server instance; called in the process that
430 429 serves for the current connection"""
431 430 return server(self.ui, repo, fin, fout)
432 431
433 432 class unixforkingservice(object):
434 433 """
435 434 Listens on unix domain socket and forks server per connection
436 435 """
437 436
438 437 def __init__(self, ui, repo, opts, handler=None):
439 438 self.ui = ui
440 439 self.repo = repo
441 440 self.address = opts['address']
442 441 if not util.safehasattr(socket, 'AF_UNIX'):
443 442 raise error.Abort(_('unsupported platform'))
444 443 if not self.address:
445 444 raise error.Abort(_('no socket path specified with --address'))
446 445 self._servicehandler = handler or unixservicehandler(ui)
447 446 self._sock = None
448 447 self._oldsigchldhandler = None
449 448 self._workerpids = set() # updated by signal handler; do not iterate
450 449
451 450 def init(self):
452 451 self._sock = socket.socket(socket.AF_UNIX)
453 452 self._servicehandler.bindsocket(self._sock, self.address)
454 453 self._sock.listen(socket.SOMAXCONN)
455 454 o = signal.signal(signal.SIGCHLD, self._sigchldhandler)
456 455 self._oldsigchldhandler = o
457 456 self._servicehandler.printbanner(self.address)
458 457
459 458 def _cleanup(self):
460 459 signal.signal(signal.SIGCHLD, self._oldsigchldhandler)
461 460 self._sock.close()
462 461 self._servicehandler.unlinksocket(self.address)
463 462 # don't kill child processes as they have active clients, just wait
464 463 self._reapworkers(0)
465 464
466 465 def run(self):
467 466 try:
468 467 self._mainloop()
469 468 finally:
470 469 self._cleanup()
471 470
472 471 def _mainloop(self):
473 472 h = self._servicehandler
474 473 while not h.shouldexit():
475 474 try:
476 475 ready = select.select([self._sock], [], [], h.pollinterval)[0]
477 476 if not ready:
478 477 continue
479 478 conn, _addr = self._sock.accept()
480 479 except (select.error, socket.error) as inst:
481 480 if inst.args[0] == errno.EINTR:
482 481 continue
483 482 raise
484 483
485 484 pid = os.fork()
486 485 if pid:
487 486 try:
488 487 self.ui.debug('forked worker process (pid=%d)\n' % pid)
489 488 self._workerpids.add(pid)
490 489 h.newconnection()
491 490 finally:
492 491 conn.close() # release handle in parent process
493 492 else:
494 493 try:
495 494 self._runworker(conn)
496 495 conn.close()
497 496 os._exit(0)
498 497 except: # never return, hence no re-raises
499 498 try:
500 499 self.ui.traceback(force=True)
501 500 finally:
502 501 os._exit(255)
503 502
504 503 def _sigchldhandler(self, signal, frame):
505 504 self._reapworkers(os.WNOHANG)
506 505
507 506 def _reapworkers(self, options):
508 507 while self._workerpids:
509 508 try:
510 509 pid, _status = os.waitpid(-1, options)
511 510 except OSError as inst:
512 511 if inst.errno == errno.EINTR:
513 512 continue
514 513 if inst.errno != errno.ECHILD:
515 514 raise
516 515 # no child processes at all (reaped by other waitpid()?)
517 516 self._workerpids.clear()
518 517 return
519 518 if pid == 0:
520 519 # no waitable child processes
521 520 return
522 521 self.ui.debug('worker process exited (pid=%d)\n' % pid)
523 522 self._workerpids.discard(pid)
524 523
525 524 def _runworker(self, conn):
526 525 signal.signal(signal.SIGCHLD, self._oldsigchldhandler)
527 526 _initworkerprocess()
528 527 h = self._servicehandler
529 528 try:
530 529 _serverequest(self.ui, self.repo, conn, h.createcmdserver)
531 530 finally:
532 531 gc.collect() # trigger __del__ since worker process uses os._exit
533 532
534 533 _servicemap = {
535 534 'pipe': pipeservice,
536 535 'unix': unixforkingservice,
537 536 }
538 537
539 538 def createservice(ui, repo, opts):
540 539 mode = opts['cmdserver']
541 540 try:
542 541 return _servicemap[mode](ui, repo, opts)
543 542 except KeyError:
544 543 raise error.Abort(_('unknown mode %s') % mode)
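A sketch of how a caller might drive createservice(); the opts keys match the ones read above, though the socket path here is made up:

    service = createservice(ui, repo, {'cmdserver': 'unix',
                                       'address': '/tmp/hg-demo.sock'})
    service.init()  # binds the socket and installs the SIGCHLD handler
    service.run()   # accept loop; forks one worker per connection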
@@ -1,984 +1,984 b''
1 1 # dispatch.py - command dispatching for mercurial
2 2 #
3 3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 from __future__ import absolute_import, print_function
9 9
10 10 import atexit
11 11 import difflib
12 12 import errno
13 13 import os
14 14 import pdb
15 15 import re
16 16 import shlex
17 17 import signal
18 18 import socket
19 19 import sys
20 20 import time
21 21 import traceback
22 22
23 23
24 24 from .i18n import _
25 25
26 26 from . import (
27 27 cmdutil,
28 28 commands,
29 29 debugcommands,
30 30 demandimport,
31 31 encoding,
32 32 error,
33 33 extensions,
34 34 fancyopts,
35 35 fileset,
36 36 hg,
37 37 hook,
38 38 profiling,
39 39 pycompat,
40 40 revset,
41 41 templatefilters,
42 42 templatekw,
43 43 templater,
44 44 ui as uimod,
45 45 util,
46 46 )
47 47
48 48 class request(object):
49 49 def __init__(self, args, ui=None, repo=None, fin=None, fout=None,
50 50 ferr=None):
51 51 self.args = args
52 52 self.ui = ui
53 53 self.repo = repo
54 54
55 55 # input/output/error streams
56 56 self.fin = fin
57 57 self.fout = fout
58 58 self.ferr = ferr
59 59
60 60 def run():
61 61 "run the command in sys.argv"
62 62 sys.exit((dispatch(request(pycompat.sysargv[1:])) or 0) & 255)
63 63
64 64 def _getsimilar(symbols, value):
65 65 sim = lambda x: difflib.SequenceMatcher(None, value, x).ratio()
66 66 # The cutoff for similarity here is pretty arbitrary. It should
67 67 # probably be investigated and tweaked.
68 68 return [s for s in symbols if sim(s) > 0.6]
69 69
70 70 def _reportsimilar(write, similar):
71 71 if len(similar) == 1:
72 72 write(_("(did you mean %s?)\n") % similar[0])
73 73 elif similar:
74 74 ss = ", ".join(sorted(similar))
75 75 write(_("(did you mean one of %s?)\n") % ss)
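A quick illustration of the 0.6 cutoff with hypothetical inputs:

    from difflib import SequenceMatcher

    # ratio() is 2 * matches / total length; 'comit' vs 'commit' gives
    # 10/11, about 0.91, so _getsimilar(['commit', 'status'], 'comit')
    # would return ['commit'] and _reportsimilar() would print
    # "(did you mean commit?)"
    print(SequenceMatcher(None, 'comit', 'commit').ratio())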
76 76
77 77 def _formatparse(write, inst):
78 78 similar = []
79 79 if isinstance(inst, error.UnknownIdentifier):
80 80 # make sure to check fileset first, as revset can invoke fileset
81 81 similar = _getsimilar(inst.symbols, inst.function)
82 82 if len(inst.args) > 1:
83 83 write(_("hg: parse error at %s: %s\n") %
84 84 (inst.args[1], inst.args[0]))
85 85 if (inst.args[0][0] == ' '):
86 86 write(_("unexpected leading whitespace\n"))
87 87 else:
88 88 write(_("hg: parse error: %s\n") % inst.args[0])
89 89 _reportsimilar(write, similar)
90 90 if inst.hint:
91 91 write(_("(%s)\n") % inst.hint)
92 92
93 93 def dispatch(req):
94 94 "run the command specified in req.args"
95 95 if req.ferr:
96 96 ferr = req.ferr
97 97 elif req.ui:
98 98 ferr = req.ui.ferr
99 99 else:
100 ferr = sys.stderr
100 ferr = util.stderr
101 101
102 102 try:
103 103 if not req.ui:
104 104 req.ui = uimod.ui()
105 105 if '--traceback' in req.args:
106 106 req.ui.setconfig('ui', 'traceback', 'on', '--traceback')
107 107
108 108 # set ui streams from the request
109 109 if req.fin:
110 110 req.ui.fin = req.fin
111 111 if req.fout:
112 112 req.ui.fout = req.fout
113 113 if req.ferr:
114 114 req.ui.ferr = req.ferr
115 115 except error.Abort as inst:
116 116 ferr.write(_("abort: %s\n") % inst)
117 117 if inst.hint:
118 118 ferr.write(_("(%s)\n") % inst.hint)
119 119 return -1
120 120 except error.ParseError as inst:
121 121 _formatparse(ferr.write, inst)
122 122 return -1
123 123
124 124 msg = ' '.join(' ' in a and repr(a) or a for a in req.args)
125 125 starttime = time.time()
126 126 ret = None
127 127 try:
128 128 ret = _runcatch(req)
129 129 except KeyboardInterrupt:
130 130 try:
131 131 req.ui.warn(_("interrupted!\n"))
132 132 except IOError as inst:
133 133 if inst.errno != errno.EPIPE:
134 134 raise
135 135 ret = -1
136 136 finally:
137 137 duration = time.time() - starttime
138 138 req.ui.flush()
139 139 req.ui.log("commandfinish", "%s exited %s after %0.2f seconds\n",
140 140 msg, ret or 0, duration)
141 141 return ret
142 142
143 143 def _runcatch(req):
144 144 def catchterm(*args):
145 145 raise error.SignalInterrupt
146 146
147 147 ui = req.ui
148 148 try:
149 149 for name in 'SIGBREAK', 'SIGHUP', 'SIGTERM':
150 150 num = getattr(signal, name, None)
151 151 if num:
152 152 signal.signal(num, catchterm)
153 153 except ValueError:
154 154 pass # happens if called in a thread
155 155
156 156 def _runcatchfunc():
157 157 try:
158 158 debugger = 'pdb'
159 159 debugtrace = {
160 160 'pdb' : pdb.set_trace
161 161 }
162 162 debugmortem = {
163 163 'pdb' : pdb.post_mortem
164 164 }
165 165
166 166 # read --config before doing anything else
167 167 # (e.g. to change trust settings for reading .hg/hgrc)
168 168 cfgs = _parseconfig(req.ui, _earlygetopt(['--config'], req.args))
169 169
170 170 if req.repo:
171 171 # copy configs that were passed on the cmdline (--config) to
172 172 # the repo ui
173 173 for sec, name, val in cfgs:
174 174 req.repo.ui.setconfig(sec, name, val, source='--config')
175 175
176 176 # developer config: ui.debugger
177 177 debugger = ui.config("ui", "debugger")
178 178 debugmod = pdb
179 179 if not debugger or ui.plain():
180 180 # if we are in HGPLAIN mode, then disable custom debugging
181 181 debugger = 'pdb'
182 182 elif '--debugger' in req.args:
183 183 # This import can be slow for fancy debuggers, so only
184 184 # do it when absolutely necessary, i.e. when actual
185 185 # debugging has been requested
186 186 with demandimport.deactivated():
187 187 try:
188 188 debugmod = __import__(debugger)
189 189 except ImportError:
190 190 pass # Leave debugmod = pdb
191 191
192 192 debugtrace[debugger] = debugmod.set_trace
193 193 debugmortem[debugger] = debugmod.post_mortem
194 194
195 195 # enter the debugger before command execution
196 196 if '--debugger' in req.args:
197 197 ui.warn(_("entering debugger - "
198 198 "type c to continue starting hg or h for help\n"))
199 199
200 200 if (debugger != 'pdb' and
201 201 debugtrace[debugger] == debugtrace['pdb']):
202 202 ui.warn(_("%s debugger specified "
203 203 "but its module was not found\n") % debugger)
204 204 with demandimport.deactivated():
205 205 debugtrace[debugger]()
206 206 try:
207 207 return _dispatch(req)
208 208 finally:
209 209 ui.flush()
210 210 except: # re-raises
211 211 # enter the debugger when we hit an exception
212 212 if '--debugger' in req.args:
213 213 traceback.print_exc()
214 214 debugmortem[debugger](sys.exc_info()[2])
215 215 ui.traceback()
216 216 raise
217 217
218 218 return callcatch(ui, _runcatchfunc)
219 219
220 220 def callcatch(ui, func):
221 221 """call func() with global exception handling
222 222
223 223 return func() if no exception happens. otherwise do some error handling
224 224 and return an exit code accordingly.
225 225 """
226 226 try:
227 227 return func()
228 228 # Global exception handling, alphabetically
229 229 # Mercurial-specific first, followed by built-in and library exceptions
230 230 except error.AmbiguousCommand as inst:
231 231 ui.warn(_("hg: command '%s' is ambiguous:\n %s\n") %
232 232 (inst.args[0], " ".join(inst.args[1])))
233 233 except error.ParseError as inst:
234 234 _formatparse(ui.warn, inst)
235 235 return -1
236 236 except error.LockHeld as inst:
237 237 if inst.errno == errno.ETIMEDOUT:
238 238 reason = _('timed out waiting for lock held by %s') % inst.locker
239 239 else:
240 240 reason = _('lock held by %s') % inst.locker
241 241 ui.warn(_("abort: %s: %s\n") % (inst.desc or inst.filename, reason))
242 242 except error.LockUnavailable as inst:
243 243 ui.warn(_("abort: could not lock %s: %s\n") %
244 244 (inst.desc or inst.filename, inst.strerror))
245 245 except error.CommandError as inst:
246 246 if inst.args[0]:
247 247 ui.warn(_("hg %s: %s\n") % (inst.args[0], inst.args[1]))
248 248 commands.help_(ui, inst.args[0], full=False, command=True)
249 249 else:
250 250 ui.warn(_("hg: %s\n") % inst.args[1])
251 251 commands.help_(ui, 'shortlist')
252 252 except error.OutOfBandError as inst:
253 253 if inst.args:
254 254 msg = _("abort: remote error:\n")
255 255 else:
256 256 msg = _("abort: remote error\n")
257 257 ui.warn(msg)
258 258 if inst.args:
259 259 ui.warn(''.join(inst.args))
260 260 if inst.hint:
261 261 ui.warn('(%s)\n' % inst.hint)
262 262 except error.RepoError as inst:
263 263 ui.warn(_("abort: %s!\n") % inst)
264 264 if inst.hint:
265 265 ui.warn(_("(%s)\n") % inst.hint)
266 266 except error.ResponseError as inst:
267 267 ui.warn(_("abort: %s") % inst.args[0])
268 268 if not isinstance(inst.args[1], basestring):
269 269 ui.warn(" %r\n" % (inst.args[1],))
270 270 elif not inst.args[1]:
271 271 ui.warn(_(" empty string\n"))
272 272 else:
273 273 ui.warn("\n%r\n" % util.ellipsis(inst.args[1]))
274 274 except error.CensoredNodeError as inst:
275 275 ui.warn(_("abort: file censored %s!\n") % inst)
276 276 except error.RevlogError as inst:
277 277 ui.warn(_("abort: %s!\n") % inst)
278 278 except error.SignalInterrupt:
279 279 ui.warn(_("killed!\n"))
280 280 except error.UnknownCommand as inst:
281 281 ui.warn(_("hg: unknown command '%s'\n") % inst.args[0])
282 282 try:
283 283 # check if the command is in a disabled extension
284 284 # (but don't check for extensions themselves)
285 285 commands.help_(ui, inst.args[0], unknowncmd=True)
286 286 except (error.UnknownCommand, error.Abort):
287 287 suggested = False
288 288 if len(inst.args) == 2:
289 289 sim = _getsimilar(inst.args[1], inst.args[0])
290 290 if sim:
291 291 _reportsimilar(ui.warn, sim)
292 292 suggested = True
293 293 if not suggested:
294 294 commands.help_(ui, 'shortlist')
295 295 except error.InterventionRequired as inst:
296 296 ui.warn("%s\n" % inst)
297 297 if inst.hint:
298 298 ui.warn(_("(%s)\n") % inst.hint)
299 299 return 1
300 300 except error.Abort as inst:
301 301 ui.warn(_("abort: %s\n") % inst)
302 302 if inst.hint:
303 303 ui.warn(_("(%s)\n") % inst.hint)
304 304 except ImportError as inst:
305 305 ui.warn(_("abort: %s!\n") % inst)
306 306 m = str(inst).split()[-1]
307 307 if m in "mpatch bdiff".split():
308 308 ui.warn(_("(did you forget to compile extensions?)\n"))
309 309 elif m in "zlib".split():
310 310 ui.warn(_("(is your Python install correct?)\n"))
311 311 except IOError as inst:
312 312 if util.safehasattr(inst, "code"):
313 313 ui.warn(_("abort: %s\n") % inst)
314 314 elif util.safehasattr(inst, "reason"):
315 315 try: # usually it is in the form (errno, strerror)
316 316 reason = inst.reason.args[1]
317 317 except (AttributeError, IndexError):
318 318 # it might be anything, for example a string
319 319 reason = inst.reason
320 320 if isinstance(reason, unicode):
321 321 # SSLError of Python 2.7.9 contains a unicode
322 322 reason = reason.encode(encoding.encoding, 'replace')
323 323 ui.warn(_("abort: error: %s\n") % reason)
324 324 elif (util.safehasattr(inst, "args")
325 325 and inst.args and inst.args[0] == errno.EPIPE):
326 326 pass
327 327 elif getattr(inst, "strerror", None):
328 328 if getattr(inst, "filename", None):
329 329 ui.warn(_("abort: %s: %s\n") % (inst.strerror, inst.filename))
330 330 else:
331 331 ui.warn(_("abort: %s\n") % inst.strerror)
332 332 else:
333 333 raise
334 334 except OSError as inst:
335 335 if getattr(inst, "filename", None) is not None:
336 336 ui.warn(_("abort: %s: '%s'\n") % (inst.strerror, inst.filename))
337 337 else:
338 338 ui.warn(_("abort: %s\n") % inst.strerror)
339 339 except KeyboardInterrupt:
340 340 raise
341 341 except MemoryError:
342 342 ui.warn(_("abort: out of memory\n"))
343 343 except SystemExit as inst:
344 344 # Commands shouldn't sys.exit directly, but give a return code.
345 345 # Just in case, catch this and pass the exit code to the caller.
346 346 return inst.code
347 347 except socket.error as inst:
348 348 ui.warn(_("abort: %s\n") % inst.args[-1])
349 349 except: # perhaps re-raises
350 350 if not handlecommandexception(ui):
351 351 raise
352 352
353 353 return -1
354 354
355 355 def aliasargs(fn, givenargs):
356 356 args = getattr(fn, 'args', [])
357 357 if args:
358 358 cmd = ' '.join(map(util.shellquote, args))
359 359
360 360 nums = []
361 361 def replacer(m):
362 362 num = int(m.group(1)) - 1
363 363 nums.append(num)
364 364 if num < len(givenargs):
365 365 return givenargs[num]
366 366 raise error.Abort(_('too few arguments for command alias'))
367 367 cmd = re.sub(r'\$(\d+|\$)', replacer, cmd)
368 368 givenargs = [x for i, x in enumerate(givenargs)
369 369 if i not in nums]
370 370 args = shlex.split(cmd)
371 371 return args + givenargs
372 372
373 373 def aliasinterpolate(name, args, cmd):
374 374 '''interpolate args into cmd for shell aliases
375 375
376 376 This also handles $0, $@ and "$@".
377 377 '''
378 378 # util.interpolate can't deal with "$@" (with quotes) because it's only
379 379 # built to match prefix + patterns.
380 380 replacemap = dict(('$%d' % (i + 1), arg) for i, arg in enumerate(args))
381 381 replacemap['$0'] = name
382 382 replacemap['$$'] = '$'
383 383 replacemap['$@'] = ' '.join(args)
384 384 # Typical Unix shells interpolate "$@" (with quotes) as all the positional
385 385 # parameters, separated out into words. Emulate the same behavior here by
386 386 # quoting the arguments individually. POSIX shells will then typically
387 387 # tokenize each argument into exactly one word.
388 388 replacemap['"$@"'] = ' '.join(util.shellquote(arg) for arg in args)
389 389 # escape '\$' for regex
390 390 regex = '|'.join(replacemap.keys()).replace('$', r'\$')
391 391 r = re.compile(regex)
392 392 return r.sub(lambda x: replacemap[x.group()], cmd)
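An example expansion (alias name and arguments hypothetical; the quoting shown assumes POSIX-style util.shellquote output):

    # aliasinterpolate('igrep', ['foo bar', 'baz'], 'grep -i -- "$@"')
    # -> grep -i -- 'foo bar' 'baz'   (each argument quoted individually)
    # an unquoted $@ would instead expand to the plain join: foo bar baz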
393 393
394 394 class cmdalias(object):
395 395 def __init__(self, name, definition, cmdtable, source):
396 396 self.name = self.cmd = name
397 397 self.cmdname = ''
398 398 self.definition = definition
399 399 self.fn = None
400 400 self.givenargs = []
401 401 self.opts = []
402 402 self.help = ''
403 403 self.badalias = None
404 404 self.unknowncmd = False
405 405 self.source = source
406 406
407 407 try:
408 408 aliases, entry = cmdutil.findcmd(self.name, cmdtable)
409 409 for alias, e in cmdtable.iteritems():
410 410 if e is entry:
411 411 self.cmd = alias
412 412 break
413 413 self.shadows = True
414 414 except error.UnknownCommand:
415 415 self.shadows = False
416 416
417 417 if not self.definition:
418 418 self.badalias = _("no definition for alias '%s'") % self.name
419 419 return
420 420
421 421 if self.definition.startswith('!'):
422 422 self.shell = True
423 423 def fn(ui, *args):
424 424 env = {'HG_ARGS': ' '.join((self.name,) + args)}
425 425 def _checkvar(m):
426 426 if m.groups()[0] == '$':
427 427 return m.group()
428 428 elif int(m.groups()[0]) <= len(args):
429 429 return m.group()
430 430 else:
431 431 ui.debug("No argument found for substitution "
432 432 "of %i variable in alias '%s' definition."
433 433 % (int(m.groups()[0]), self.name))
434 434 return ''
435 435 cmd = re.sub(r'\$(\d+|\$)', _checkvar, self.definition[1:])
436 436 cmd = aliasinterpolate(self.name, args, cmd)
437 437 return ui.system(cmd, environ=env)
438 438 self.fn = fn
439 439 return
440 440
441 441 try:
442 442 args = shlex.split(self.definition)
443 443 except ValueError as inst:
444 444 self.badalias = (_("error in definition for alias '%s': %s")
445 445 % (self.name, inst))
446 446 return
447 447 self.cmdname = cmd = args.pop(0)
448 448 self.givenargs = args
449 449
450 450 for invalidarg in ("--cwd", "-R", "--repository", "--repo", "--config"):
451 451 if _earlygetopt([invalidarg], args):
452 452 self.badalias = (_("error in definition for alias '%s': %s may "
453 453 "only be given on the command line")
454 454 % (self.name, invalidarg))
455 455 return
456 456
457 457 try:
458 458 tableentry = cmdutil.findcmd(cmd, cmdtable, False)[1]
459 459 if len(tableentry) > 2:
460 460 self.fn, self.opts, self.help = tableentry
461 461 else:
462 462 self.fn, self.opts = tableentry
463 463
464 464 if self.help.startswith("hg " + cmd):
465 465 # drop prefix in old-style help lines so hg shows the alias
466 466 self.help = self.help[4 + len(cmd):]
467 467 self.__doc__ = self.fn.__doc__
468 468
469 469 except error.UnknownCommand:
470 470 self.badalias = (_("alias '%s' resolves to unknown command '%s'")
471 471 % (self.name, cmd))
472 472 self.unknowncmd = True
473 473 except error.AmbiguousCommand:
474 474 self.badalias = (_("alias '%s' resolves to ambiguous command '%s'")
475 475 % (self.name, cmd))
476 476
477 477 @property
478 478 def args(self):
479 479 args = map(util.expandpath, self.givenargs)
480 480 return aliasargs(self.fn, args)
481 481
482 482 def __getattr__(self, name):
483 483 adefaults = {'norepo': True, 'optionalrepo': False, 'inferrepo': False}
484 484 if name not in adefaults:
485 485 raise AttributeError(name)
486 486 if self.badalias or util.safehasattr(self, 'shell'):
487 487 return adefaults[name]
488 488 return getattr(self.fn, name)
489 489
490 490 def __call__(self, ui, *args, **opts):
491 491 if self.badalias:
492 492 hint = None
493 493 if self.unknowncmd:
494 494 try:
495 495 # check if the command is in a disabled extension
496 496 cmd, ext = extensions.disabledcmd(ui, self.cmdname)[:2]
497 497 hint = _("'%s' is provided by '%s' extension") % (cmd, ext)
498 498 except error.UnknownCommand:
499 499 pass
500 500 raise error.Abort(self.badalias, hint=hint)
501 501 if self.shadows:
502 502 ui.debug("alias '%s' shadows command '%s'\n" %
503 503 (self.name, self.cmdname))
504 504
505 505 ui.log('commandalias', "alias '%s' expands to '%s'\n",
506 506 self.name, self.definition)
507 507 if util.safehasattr(self, 'shell'):
508 508 return self.fn(ui, *args, **opts)
509 509 else:
510 510 try:
511 511 return util.checksignature(self.fn)(ui, *args, **opts)
512 512 except error.SignatureError:
513 513 args = ' '.join([self.cmdname] + self.args)
514 514 ui.debug("alias '%s' expands to '%s'\n" % (self.name, args))
515 515 raise
516 516
517 517 def addaliases(ui, cmdtable):
518 518 # aliases are processed after extensions have been loaded, so they
519 519 # may use extension commands. Aliases can also use other alias definitions,
520 520 # but only if they have been defined prior to the current definition.
521 521 for alias, definition in ui.configitems('alias'):
522 522 source = ui.configsource('alias', alias)
523 523 aliasdef = cmdalias(alias, definition, cmdtable, source)
524 524
525 525 try:
526 526 olddef = cmdtable[aliasdef.cmd][0]
527 527 if olddef.definition == aliasdef.definition:
528 528 continue
529 529 except (KeyError, AttributeError):
530 530 # definition might not exist or it might not be a cmdalias
531 531 pass
532 532
533 533 cmdtable[aliasdef.name] = (aliasdef, aliasdef.opts, aliasdef.help)
534 534
535 535 def _parse(ui, args):
536 536 options = {}
537 537 cmdoptions = {}
538 538
539 539 try:
540 540 args = fancyopts.fancyopts(args, commands.globalopts, options)
541 541 except fancyopts.getopt.GetoptError as inst:
542 542 raise error.CommandError(None, inst)
543 543
544 544 if args:
545 545 cmd, args = args[0], args[1:]
546 546 aliases, entry = cmdutil.findcmd(cmd, commands.table,
547 547 ui.configbool("ui", "strict"))
548 548 cmd = aliases[0]
549 549 args = aliasargs(entry[0], args)
550 550 defaults = ui.config("defaults", cmd)
551 551 if defaults:
552 552 args = map(util.expandpath, shlex.split(defaults)) + args
553 553 c = list(entry[1])
554 554 else:
555 555 cmd = None
556 556 c = []
557 557
558 558 # combine global options into local
559 559 for o in commands.globalopts:
560 560 c.append((o[0], o[1], options[o[1]], o[3]))
561 561
562 562 try:
563 563 args = fancyopts.fancyopts(args, c, cmdoptions, gnu=True)
564 564 except fancyopts.getopt.GetoptError as inst:
565 565 raise error.CommandError(cmd, inst)
566 566
567 567 # separate global options back out
568 568 for o in commands.globalopts:
569 569 n = o[1]
570 570 options[n] = cmdoptions[n]
571 571 del cmdoptions[n]
572 572
573 573 return (cmd, cmd and entry[0] or None, args, options, cmdoptions)
574 574
575 575 def _parseconfig(ui, config):
576 576 """parse the --config options from the command line"""
577 577 configs = []
578 578
579 579 for cfg in config:
580 580 try:
581 581 name, value = [cfgelem.strip()
582 582 for cfgelem in cfg.split('=', 1)]
583 583 section, name = name.split('.', 1)
584 584 if not section or not name:
585 585 raise IndexError
586 586 ui.setconfig(section, name, value, '--config')
587 587 configs.append((section, name, value))
588 588 except (IndexError, ValueError):
589 589 raise error.Abort(_('malformed --config option: %r '
590 590 '(use --config section.name=value)') % cfg)
591 591
592 592 return configs
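For example (values hypothetical), the parsing above behaves like this:

    # --config ui.verbose=true    -> ('ui', 'verbose', 'true')
    # --config 'ui.editor=vi -p'  -> ('ui', 'editor', 'vi -p')
    # --config verbose=true       -> aborts: no section.name before '='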
593 593
594 594 def _earlygetopt(aliases, args):
595 595 """Return list of values for an option (or aliases).
596 596
597 597 The values are listed in the order they appear in args.
598 598 The options and values are removed from args.
599 599
600 600 >>> args = ['x', '--cwd', 'foo', 'y']
601 601 >>> _earlygetopt(['--cwd'], args), args
602 602 (['foo'], ['x', 'y'])
603 603
604 604 >>> args = ['x', '--cwd=bar', 'y']
605 605 >>> _earlygetopt(['--cwd'], args), args
606 606 (['bar'], ['x', 'y'])
607 607
608 608 >>> args = ['x', '-R', 'foo', 'y']
609 609 >>> _earlygetopt(['-R'], args), args
610 610 (['foo'], ['x', 'y'])
611 611
612 612 >>> args = ['x', '-Rbar', 'y']
613 613 >>> _earlygetopt(['-R'], args), args
614 614 (['bar'], ['x', 'y'])
615 615 """
616 616 try:
617 617 argcount = args.index("--")
618 618 except ValueError:
619 619 argcount = len(args)
620 620 shortopts = [opt for opt in aliases if len(opt) == 2]
621 621 values = []
622 622 pos = 0
623 623 while pos < argcount:
624 624 fullarg = arg = args[pos]
625 625 equals = arg.find('=')
626 626 if equals > -1:
627 627 arg = arg[:equals]
628 628 if arg in aliases:
629 629 del args[pos]
630 630 if equals > -1:
631 631 values.append(fullarg[equals + 1:])
632 632 argcount -= 1
633 633 else:
634 634 if pos + 1 >= argcount:
635 635 # ignore and let getopt report an error if there is no value
636 636 break
637 637 values.append(args.pop(pos))
638 638 argcount -= 2
639 639 elif arg[:2] in shortopts:
640 640 # short option can have no following space, e.g. hg log -Rfoo
641 641 values.append(args.pop(pos)[2:])
642 642 argcount -= 1
643 643 else:
644 644 pos += 1
645 645 return values
646 646
647 647 def runcommand(lui, repo, cmd, fullargs, ui, options, d, cmdpats, cmdoptions):
648 648 # run pre-hook, and abort if it fails
649 649 hook.hook(lui, repo, "pre-%s" % cmd, True, args=" ".join(fullargs),
650 650 pats=cmdpats, opts=cmdoptions)
651 651 try:
652 652 ret = _runcommand(ui, options, cmd, d)
653 653 # run post-hook, passing command result
654 654 hook.hook(lui, repo, "post-%s" % cmd, False, args=" ".join(fullargs),
655 655 result=ret, pats=cmdpats, opts=cmdoptions)
656 656 except Exception:
657 657 # run failure hook and re-raise
658 658 hook.hook(lui, repo, "fail-%s" % cmd, False, args=" ".join(fullargs),
659 659 pats=cmdpats, opts=cmdoptions)
660 660 raise
661 661 return ret
662 662
663 663 def _getlocal(ui, rpath, wd=None):
664 664 """Return (path, local ui object) for the given target path.
665 665
666 666 Takes paths in [cwd]/.hg/hgrc into account.
667 667 """
668 668 if wd is None:
669 669 try:
670 670 wd = os.getcwd()
671 671 except OSError as e:
672 672 raise error.Abort(_("error getting current working directory: %s") %
673 673 e.strerror)
674 674 path = cmdutil.findrepo(wd) or ""
675 675 if not path:
676 676 lui = ui
677 677 else:
678 678 lui = ui.copy()
679 679 lui.readconfig(os.path.join(path, ".hg", "hgrc"), path)
680 680
681 681 if rpath and rpath[-1]:
682 682 path = lui.expandpath(rpath[-1])
683 683 lui = ui.copy()
684 684 lui.readconfig(os.path.join(path, ".hg", "hgrc"), path)
685 685
686 686 return path, lui
687 687
688 688 def _checkshellalias(lui, ui, args):
689 689 """Return the function to run the shell alias, if it is required"""
690 690 options = {}
691 691
692 692 try:
693 693 args = fancyopts.fancyopts(args, commands.globalopts, options)
694 694 except fancyopts.getopt.GetoptError:
695 695 return
696 696
697 697 if not args:
698 698 return
699 699
700 700 cmdtable = commands.table
701 701
702 702 cmd = args[0]
703 703 try:
704 704 strict = ui.configbool("ui", "strict")
705 705 aliases, entry = cmdutil.findcmd(cmd, cmdtable, strict)
706 706 except (error.AmbiguousCommand, error.UnknownCommand):
707 707 return
708 708
709 709 cmd = aliases[0]
710 710 fn = entry[0]
711 711
712 712 if cmd and util.safehasattr(fn, 'shell'):
713 713 d = lambda: fn(ui, *args[1:])
714 714 return lambda: runcommand(lui, None, cmd, args[:1], ui, options, d,
715 715 [], {})
716 716
717 717 def _cmdattr(ui, cmd, func, attr):
718 718 try:
719 719 return getattr(func, attr)
720 720 except AttributeError:
721 721 ui.deprecwarn("missing attribute '%s', use @command decorator "
722 722 "to register '%s'" % (attr, cmd), '3.8')
723 723 return False
724 724
725 725 _loaded = set()
726 726
727 727 # list of (objname, loadermod, loadername) tuple:
728 728 # - objname is the name of an object in extension module, from which
729 729 # extra information is loaded
730 730 # - loadermod is the module where loader is placed
731 731 # - loadername is the name of the function, which takes (ui, extensionname,
732 732 # extraobj) arguments
733 733 extraloaders = [
734 734 ('cmdtable', commands, 'loadcmdtable'),
735 735 ('filesetpredicate', fileset, 'loadpredicate'),
736 736 ('revsetpredicate', revset, 'loadpredicate'),
737 737 ('templatefilter', templatefilters, 'loadfilter'),
738 738 ('templatefunc', templater, 'loadfunction'),
739 739 ('templatekeyword', templatekw, 'loadkeyword'),
740 740 ]
741 741
742 742 def _dispatch(req):
743 743 args = req.args
744 744 ui = req.ui
745 745
746 746 # check for cwd
747 747 cwd = _earlygetopt(['--cwd'], args)
748 748 if cwd:
749 749 os.chdir(cwd[-1])
750 750
751 751 rpath = _earlygetopt(["-R", "--repository", "--repo"], args)
752 752 path, lui = _getlocal(ui, rpath)
753 753
754 754 # Configure extensions in phases: uisetup, extsetup, cmdtable, and
755 755 # reposetup. Programs like TortoiseHg will call _dispatch several
756 756 # times so we keep track of configured extensions in _loaded.
757 757 extensions.loadall(lui)
758 758 exts = [ext for ext in extensions.extensions() if ext[0] not in _loaded]
759 759 # Propagate any changes to lui.__class__ by extensions
760 760 ui.__class__ = lui.__class__
761 761
762 762 # (uisetup and extsetup are handled in extensions.loadall)
763 763
764 764 for name, module in exts:
765 765 for objname, loadermod, loadername in extraloaders:
766 766 extraobj = getattr(module, objname, None)
767 767 if extraobj is not None:
768 768 getattr(loadermod, loadername)(ui, name, extraobj)
769 769 _loaded.add(name)
770 770
771 771 # (reposetup is handled in hg.repository)
772 772
773 773 # Side-effect of accessing: the debugcommands module is guaranteed to
774 774 # be imported and commands.table is populated.
775 775 debugcommands.command
776 776
777 777 addaliases(lui, commands.table)
778 778
779 779 # All aliases and commands are completely defined, now.
780 780 # Check abbreviation/ambiguity of shell alias.
781 781 shellaliasfn = _checkshellalias(lui, ui, args)
782 782 if shellaliasfn:
783 783 with profiling.maybeprofile(lui):
784 784 return shellaliasfn()
785 785
786 786 # check for fallback encoding
787 787 fallback = lui.config('ui', 'fallbackencoding')
788 788 if fallback:
789 789 encoding.fallbackencoding = fallback
790 790
791 791 fullargs = args
792 792 cmd, func, args, options, cmdoptions = _parse(lui, args)
793 793
794 794 if options["config"]:
795 795 raise error.Abort(_("option --config may not be abbreviated!"))
796 796 if options["cwd"]:
797 797 raise error.Abort(_("option --cwd may not be abbreviated!"))
798 798 if options["repository"]:
799 799 raise error.Abort(_(
800 800 "option -R has to be separated from other options (e.g. not -qR) "
801 801 "and --repository may only be abbreviated as --repo!"))
802 802
803 803 if options["encoding"]:
804 804 encoding.encoding = options["encoding"]
805 805 if options["encodingmode"]:
806 806 encoding.encodingmode = options["encodingmode"]
807 807 if options["time"]:
808 808 def get_times():
809 809 t = os.times()
810 810 if t[4] == 0.0: # Windows leaves this as zero, so use time.clock()
811 811 t = (t[0], t[1], t[2], t[3], time.clock())
812 812 return t
813 813 s = get_times()
814 814 def print_time():
815 815 t = get_times()
816 816 ui.warn(_("time: real %.3f secs (user %.3f+%.3f sys %.3f+%.3f)\n") %
817 817 (t[4]-s[4], t[0]-s[0], t[2]-s[2], t[1]-s[1], t[3]-s[3]))
818 818 atexit.register(print_time)
819 819
820 820 uis = set([ui, lui])
821 821
822 822 if req.repo:
823 823 uis.add(req.repo.ui)
824 824
825 825 if options['verbose'] or options['debug'] or options['quiet']:
826 826 for opt in ('verbose', 'debug', 'quiet'):
827 827 val = str(bool(options[opt]))
828 828 for ui_ in uis:
829 829 ui_.setconfig('ui', opt, val, '--' + opt)
830 830
831 831 if options['profile']:
832 832 for ui_ in uis:
833 833 ui_.setconfig('profiling', 'enabled', 'true', '--profile')
834 834
835 835 if options['traceback']:
836 836 for ui_ in uis:
837 837 ui_.setconfig('ui', 'traceback', 'on', '--traceback')
838 838
839 839 if options['noninteractive']:
840 840 for ui_ in uis:
841 841 ui_.setconfig('ui', 'interactive', 'off', '-y')
842 842
843 843 if cmdoptions.get('insecure', False):
844 844 for ui_ in uis:
845 845 ui_.insecureconnections = True
846 846
847 847 if options['version']:
848 848 return commands.version_(ui)
849 849 if options['help']:
850 850 return commands.help_(ui, cmd, command=cmd is not None)
851 851 elif not cmd:
852 852 return commands.help_(ui, 'shortlist')
853 853
854 854 with profiling.maybeprofile(lui):
855 855 repo = None
856 856 cmdpats = args[:]
857 857 if not _cmdattr(ui, cmd, func, 'norepo'):
858 858 # use the repo from the request only if we don't have -R
859 859 if not rpath and not cwd:
860 860 repo = req.repo
861 861
862 862 if repo:
863 863 # set the descriptors of the repo ui to those of ui
864 864 repo.ui.fin = ui.fin
865 865 repo.ui.fout = ui.fout
866 866 repo.ui.ferr = ui.ferr
867 867 else:
868 868 try:
869 869 repo = hg.repository(ui, path=path)
870 870 if not repo.local():
871 871 raise error.Abort(_("repository '%s' is not local")
872 872 % path)
873 873 repo.ui.setconfig("bundle", "mainreporoot", repo.root,
874 874 'repo')
875 875 except error.RequirementError:
876 876 raise
877 877 except error.RepoError:
878 878 if rpath and rpath[-1]: # invalid -R path
879 879 raise
880 880 if not _cmdattr(ui, cmd, func, 'optionalrepo'):
881 881 if (_cmdattr(ui, cmd, func, 'inferrepo') and
882 882 args and not path):
883 883 # try to infer -R from command args
884 884 repos = map(cmdutil.findrepo, args)
885 885 guess = repos[0]
886 886 if guess and repos.count(guess) == len(repos):
887 887 req.args = ['--repository', guess] + fullargs
888 888 return _dispatch(req)
889 889 if not path:
890 890 raise error.RepoError(_("no repository found in"
891 891 " '%s' (.hg not found)")
892 892 % os.getcwd())
893 893 raise
894 894 if repo:
895 895 ui = repo.ui
896 896 if options['hidden']:
897 897 repo = repo.unfiltered()
898 898 args.insert(0, repo)
899 899 elif rpath:
900 900 ui.warn(_("warning: --repository ignored\n"))
901 901
902 902 msg = ' '.join(' ' in a and repr(a) or a for a in fullargs)
903 903 ui.log("command", '%s\n', msg)
904 904 d = lambda: util.checksignature(func)(ui, *args, **cmdoptions)
905 905 try:
906 906 return runcommand(lui, repo, cmd, fullargs, ui, options, d,
907 907 cmdpats, cmdoptions)
908 908 finally:
909 909 if repo and repo != req.repo:
910 910 repo.close()
911 911
912 912 def _runcommand(ui, options, cmd, cmdfunc):
913 913 """Run a command function, possibly with profiling enabled."""
914 914 try:
915 915 return cmdfunc()
916 916 except error.SignatureError:
917 917 raise error.CommandError(cmd, _('invalid arguments'))
918 918
919 919 def _exceptionwarning(ui):
920 920 """Produce a warning message for the current active exception"""
921 921
922 922 # For compatibility checking, we discard the portion of the hg
923 923 # version after the +, on the assumption that if a "normal
924 924 # user" is running a build with a + in it, the packager
925 925 # probably built from fairly close to a tag, and anyone with a
926 926 # 'make local' copy of hg (where the version number can be out
927 927 # of date) will be clueful enough to notice the implausible
928 928 # version number and try updating.
929 929 ct = util.versiontuple(n=2)
930 930 worst = None, ct, ''
931 931 if ui.config('ui', 'supportcontact', None) is None:
932 932 for name, mod in extensions.extensions():
933 933 testedwith = getattr(mod, 'testedwith', '')
934 934 report = getattr(mod, 'buglink', _('the extension author.'))
935 935 if not testedwith.strip():
936 936 # We found an untested extension. It's likely the culprit.
937 937 worst = name, 'unknown', report
938 938 break
939 939
940 940 # Never blame on extensions bundled with Mercurial.
941 941 if extensions.ismoduleinternal(mod):
942 942 continue
943 943
944 944 tested = [util.versiontuple(t, 2) for t in testedwith.split()]
945 945 if ct in tested:
946 946 continue
947 947
948 948 lower = [t for t in tested if t < ct]
949 949 nearest = max(lower or tested)
950 950 if worst[0] is None or nearest < worst[1]:
951 951 worst = name, nearest, report
952 952 if worst[0] is not None:
953 953 name, testedwith, report = worst
954 954 if not isinstance(testedwith, str):
955 955 testedwith = '.'.join([str(c) for c in testedwith])
956 956 warning = (_('** Unknown exception encountered with '
957 957 'possibly-broken third-party extension %s\n'
958 958 '** which supports versions %s of Mercurial.\n'
959 959 '** Please disable %s and try your action again.\n'
960 960 '** If that fixes the bug please report it to %s\n')
961 961 % (name, testedwith, name, report))
962 962 else:
963 963 bugtracker = ui.config('ui', 'supportcontact', None)
964 964 if bugtracker is None:
965 965 bugtracker = _("https://mercurial-scm.org/wiki/BugTracker")
966 966 warning = (_("** unknown exception encountered, "
967 967 "please report by visiting\n** ") + bugtracker + '\n')
968 968 warning += ((_("** Python %s\n") % sys.version.replace('\n', '')) +
969 969 (_("** Mercurial Distributed SCM (version %s)\n") %
970 970 util.version()) +
971 971 (_("** Extensions loaded: %s\n") %
972 972 ", ".join([x[0] for x in extensions.extensions()])))
973 973 return warning
974 974
975 975 def handlecommandexception(ui):
976 976 """Produce a warning message for broken commands
977 977
978 978 Called when handling an exception; the exception is reraised if
979 979 this function returns False, ignored otherwise.
980 980 """
981 981 warning = _exceptionwarning(ui)
982 982 ui.log("commandexception", "%s\n%s\n", warning, traceback.format_exc())
983 983 ui.warn(warning)
984 984 return False # re-raise the exception
@@ -1,92 +1,91 b''
1 1 # hgweb/wsgicgi.py - CGI->WSGI translator
2 2 #
3 3 # Copyright 2006 Eric Hopper <hopper@omnifarious.org>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7 #
8 8 # This was originally copied from the public domain code at
9 9 # http://www.python.org/dev/peps/pep-0333/#the-server-gateway-side
10 10
11 11 from __future__ import absolute_import
12 12
13 13 import os
14 import sys
15 14
16 15 from .. import (
17 16 util,
18 17 )
19 18
20 19 from . import (
21 20 common,
22 21 )
23 22
24 23 def launch(application):
25 util.setbinary(sys.stdin)
26 util.setbinary(sys.stdout)
24 util.setbinary(util.stdin)
25 util.setbinary(util.stdout)
27 26
28 27 environ = dict(os.environ.iteritems())
29 28 environ.setdefault('PATH_INFO', '')
30 29 if environ.get('SERVER_SOFTWARE', '').startswith('Microsoft-IIS'):
31 30 # IIS includes script_name in PATH_INFO
32 31 scriptname = environ['SCRIPT_NAME']
33 32 if environ['PATH_INFO'].startswith(scriptname):
34 33 environ['PATH_INFO'] = environ['PATH_INFO'][len(scriptname):]
35 34
36 stdin = sys.stdin
35 stdin = util.stdin
37 36 if environ.get('HTTP_EXPECT', '').lower() == '100-continue':
38 stdin = common.continuereader(stdin, sys.stdout.write)
37 stdin = common.continuereader(stdin, util.stdout.write)
39 38
40 39 environ['wsgi.input'] = stdin
41 environ['wsgi.errors'] = sys.stderr
40 environ['wsgi.errors'] = util.stderr
42 41 environ['wsgi.version'] = (1, 0)
43 42 environ['wsgi.multithread'] = False
44 43 environ['wsgi.multiprocess'] = True
45 44 environ['wsgi.run_once'] = True
46 45
47 46 if environ.get('HTTPS', 'off').lower() in ('on', '1', 'yes'):
48 47 environ['wsgi.url_scheme'] = 'https'
49 48 else:
50 49 environ['wsgi.url_scheme'] = 'http'
51 50
52 51 headers_set = []
53 52 headers_sent = []
54 out = sys.stdout
53 out = util.stdout
55 54
56 55 def write(data):
57 56 if not headers_set:
58 57 raise AssertionError("write() before start_response()")
59 58
60 59 elif not headers_sent:
61 60 # Before the first output, send the stored headers
62 61 status, response_headers = headers_sent[:] = headers_set
63 62 out.write('Status: %s\r\n' % status)
64 63 for header in response_headers:
65 64 out.write('%s: %s\r\n' % header)
66 65 out.write('\r\n')
67 66
68 67 out.write(data)
69 68 out.flush()
70 69
71 70 def start_response(status, response_headers, exc_info=None):
72 71 if exc_info:
73 72 try:
74 73 if headers_sent:
75 74 # Re-raise original exception if headers sent
76 75 raise exc_info[0](exc_info[1], exc_info[2])
77 76 finally:
78 77 exc_info = None # avoid dangling circular ref
79 78 elif headers_set:
80 79 raise AssertionError("Headers already set!")
81 80
82 81 headers_set[:] = [status, response_headers]
83 82 return write
84 83
85 84 content = application(environ, start_response)
86 85 try:
87 86 for chunk in content:
88 87 write(chunk)
89 88 if not headers_sent:
90 89 write('') # send headers now if body was empty
91 90 finally:
92 91 getattr(content, 'close', lambda : None)()
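A minimal sketch of a CGI entry point built on launch(); hello_app is a made-up WSGI application, not part of Mercurial:

    def hello_app(environ, start_response):
        # simplest possible WSGI application: one plain-text response
        start_response('200 OK', [('Content-Type', 'text/plain')])
        return ['hello\n']

    launch(hello_app)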
@@ -1,265 +1,265 b''
1 1 # hook.py - hook support for mercurial
2 2 #
3 3 # Copyright 2007 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 from __future__ import absolute_import
9 9
10 10 import os
11 11 import sys
12 12 import time
13 13
14 14 from .i18n import _
15 15 from . import (
16 16 demandimport,
17 17 error,
18 18 extensions,
19 19 util,
20 20 )
21 21
22 22 def _pythonhook(ui, repo, name, hname, funcname, args, throw):
23 23 '''call python hook. hook is a callable object, looked up as
24 24 name in a python module. if the callable returns "true", the hook
25 25 fails, else it passes. if the hook raises an exception, that is
26 26 treated as hook failure. the exception propagates if throw is "true".
27 27
28 28 reason for "true" meaning "hook failed" is so that
29 29 unmodified commands (e.g. mercurial.commands.update) can
30 30 be run as hooks without wrappers to convert return values.'''
31 31
32 32 if callable(funcname):
33 33 obj = funcname
34 34 funcname = obj.__module__ + "." + obj.__name__
35 35 else:
36 36 d = funcname.rfind('.')
37 37 if d == -1:
38 38 raise error.HookLoadError(
39 39 _('%s hook is invalid: "%s" not in a module')
40 40 % (hname, funcname))
41 41 modname = funcname[:d]
42 42 oldpaths = sys.path
43 43 if util.mainfrozen():
44 44 # binary installs require sys.path manipulation
45 45 modpath, modfile = os.path.split(modname)
46 46 if modpath and modfile:
47 47 sys.path = sys.path[:] + [modpath]
48 48 modname = modfile
49 49 with demandimport.deactivated():
50 50 try:
51 51 obj = __import__(modname)
52 52 except (ImportError, SyntaxError):
53 53 e1 = sys.exc_info()
54 54 try:
55 55 # extensions are loaded with hgext_ prefix
56 56 obj = __import__("hgext_%s" % modname)
57 57 except (ImportError, SyntaxError):
58 58 e2 = sys.exc_info()
59 59 if ui.tracebackflag:
60 60 ui.warn(_('exception from first failed import '
61 61 'attempt:\n'))
62 62 ui.traceback(e1)
63 63 if ui.tracebackflag:
64 64 ui.warn(_('exception from second failed import '
65 65 'attempt:\n'))
66 66 ui.traceback(e2)
67 67
68 68 if not ui.tracebackflag:
69 69 tracebackhint = _(
70 70 'run with --traceback for stack trace')
71 71 else:
72 72 tracebackhint = None
73 73 raise error.HookLoadError(
74 74 _('%s hook is invalid: import of "%s" failed') %
75 75 (hname, modname), hint=tracebackhint)
76 76 sys.path = oldpaths
77 77 try:
78 78 for p in funcname.split('.')[1:]:
79 79 obj = getattr(obj, p)
80 80 except AttributeError:
81 81 raise error.HookLoadError(
82 82 _('%s hook is invalid: "%s" is not defined')
83 83 % (hname, funcname))
84 84 if not callable(obj):
85 85 raise error.HookLoadError(
86 86 _('%s hook is invalid: "%s" is not callable')
87 87 % (hname, funcname))
88 88
89 89 ui.note(_("calling hook %s: %s\n") % (hname, funcname))
90 90 starttime = time.time()
91 91
92 92 try:
93 93 r = obj(ui=ui, repo=repo, hooktype=name, **args)
94 94 except Exception as exc:
95 95 if isinstance(exc, error.Abort):
96 96 ui.warn(_('error: %s hook failed: %s\n') %
97 97 (hname, exc.args[0]))
98 98 else:
99 99 ui.warn(_('error: %s hook raised an exception: '
100 100 '%s\n') % (hname, exc))
101 101 if throw:
102 102 raise
103 103 if not ui.tracebackflag:
104 104 ui.warn(_('(run with --traceback for stack trace)\n'))
105 105 ui.traceback()
106 106 return True, True
107 107 finally:
108 108 duration = time.time() - starttime
109 109 ui.log('pythonhook', 'pythonhook-%s: %s finished in %0.2f seconds\n',
110 110 name, funcname, duration)
111 111 if r:
112 112 if throw:
113 113 raise error.HookAbort(_('%s hook failed') % hname)
114 114 ui.warn(_('warning: %s hook failed\n') % hname)
115 115 return r, False
116 116
117 117 def _exthook(ui, repo, name, cmd, args, throw):
118 118 ui.note(_("running hook %s: %s\n") % (name, cmd))
119 119
120 120 starttime = time.time()
121 121 env = {}
122 122
123 123 # make in-memory changes visible to external process
124 124 if repo is not None:
125 125 tr = repo.currenttransaction()
126 126 repo.dirstate.write(tr)
127 127 if tr and tr.writepending():
128 128 env['HG_PENDING'] = repo.root
129 129
130 130 for k, v in args.iteritems():
131 131 if callable(v):
132 132 v = v()
133 133 if isinstance(v, dict):
134 134 # make the dictionary element order stable across Python
135 135 # implementations
136 136 v = ('{' +
137 137 ', '.join('%r: %r' % i for i in sorted(v.iteritems())) +
138 138 '}')
139 139 env['HG_' + k.upper()] = v
140 140
141 141 if repo:
142 142 cwd = repo.root
143 143 else:
144 144 cwd = os.getcwd()
145 145 r = ui.system(cmd, environ=env, cwd=cwd)
146 146
147 147 duration = time.time() - starttime
148 148 ui.log('exthook', 'exthook-%s: %s finished in %0.2f seconds\n',
149 149 name, cmd, duration)
150 150 if r:
151 151 desc, r = util.explainexit(r)
152 152 if throw:
153 153 raise error.HookAbort(_('%s hook %s') % (name, desc))
154 154 ui.warn(_('warning: %s hook %s\n') % (name, desc))
155 155 return r
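To illustrate the environment construction above (argument names hypothetical):

    # args = {'node': 'abc123', 'parents': {'p1': 'def456'}}
    # -> HG_NODE=abc123
    # -> HG_PARENTS={'p1': 'def456'}   (dicts rendered with sorted keys)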
156 156
157 157 # represent an untrusted hook command
158 158 _fromuntrusted = object()
159 159
160 160 def _allhooks(ui):
161 161 """return a list of (hook-id, cmd) pairs sorted by priority"""
162 162 hooks = _hookitems(ui)
163 163 # Be careful in this section: propagating the real commands from untrusted
164 164 # sources would create a security vulnerability. make sure anything altered
165 165 # in this section uses "_fromuntrusted" as its command.
166 166 untrustedhooks = _hookitems(ui, _untrusted=True)
167 167 for name, value in untrustedhooks.items():
168 168 trustedvalue = hooks.get(name, (None, None, name, _fromuntrusted))
169 169 if value != trustedvalue:
170 170 (lp, lo, lk, lv) = trustedvalue
171 171 hooks[name] = (lp, lo, lk, _fromuntrusted)
172 172 # (end of the security sensitive section)
173 173 return [(k, v) for p, o, k, v in sorted(hooks.values())]
174 174
175 175 def _hookitems(ui, _untrusted=False):
176 176 """return all hooks items ready to be sorted"""
177 177 hooks = {}
178 178 for name, cmd in ui.configitems('hooks', untrusted=_untrusted):
179 179 if not name.startswith('priority'):
180 180 priority = ui.configint('hooks', 'priority.%s' % name, 0)
181 181 hooks[name] = (-priority, len(hooks), name, cmd)
182 182 return hooks
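An example hgrc fragment (hook names hypothetical) showing how the priority.* keys feed the sort above; a higher priority sorts first because it is negated:

    [hooks]
    pretxncommit.lint = ./lint.sh
    pretxncommit.sign = ./sign.sh
    priority.pretxncommit.sign = 10

    # _allhooks() then yields ('pretxncommit.sign', './sign.sh') before
    # ('pretxncommit.lint', './lint.sh')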
183 183
184 184 _redirect = False
185 185 def redirect(state):
186 186 global _redirect
187 187 _redirect = state
188 188
189 189 def hook(ui, repo, name, throw=False, **args):
190 190 if not ui.callhooks:
191 191 return False
192 192
193 193 hooks = []
194 194 for hname, cmd in _allhooks(ui):
195 195 if hname.split('.')[0] == name and cmd:
196 196 hooks.append((hname, cmd))
197 197
198 198 res = runhooks(ui, repo, name, hooks, throw=throw, **args)
199 199 r = False
200 200 for hname, cmd in hooks:
201 201 r = res[hname][0] or r
202 202 return r
203 203
204 204 def runhooks(ui, repo, name, hooks, throw=False, **args):
205 205 res = {}
206 206 oldstdout = -1
207 207
208 208 try:
209 209 for hname, cmd in hooks:
210 210 if oldstdout == -1 and _redirect:
211 211 try:
212 stdoutno = sys.stdout.fileno()
213 stderrno = sys.stderr.fileno()
212 stdoutno = util.stdout.fileno()
213 stderrno = util.stderr.fileno()
214 214 # temporarily redirect stdout to stderr, if possible
215 215 if stdoutno >= 0 and stderrno >= 0:
216 sys.stdout.flush()
216 util.stdout.flush()
217 217 oldstdout = os.dup(stdoutno)
218 218 os.dup2(stderrno, stdoutno)
219 219 except (OSError, AttributeError):
220 220 # files seem to be bogus, give up on redirecting (WSGI, etc)
221 221 pass
222 222
223 223 if cmd is _fromuntrusted:
224 224 if throw:
225 225 raise error.HookAbort(
226 226 _('untrusted hook %s not executed') % name,
227 227 hint = _("see 'hg help config.trusted'"))
228 228 ui.warn(_('warning: untrusted hook %s not executed\n') % name)
229 229 r = 1
230 230 raised = False
231 231 elif callable(cmd):
232 232 r, raised = _pythonhook(ui, repo, name, hname, cmd, args, throw)
233 233 elif cmd.startswith('python:'):
234 234 if cmd.count(':') >= 2:
235 235 path, cmd = cmd[7:].rsplit(':', 1)
236 236 path = util.expandpath(path)
237 237 if repo:
238 238 path = os.path.join(repo.root, path)
239 239 try:
240 240 mod = extensions.loadpath(path, 'hghook.%s' % hname)
241 241 except Exception:
242 242 ui.write(_("loading %s hook failed:\n") % hname)
243 243 raise
244 244 hookfn = getattr(mod, cmd)
245 245 else:
246 246 hookfn = cmd[7:].strip()
247 247 r, raised = _pythonhook(ui, repo, name, hname, hookfn, args,
248 248 throw)
249 249 else:
250 250 r = _exthook(ui, repo, hname, cmd, args, throw)
251 251 raised = False
252 252
253 253 res[hname] = r, raised
254 254
255 255 # stderr is fully buffered on Windows when connected to a pipe.
256 256 # A forcible flush is required to make small stderr data on the
257 257 # remote side available to the client immediately.
258 sys.stderr.flush()
258 util.stderr.flush()
259 259 finally:
260 260 if _redirect and oldstdout >= 0:
261 sys.stdout.flush() # write hook output to stderr fd
261 util.stdout.flush() # write hook output to stderr fd
262 262 os.dup2(oldstdout, stdoutno)
263 263 os.close(oldstdout)
264 264
265 265 return res
@@ -1,758 +1,758 b''
1 1 # This library is free software; you can redistribute it and/or
2 2 # modify it under the terms of the GNU Lesser General Public
3 3 # License as published by the Free Software Foundation; either
4 4 # version 2.1 of the License, or (at your option) any later version.
5 5 #
6 6 # This library is distributed in the hope that it will be useful,
7 7 # but WITHOUT ANY WARRANTY; without even the implied warranty of
8 8 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
9 9 # Lesser General Public License for more details.
10 10 #
11 11 # You should have received a copy of the GNU Lesser General Public
12 12 # License along with this library; if not, see
13 13 # <http://www.gnu.org/licenses/>.
14 14
15 15 # This file is part of urlgrabber, a high-level cross-protocol url-grabber
16 16 # Copyright 2002-2004 Michael D. Stenner, Ryan Tomayko
17 17
18 18 # Modified by Benoit Boissinot:
19 19 # - fix for digest auth (inspired from urllib2.py @ Python v2.4)
20 20 # Modified by Dirkjan Ochtman:
21 21 # - import md5 function from a local util module
22 22 # Modified by Augie Fackler:
23 23 # - add safesend method and use it to prevent broken pipe errors
24 24 # on large POST requests
25 25
26 26 """An HTTP handler for urllib2 that supports HTTP 1.1 and keepalive.
27 27
28 28 >>> import urllib2
29 29 >>> from keepalive import HTTPHandler
30 30 >>> keepalive_handler = HTTPHandler()
31 31 >>> opener = urlreq.buildopener(keepalive_handler)
32 32 >>> urlreq.installopener(opener)
33 33 >>>
34 34 >>> fo = urlreq.urlopen('http://www.python.org')
35 35
36 36 If a connection to a given host is requested, and all of the existing
37 37 connections are still in use, another connection will be opened. If
38 38 the handler tries to use an existing connection but it fails in some
39 39 way, it will be closed and removed from the pool.
40 40
41 41 To remove the handler, simply re-run build_opener with no arguments, and
42 42 install that opener.
43 43
44 44 You can explicitly close connections by using the close_connection()
45 45 method of the returned file-like object (described below) or you can
46 46 use the handler methods:
47 47
48 48 close_connection(host)
49 49 close_all()
50 50 open_connections()
51 51
52 52 NOTE: using the close_connection and close_all methods of the handler
53 53 should be done with care when using multiple threads.
54 54 * there is nothing that prevents another thread from creating new
55 55 connections immediately after connections are closed
56 56 * no checks are done to prevent in-use connections from being closed
57 57
58 58 >>> keepalive_handler.close_all()
59 59
60 60 EXTRA ATTRIBUTES AND METHODS
61 61
62 62 Upon a status of 200, the object returned has a few additional
63 63 attributes and methods, which should not be used if you want to
64 64 remain consistent with the normal urllib2-returned objects:
65 65
66 66 close_connection() - close the connection to the host
67 67 readlines() - you know, readlines()
68 68 status - the return status (e.g. 404)
69 69 reason - English translation of status (e.g. 'File not found')
70 70
71 71 If you want the best of both worlds, use this inside an
72 72 AttributeError-catching try:
73 73
74 74 >>> try: status = fo.status
75 75 >>> except AttributeError: status = None
76 76
77 77 Unfortunately, these are ONLY there if status == 200, so it's not
78 78 easy to distinguish between non-200 responses. The reason is that
79 79 urllib2 tries to do clever things with error codes 301, 302, 401,
80 80 and 407, and it wraps the object upon return.
81 81
82 82 For python versions earlier than 2.4, you can avoid this fancy error
83 83 handling by setting the module-level global HANDLE_ERRORS to zero.
84 84 You see, prior to 2.4, it's the HTTP Handler's job to determine what
85 85 to handle specially, and what to just pass up. HANDLE_ERRORS == 0
86 86 means "pass everything up". In python 2.4, however, this job no
87 87 longer belongs to the HTTP Handler and is now done by a NEW handler,
88 88 HTTPErrorProcessor. Here's the bottom line:
89 89
90 90 python version < 2.4
91 91 HANDLE_ERRORS == 1 (default) pass up 200, treat the rest as
92 92 errors
93 93 HANDLE_ERRORS == 0 pass everything up, error processing is
94 94 left to the calling code
95 95 python version >= 2.4
96 96 HANDLE_ERRORS == 1 pass up 200, treat the rest as errors
97 97 HANDLE_ERRORS == 0 (default) pass everything up, let the
98 98 other handlers (specifically,
99 99 HTTPErrorProcessor) decide what to do
100 100
101 101 In practice, setting the variable either way makes little difference
102 102 in python 2.4, so for the most consistent behavior across versions,
103 103 you probably just want to use the defaults, which will give you
104 104 exceptions on errors.
105 105
106 106 """
107 107
108 108 # $Id: keepalive.py,v 1.14 2006/04/04 21:00:32 mstenner Exp $
109 109
110 110 from __future__ import absolute_import, print_function
111 111
112 112 import errno
113 113 import hashlib
114 114 import socket
115 115 import sys
116 116 import threading
117 117
118 118 from . import (
119 119 util,
120 120 )
121 121
122 122 httplib = util.httplib
123 123 urlerr = util.urlerr
124 124 urlreq = util.urlreq
125 125
126 126 DEBUG = None
127 127
128 128 if sys.version_info < (2, 4):
129 129 HANDLE_ERRORS = 1
130 130 else: HANDLE_ERRORS = 0
131 131
132 132 class ConnectionManager(object):
133 133 """
134 134 The connection manager must be able to:
135 135 * keep track of all existing connections
136 136 """
137 137 def __init__(self):
138 138 self._lock = threading.Lock()
139 139 self._hostmap = {} # map hosts to a list of connections
140 140 self._connmap = {} # map connections to host
141 141 self._readymap = {} # map connection to ready state
142 142
143 143 def add(self, host, connection, ready):
144 144 self._lock.acquire()
145 145 try:
146 146 if host not in self._hostmap:
147 147 self._hostmap[host] = []
148 148 self._hostmap[host].append(connection)
149 149 self._connmap[connection] = host
150 150 self._readymap[connection] = ready
151 151 finally:
152 152 self._lock.release()
153 153
154 154 def remove(self, connection):
155 155 self._lock.acquire()
156 156 try:
157 157 try:
158 158 host = self._connmap[connection]
159 159 except KeyError:
160 160 pass
161 161 else:
162 162 del self._connmap[connection]
163 163 del self._readymap[connection]
164 164 self._hostmap[host].remove(connection)
165 165 if not self._hostmap[host]: del self._hostmap[host]
166 166 finally:
167 167 self._lock.release()
168 168
169 169 def set_ready(self, connection, ready):
170 170 try:
171 171 self._readymap[connection] = ready
172 172 except KeyError:
173 173 pass
174 174
175 175 def get_ready_conn(self, host):
176 176 conn = None
177 177 self._lock.acquire()
178 178 try:
179 179 if host in self._hostmap:
180 180 for c in self._hostmap[host]:
181 181 if self._readymap[c]:
182 182 self._readymap[c] = 0
183 183 conn = c
184 184 break
185 185 finally:
186 186 self._lock.release()
187 187 return conn
188 188
189 189 def get_all(self, host=None):
190 190 if host:
191 191 return list(self._hostmap.get(host, []))
192 192 else:
193 193 return dict(self._hostmap)
194 194
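
A quick, hypothetical walk through ConnectionManager's bookkeeping; a plain object() stands in for a real connection:

cm = ConnectionManager()
conn = object()                     # stand-in for an HTTPConnection
cm.add('example.com:80', conn, 1)   # register it as ready
assert cm.get_ready_conn('example.com:80') is conn   # handed out, now busy
assert cm.get_ready_conn('example.com:80') is None   # nothing else ready
cm.set_ready(conn, 1)               # request finished, back in the pool
cm.remove(conn)                     # or drop it entirely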
195 195 class KeepAliveHandler(object):
196 196 def __init__(self):
197 197 self._cm = ConnectionManager()
198 198
199 199 #### Connection Management
200 200 def open_connections(self):
201 201 """return a list of connected hosts and the number of connections
202 202 to each. [('foo.com:80', 2), ('bar.org', 1)]"""
203 203 return [(host, len(li)) for (host, li) in self._cm.get_all().items()]
204 204
205 205 def close_connection(self, host):
206 206 """close connection(s) to <host>
207 207 host is the host:port spec, as in 'www.cnn.com:8080' as passed in.
208 208 no error occurs if there is no connection to that host."""
209 209 for h in self._cm.get_all(host):
210 210 self._cm.remove(h)
211 211 h.close()
212 212
213 213 def close_all(self):
214 214 """close all open connections"""
215 215 for host, conns in self._cm.get_all().iteritems():
216 216 for h in conns:
217 217 self._cm.remove(h)
218 218 h.close()
219 219
220 220 def _request_closed(self, request, host, connection):
221 221 """tells us that this request is now closed and that the
222 222 connection is ready for another request"""
223 223 self._cm.set_ready(connection, 1)
224 224
225 225 def _remove_connection(self, host, connection, close=0):
226 226 if close:
227 227 connection.close()
228 228 self._cm.remove(connection)
229 229
230 230 #### Transaction Execution
231 231 def http_open(self, req):
232 232 return self.do_open(HTTPConnection, req)
233 233
234 234 def do_open(self, http_class, req):
235 235 host = req.get_host()
236 236 if not host:
237 237 raise urlerr.urlerror('no host given')
238 238
239 239 try:
240 240 h = self._cm.get_ready_conn(host)
241 241 while h:
242 242 r = self._reuse_connection(h, req, host)
243 243
244 244 # if this response is non-None, then it worked and we're
245 245 # done. Break out, skipping the else block.
246 246 if r:
247 247 break
248 248
249 249 # connection is bad - possibly closed by server
250 250 # discard it and ask for the next free connection
251 251 h.close()
252 252 self._cm.remove(h)
253 253 h = self._cm.get_ready_conn(host)
254 254 else:
255 255 # no (working) free connections were found. Create a new one.
256 256 h = http_class(host)
257 257 if DEBUG:
258 258 DEBUG.info("creating new connection to %s (%d)",
259 259 host, id(h))
260 260 self._cm.add(host, h, 0)
261 261 self._start_transaction(h, req)
262 262 r = h.getresponse()
263 263 except (socket.error, httplib.HTTPException) as err:
264 264 raise urlerr.urlerror(err)
265 265
266 266 # if not a persistent connection, don't try to reuse it
267 267 if r.will_close:
268 268 self._cm.remove(h)
269 269
270 270 if DEBUG:
271 271 DEBUG.info("STATUS: %s, %s", r.status, r.reason)
272 272 r._handler = self
273 273 r._host = host
274 274 r._url = req.get_full_url()
275 275 r._connection = h
276 276 r.code = r.status
277 277 r.headers = r.msg
278 278 r.msg = r.reason
279 279
280 280 if r.status == 200 or not HANDLE_ERRORS:
281 281 return r
282 282 else:
283 283 return self.parent.error('http', req, r,
284 284 r.status, r.msg, r.headers)
285 285
286 286 def _reuse_connection(self, h, req, host):
287 287 """start the transaction with a re-used connection
288 288 return a response object (r) upon success or None on failure.
289 289 This does NOT close or remove bad connections in cases where
290 290 it returns. However, if an unexpected exception occurs, it
291 291 will close and remove the connection before re-raising.
292 292 """
293 293 try:
294 294 self._start_transaction(h, req)
295 295 r = h.getresponse()
296 296 # note: just because we got something back doesn't mean it
297 297 # worked. We'll check the version below, too.
298 298 except (socket.error, httplib.HTTPException):
299 299 r = None
300 300 except: # re-raises
301 301 # adding this block just in case we've missed
302 302 # something we will still raise the exception, but
303 303 # lets try and close the connection and remove it
304 304 # first. We previously got into a nasty loop
305 305 # where an exception was uncaught, and so the
306 306 # connection stayed open. On the next try, the
307 307 # same exception was raised, etc. The trade-off is
308 308 # that it's now possible this call will raise
309 309 # a DIFFERENT exception
310 310 if DEBUG:
311 311 DEBUG.error("unexpected exception - closing "
312 312 "connection to %s (%d)", host, id(h))
313 313 self._cm.remove(h)
314 314 h.close()
315 315 raise
316 316
317 317 if r is None or r.version == 9:
318 318 # httplib falls back to assuming HTTP 0.9 if it gets a
319 319 # bad header back. This is most likely to happen if
320 320 # the socket has been closed by the server since we
321 321 # last used the connection.
322 322 if DEBUG:
323 323 DEBUG.info("failed to re-use connection to %s (%d)",
324 324 host, id(h))
325 325 r = None
326 326 else:
327 327 if DEBUG:
328 328 DEBUG.info("re-using connection to %s (%d)", host, id(h))
329 329
330 330 return r
331 331
332 332 def _start_transaction(self, h, req):
333 333 # What follows mostly reimplements HTTPConnection.request()
334 334 # except it adds self.parent.addheaders in the mix.
335 335 headers = dict(self.parent.addheaders)
336 336 headers.update(req.headers)
337 337 headers.update(req.unredirected_hdrs)
338 338 headers = dict((n.lower(), v) for n, v in headers.items())
339 339 skipheaders = {}
340 340 for n in ('host', 'accept-encoding'):
341 341 if n in headers:
342 342 skipheaders['skip_' + n.replace('-', '_')] = 1
343 343 try:
344 344 if req.has_data():
345 345 data = req.get_data()
346 346 h.putrequest('POST', req.get_selector(), **skipheaders)
347 347 if 'content-type' not in headers:
348 348 h.putheader('Content-type',
349 349 'application/x-www-form-urlencoded')
350 350 if 'content-length' not in headers:
351 351 h.putheader('Content-length', '%d' % len(data))
352 352 else:
353 353 h.putrequest('GET', req.get_selector(), **skipheaders)
354 354 except socket.error as err:
355 355 raise urlerr.urlerror(err)
356 356 for k, v in headers.items():
357 357 h.putheader(k, v)
358 358 h.endheaders()
359 359 if req.has_data():
360 360 h.send(data)
361 361
362 362 class HTTPHandler(KeepAliveHandler, urlreq.httphandler):
363 363 pass
364 364
365 365 class HTTPResponse(httplib.HTTPResponse):
366 366 # we need to subclass HTTPResponse in order to
367 367 # 1) add readline() and readlines() methods
368 368 # 2) add close_connection() methods
369 369 # 3) add info() and geturl() methods
370 370
371 371 # in order to add readline(), read must be modified to deal with a
372 372 # buffer. example: readline must read a buffer and then spit back
373 373 # one line at a time. The only real alternative is to read one
374 374 # BYTE at a time (ick). Once something has been read, it can't be
375 375 # put back (ok, maybe it can, but that's even uglier than this),
376 376 # so if you THEN do a normal read, you must first take stuff from
377 377 # the buffer.
378 378
379 379 # the read method wraps the original to accommodate buffering,
380 380 # although read() never adds to the buffer.
381 381 # Both readline and readlines have been stolen with almost no
382 382 # modification from socket.py
383 383
384 384
385 385 def __init__(self, sock, debuglevel=0, strict=0, method=None):
386 386 httplib.HTTPResponse.__init__(self, sock, debuglevel, method)
387 387 self.fileno = sock.fileno
388 388 self.code = None
389 389 self._rbuf = ''
390 390 self._rbufsize = 8096
391 391 self._handler = None # inserted by the handler later
392 392 self._host = None # (same)
393 393 self._url = None # (same)
394 394 self._connection = None # (same)
395 395
396 396 _raw_read = httplib.HTTPResponse.read
397 397
398 398 def close(self):
399 399 if self.fp:
400 400 self.fp.close()
401 401 self.fp = None
402 402 if self._handler:
403 403 self._handler._request_closed(self, self._host,
404 404 self._connection)
405 405
406 406 def close_connection(self):
407 407 self._handler._remove_connection(self._host, self._connection, close=1)
408 408 self.close()
409 409
410 410 def info(self):
411 411 return self.headers
412 412
413 413 def geturl(self):
414 414 return self._url
415 415
416 416 def read(self, amt=None):
417 417 # the _rbuf test is only in this first if for speed. It's not
418 418 # logically necessary
419 419 if self._rbuf and amt is not None:
420 420 L = len(self._rbuf)
421 421 if amt > L:
422 422 amt -= L
423 423 else:
424 424 s = self._rbuf[:amt]
425 425 self._rbuf = self._rbuf[amt:]
426 426 return s
427 427
428 428 s = self._rbuf + self._raw_read(amt)
429 429 self._rbuf = ''
430 430 return s
431 431
432 432 # stolen from Python SVN #68532 to fix issue1088
433 433 def _read_chunked(self, amt):
434 434 chunk_left = self.chunk_left
435 435 value = ''
436 436
437 437 # XXX This accumulates chunks by repeated string concatenation,
438 438 # which is not efficient as the number or size of chunks gets big.
439 439 while True:
440 440 if chunk_left is None:
441 441 line = self.fp.readline()
442 442 i = line.find(';')
443 443 if i >= 0:
444 444 line = line[:i] # strip chunk-extensions
445 445 try:
446 446 chunk_left = int(line, 16)
447 447 except ValueError:
448 448 # close the connection as protocol synchronization is
449 449 # probably lost
450 450 self.close()
451 451 raise httplib.IncompleteRead(value)
452 452 if chunk_left == 0:
453 453 break
454 454 if amt is None:
455 455 value += self._safe_read(chunk_left)
456 456 elif amt < chunk_left:
457 457 value += self._safe_read(amt)
458 458 self.chunk_left = chunk_left - amt
459 459 return value
460 460 elif amt == chunk_left:
461 461 value += self._safe_read(amt)
462 462 self._safe_read(2) # toss the CRLF at the end of the chunk
463 463 self.chunk_left = None
464 464 return value
465 465 else:
466 466 value += self._safe_read(chunk_left)
467 467 amt -= chunk_left
468 468
469 469 # we read the whole chunk, get another
470 470 self._safe_read(2) # toss the CRLF at the end of the chunk
471 471 chunk_left = None
472 472
473 473 # read and discard trailer up to the CRLF terminator
474 474 ### note: we shouldn't have any trailers!
475 475 while True:
476 476 line = self.fp.readline()
477 477 if not line:
478 478 # a vanishingly small number of sites EOF without
479 479 # sending the trailer
480 480 break
481 481 if line == '\r\n':
482 482 break
483 483
484 484 # we read everything; close the "file"
485 485 self.close()
486 486
487 487 return value
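
A worked example of the wire format _read_chunked() is parsing may help; this framing is standard HTTP/1.1 chunked encoding, not specific to this module:

#   4\r\n           chunk-size in hex
#   Wiki\r\n        4 bytes of data, then CRLF
#   5\r\n
#   pedia\r\n
#   0\r\n           zero-length chunk terminates the body
#   \r\n            blank line after the (usually absent) trailers
#
# The loop accumulates 'Wiki' + 'pedia' and returns 'Wikipedia'.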
488 488
489 489 def readline(self, limit=-1):
490 490 i = self._rbuf.find('\n')
491 491 while i < 0 and not (0 < limit <= len(self._rbuf)):
492 492 new = self._raw_read(self._rbufsize)
493 493 if not new:
494 494 break
495 495 i = new.find('\n')
496 496 if i >= 0:
497 497 i = i + len(self._rbuf)
498 498 self._rbuf = self._rbuf + new
499 499 if i < 0:
500 500 i = len(self._rbuf)
501 501 else:
502 502 i = i + 1
503 503 if 0 <= limit < len(self._rbuf):
504 504 i = limit
505 505 data, self._rbuf = self._rbuf[:i], self._rbuf[i:]
506 506 return data
507 507
508 508 def readlines(self, sizehint=0):
509 509 total = 0
510 510 list = []
511 511 while True:
512 512 line = self.readline()
513 513 if not line:
514 514 break
515 515 list.append(line)
516 516 total += len(line)
517 517 if sizehint and total >= sizehint:
518 518 break
519 519 return list
520 520
521 521 def safesend(self, str):
522 522 """Send `str' to the server.
523 523
524 524 Shamelessly ripped off from httplib to patch a bad behavior.
525 525 """
526 526 # _broken_pipe_resp is an attribute we set in this function
527 527 # if the socket is closed while we're sending data but
528 528 # the server sent us a response before hanging up.
529 529 # In that case, we want to pretend to send the rest of the
530 530 # outgoing data, and then let the user use getresponse()
531 531 # (which we wrap) to get this last response before
532 532 # opening a new socket.
533 533 if getattr(self, '_broken_pipe_resp', None) is not None:
534 534 return
535 535
536 536 if self.sock is None:
537 537 if self.auto_open:
538 538 self.connect()
539 539 else:
540 540 raise httplib.NotConnected
541 541
542 542 # send the data to the server. if we get a broken pipe, then close
543 543 # the socket. we want to reconnect when somebody tries to send again.
544 544 #
545 545 # NOTE: we DO propagate the error, though, because we cannot simply
546 546 # ignore the error... the caller will know if they can retry.
547 547 if self.debuglevel > 0:
548 548 print("send:", repr(str))
549 549 try:
550 550 blocksize = 8192
551 551 read = getattr(str, 'read', None)
552 552 if read is not None:
553 553 if self.debuglevel > 0:
554 554 print("sending a read()able")
555 555 data = read(blocksize)
556 556 while data:
557 557 self.sock.sendall(data)
558 558 data = read(blocksize)
559 559 else:
560 560 self.sock.sendall(str)
561 561 except socket.error as v:
562 562 reraise = True
563 563 if v[0] == errno.EPIPE: # Broken pipe
564 564 if self._HTTPConnection__state == httplib._CS_REQ_SENT:
565 565 self._broken_pipe_resp = None
566 566 self._broken_pipe_resp = self.getresponse()
567 567 reraise = False
568 568 self.close()
569 569 if reraise:
570 570 raise
571 571
572 572 def wrapgetresponse(cls):
573 573 """Wraps getresponse in cls with a broken-pipe sane version.
574 574 """
575 575 def safegetresponse(self):
576 576 # In safesend() we might set the _broken_pipe_resp
577 577 # attribute, in which case the socket has already
578 578 # been closed and we just need to give them the response
579 579 # back. Otherwise, we use the normal response path.
580 580 r = getattr(self, '_broken_pipe_resp', None)
581 581 if r is not None:
582 582 return r
583 583 return cls.getresponse(self)
584 584 safegetresponse.__doc__ = cls.getresponse.__doc__
585 585 return safegetresponse
586 586
587 587 class HTTPConnection(httplib.HTTPConnection):
588 588 # use the modified response class
589 589 response_class = HTTPResponse
590 590 send = safesend
591 591 getresponse = wrapgetresponse(httplib.HTTPConnection)
592 592
593 593
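Putting safesend() and the wrapped getresponse() together: if the server rejects a large POST and hangs up mid-send, the EPIPE is swallowed once an early response has been captured, and getresponse() replays it. A hypothetical caller (the payload value is illustrative only):

payload = '.' * (1 << 20)          # hypothetical large request body
conn = HTTPConnection('example.com:80')
conn.putrequest('POST', '/upload')
conn.putheader('Content-length', '%d' % len(payload))
conn.endheaders()
conn.send(payload)                 # safesend: EPIPE with an early
                                   # response is stashed, not raised
resp = conn.getresponse()          # returns the stashed response if any
print(resp.status, resp.reason)
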
594 594 #########################################################################
595 595 ##### TEST FUNCTIONS
596 596 #########################################################################
597 597
598 598 def error_handler(url):
599 599 global HANDLE_ERRORS
600 600 orig = HANDLE_ERRORS
601 601 keepalive_handler = HTTPHandler()
602 602 opener = urlreq.buildopener(keepalive_handler)
603 603 urlreq.installopener(opener)
604 604 pos = {0: 'off', 1: 'on'}
605 605 for i in (0, 1):
606 606 print(" fancy error handling %s (HANDLE_ERRORS = %i)" % (pos[i], i))
607 607 HANDLE_ERRORS = i
608 608 try:
609 609 fo = urlreq.urlopen(url)
610 610 fo.read()
611 611 fo.close()
612 612 try:
613 613 status, reason = fo.status, fo.reason
614 614 except AttributeError:
615 615 status, reason = None, None
616 616 except IOError as e:
617 617 print(" EXCEPTION: %s" % e)
618 618 raise
619 619 else:
620 620 print(" status = %s, reason = %s" % (status, reason))
621 621 HANDLE_ERRORS = orig
622 622 hosts = keepalive_handler.open_connections()
623 623 print("open connections:", hosts)
624 624 keepalive_handler.close_all()
625 625
626 626 def continuity(url):
627 627 md5 = hashlib.md5
628 628 format = '%25s: %s'
629 629
630 630 # first fetch the file with the normal http handler
631 631 opener = urlreq.buildopener()
632 632 urlreq.installopener(opener)
633 633 fo = urlreq.urlopen(url)
634 634 foo = fo.read()
635 635 fo.close()
636 636 m = md5(foo)
637 637 print(format % ('normal urllib', m.hexdigest()))
638 638
639 639 # now install the keepalive handler and try again
640 640 opener = urlreq.buildopener(HTTPHandler())
641 641 urlreq.installopener(opener)
642 642
643 643 fo = urlreq.urlopen(url)
644 644 foo = fo.read()
645 645 fo.close()
646 646 m = md5(foo)
647 647 print(format % ('keepalive read', m.hexdigest()))
648 648
649 649 fo = urlreq.urlopen(url)
650 650 foo = ''
651 651 while True:
652 652 f = fo.readline()
653 653 if f:
654 654 foo = foo + f
655 655 else: break
656 656 fo.close()
657 657 m = md5(foo)
658 658 print(format % ('keepalive readline', m.hexdigest()))
659 659
660 660 def comp(N, url):
661 661 print(' making %i connections to:\n %s' % (N, url))
662 662
663 sys.stdout.write(' first using the normal urllib handlers')
663 util.stdout.write(' first using the normal urllib handlers')
664 664 # first use normal opener
665 665 opener = urlreq.buildopener()
666 666 urlreq.installopener(opener)
667 667 t1 = fetch(N, url)
668 668 print(' TIME: %.3f s' % t1)
669 669
670 sys.stdout.write(' now using the keepalive handler ')
670 util.stdout.write(' now using the keepalive handler ')
671 671 # now install the keepalive handler and try again
672 672 opener = urlreq.buildopener(HTTPHandler())
673 673 urlreq.installopener(opener)
674 674 t2 = fetch(N, url)
675 675 print(' TIME: %.3f s' % t2)
676 676 print(' improvement factor: %.2f' % (t1 / t2))
677 677
678 678 def fetch(N, url, delay=0):
679 679 import time
680 680 lens = []
681 681 starttime = time.time()
682 682 for i in range(N):
683 683 if delay and i > 0:
684 684 time.sleep(delay)
685 685 fo = urlreq.urlopen(url)
686 686 foo = fo.read()
687 687 fo.close()
688 688 lens.append(len(foo))
689 689 diff = time.time() - starttime
690 690
691 691 j = 0
692 692 for i in lens[1:]:
693 693 j = j + 1
694 694 if not i == lens[0]:
695 695 print("WARNING: inconsistent length on read %i: %i" % (j, i))
696 696
697 697 return diff
698 698
699 699 def test_timeout(url):
700 700 global DEBUG
701 701 dbbackup = DEBUG
702 702 class FakeLogger(object):
703 703 def debug(self, msg, *args):
704 704 print(msg % args)
705 705 info = warning = error = debug
706 706 DEBUG = FakeLogger()
707 707 print(" fetching the file to establish a connection")
708 708 fo = urlreq.urlopen(url)
709 709 data1 = fo.read()
710 710 fo.close()
711 711
712 712 i = 20
713 713 print(" waiting %i seconds for the server to close the connection" % i)
714 714 while i > 0:
715 sys.stdout.write('\r %2i' % i)
716 sys.stdout.flush()
715 util.stdout.write('\r %2i' % i)
716 util.stdout.flush()
717 717 time.sleep(1)
718 718 i -= 1
719 sys.stderr.write('\r')
719 util.stderr.write('\r')
720 720
721 721 print(" fetching the file a second time")
722 722 fo = urlreq.urlopen(url)
723 723 data2 = fo.read()
724 724 fo.close()
725 725
726 726 if data1 == data2:
727 727 print(' data are identical')
728 728 else:
729 729 print(' ERROR: DATA DIFFER')
730 730
731 731 DEBUG = dbbackup
732 732
733 733
734 734 def test(url, N=10):
735 735 print("checking error handler (do this on a non-200)")
736 736 try: error_handler(url)
737 737 except IOError:
738 738 print("exiting - exception will prevent further tests")
739 739 sys.exit()
740 740 print('')
741 741 print("performing continuity test (making sure stuff isn't corrupted)")
742 742 continuity(url)
743 743 print('')
744 744 print("performing speed comparison")
745 745 comp(N, url)
746 746 print('')
747 747 print("performing dropped-connection check")
748 748 test_timeout(url)
749 749
750 750 if __name__ == '__main__':
751 751 import time
752 752 try:
753 753 N = int(sys.argv[1])
754 754 url = sys.argv[2]
755 755 except (IndexError, ValueError):
756 756 print("%s <integer> <url>" % sys.argv[0])
757 757 else:
758 758 test(url, N)
@@ -1,1373 +1,1373 b''
1 1 # ui.py - user interface bits for mercurial
2 2 #
3 3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 from __future__ import absolute_import
9 9
10 10 import errno
11 11 import getpass
12 12 import inspect
13 13 import os
14 14 import re
15 15 import socket
16 16 import sys
17 17 import tempfile
18 18 import traceback
19 19
20 20 from .i18n import _
21 21 from .node import hex
22 22
23 23 from . import (
24 24 config,
25 25 encoding,
26 26 error,
27 27 formatter,
28 28 progress,
29 29 scmutil,
30 30 util,
31 31 )
32 32
33 33 urlreq = util.urlreq
34 34
35 35 samplehgrcs = {
36 36 'user':
37 37 """# example user config (see 'hg help config' for more info)
38 38 [ui]
39 39 # name and email, e.g.
40 40 # username = Jane Doe <jdoe@example.com>
41 41 username =
42 42
43 43 [extensions]
44 44 # uncomment these lines to enable some popular extensions
45 45 # (see 'hg help extensions' for more info)
46 46 #
47 47 # pager =
48 48 # color =""",
49 49
50 50 'cloned':
51 51 """# example repository config (see 'hg help config' for more info)
52 52 [paths]
53 53 default = %s
54 54
55 55 # path aliases to other clones of this repo in URLs or filesystem paths
56 56 # (see 'hg help config.paths' for more info)
57 57 #
58 58 # default-push = ssh://jdoe@example.net/hg/jdoes-fork
59 59 # my-fork = ssh://jdoe@example.net/hg/jdoes-fork
60 60 # my-clone = /home/jdoe/jdoes-clone
61 61
62 62 [ui]
63 63 # name and email (local to this repository, optional), e.g.
64 64 # username = Jane Doe <jdoe@example.com>
65 65 """,
66 66
67 67 'local':
68 68 """# example repository config (see 'hg help config' for more info)
69 69 [paths]
70 70 # path aliases to other clones of this repo in URLs or filesystem paths
71 71 # (see 'hg help config.paths' for more info)
72 72 #
73 73 # default = http://example.com/hg/example-repo
74 74 # default-push = ssh://jdoe@example.net/hg/jdoes-fork
75 75 # my-fork = ssh://jdoe@example.net/hg/jdoes-fork
76 76 # my-clone = /home/jdoe/jdoes-clone
77 77
78 78 [ui]
79 79 # name and email (local to this repository, optional), e.g.
80 80 # username = Jane Doe <jdoe@example.com>
81 81 """,
82 82
83 83 'global':
84 84 """# example system-wide hg config (see 'hg help config' for more info)
85 85
86 86 [extensions]
87 87 # uncomment these lines to enable some popular extensions
88 88 # (see 'hg help extensions' for more info)
89 89 #
90 90 # blackbox =
91 91 # color =
92 92 # pager =""",
93 93 }
94 94
95 95 class ui(object):
96 96 def __init__(self, src=None):
97 97 # _buffers: used for temporary capture of output
98 98 self._buffers = []
99 99 # 3-tuple describing how each buffer in the stack behaves.
100 100 # Values are (capture stderr, capture subprocesses, apply labels).
101 101 self._bufferstates = []
102 102 # When a buffer is active, defines whether we are expanding labels.
103 103 # This exists to prevent an extra list lookup.
104 104 self._bufferapplylabels = None
105 105 self.quiet = self.verbose = self.debugflag = self.tracebackflag = False
106 106 self._reportuntrusted = True
107 107 self._ocfg = config.config() # overlay
108 108 self._tcfg = config.config() # trusted
109 109 self._ucfg = config.config() # untrusted
110 110 self._trustusers = set()
111 111 self._trustgroups = set()
112 112 self.callhooks = True
113 113 # Insecure server connections requested.
114 114 self.insecureconnections = False
115 115
116 116 if src:
117 117 self.fout = src.fout
118 118 self.ferr = src.ferr
119 119 self.fin = src.fin
120 120
121 121 self._tcfg = src._tcfg.copy()
122 122 self._ucfg = src._ucfg.copy()
123 123 self._ocfg = src._ocfg.copy()
124 124 self._trustusers = src._trustusers.copy()
125 125 self._trustgroups = src._trustgroups.copy()
126 126 self.environ = src.environ
127 127 self.callhooks = src.callhooks
128 128 self.insecureconnections = src.insecureconnections
129 129 self.fixconfig()
130 130
131 131 self.httppasswordmgrdb = src.httppasswordmgrdb
132 132 else:
133 self.fout = sys.stdout
134 self.ferr = sys.stderr
135 self.fin = sys.stdin
133 self.fout = util.stdout
134 self.ferr = util.stderr
135 self.fin = util.stdin
136 136
137 137 # shared read-only environment
138 138 self.environ = os.environ
139 139 # we always trust global config files
140 140 for f in scmutil.rcpath():
141 141 self.readconfig(f, trust=True)
142 142
143 143 self.httppasswordmgrdb = urlreq.httppasswordmgrwithdefaultrealm()
144 144
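
A note on the fout/ferr/fin defaults above, which this changeset switches away from the sys streams: ui traffics in bytes, and on Python 3 sys.stdout is a text stream. As set up earlier in this series, the util aliases resolve to the binary layer, roughly (a simplified sketch, not the exact util code):

import sys

if sys.version_info[0] >= 3:
    stdin = sys.stdin.buffer       # byte-oriented buffers
    stdout = sys.stdout.buffer
    stderr = sys.stderr.buffer
else:
    stdin = sys.stdin              # py2 file objects already take bytes
    stdout = sys.stdout
    stderr = sys.stderr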
145 145 def copy(self):
146 146 return self.__class__(self)
147 147
148 148 def resetstate(self):
149 149 """Clear internal state that shouldn't persist across commands"""
150 150 if self._progbar:
151 151 self._progbar.resetstate() # reset last-print time of progress bar
152 152 self.httppasswordmgrdb = urlreq.httppasswordmgrwithdefaultrealm()
153 153
154 154 def formatter(self, topic, opts):
155 155 return formatter.formatter(self, topic, opts)
156 156
157 157 def _trusted(self, fp, f):
158 158 st = util.fstat(fp)
159 159 if util.isowner(st):
160 160 return True
161 161
162 162 tusers, tgroups = self._trustusers, self._trustgroups
163 163 if '*' in tusers or '*' in tgroups:
164 164 return True
165 165
166 166 user = util.username(st.st_uid)
167 167 group = util.groupname(st.st_gid)
168 168 if user in tusers or group in tgroups or user == util.username():
169 169 return True
170 170
171 171 if self._reportuntrusted:
172 172 self.warn(_('not trusting file %s from untrusted '
173 173 'user %s, group %s\n') % (f, user, group))
174 174 return False
175 175
176 176 def readconfig(self, filename, root=None, trust=False,
177 177 sections=None, remap=None):
178 178 try:
179 179 fp = open(filename, u'rb')
180 180 except IOError:
181 181 if not sections: # ignore unless we were looking for something
182 182 return
183 183 raise
184 184
185 185 cfg = config.config()
186 186 trusted = sections or trust or self._trusted(fp, filename)
187 187
188 188 try:
189 189 cfg.read(filename, fp, sections=sections, remap=remap)
190 190 fp.close()
191 191 except error.ConfigError as inst:
192 192 if trusted:
193 193 raise
194 194 self.warn(_("ignored: %s\n") % str(inst))
195 195
196 196 if self.plain():
197 197 for k in ('debug', 'fallbackencoding', 'quiet', 'slash',
198 198 'logtemplate', 'statuscopies', 'style',
199 199 'traceback', 'verbose'):
200 200 if k in cfg['ui']:
201 201 del cfg['ui'][k]
202 202 for k, v in cfg.items('defaults'):
203 203 del cfg['defaults'][k]
204 204 # Don't remove aliases from the configuration if in the exceptionlist
205 205 if self.plain('alias'):
206 206 for k, v in cfg.items('alias'):
207 207 del cfg['alias'][k]
208 208 if self.plain('revsetalias'):
209 209 for k, v in cfg.items('revsetalias'):
210 210 del cfg['revsetalias'][k]
211 211 if self.plain('templatealias'):
212 212 for k, v in cfg.items('templatealias'):
213 213 del cfg['templatealias'][k]
214 214
215 215 if trusted:
216 216 self._tcfg.update(cfg)
217 217 self._tcfg.update(self._ocfg)
218 218 self._ucfg.update(cfg)
219 219 self._ucfg.update(self._ocfg)
220 220
221 221 if root is None:
222 222 root = os.path.expanduser('~')
223 223 self.fixconfig(root=root)
224 224
225 225 def fixconfig(self, root=None, section=None):
226 226 if section in (None, 'paths'):
227 227 # expand vars and ~
228 228 # translate paths relative to root (or home) into absolute paths
229 229 root = root or os.getcwd()
230 230 for c in self._tcfg, self._ucfg, self._ocfg:
231 231 for n, p in c.items('paths'):
232 232 # Ignore sub-options.
233 233 if ':' in n:
234 234 continue
235 235 if not p:
236 236 continue
237 237 if '%%' in p:
238 238 self.warn(_("(deprecated '%%' in path %s=%s from %s)\n")
239 239 % (n, p, self.configsource('paths', n)))
240 240 p = p.replace('%%', '%')
241 241 p = util.expandpath(p)
242 242 if not util.hasscheme(p) and not os.path.isabs(p):
243 243 p = os.path.normpath(os.path.join(root, p))
244 244 c.set("paths", n, p)
245 245
246 246 if section in (None, 'ui'):
247 247 # update ui options
248 248 self.debugflag = self.configbool('ui', 'debug')
249 249 self.verbose = self.debugflag or self.configbool('ui', 'verbose')
250 250 self.quiet = not self.debugflag and self.configbool('ui', 'quiet')
251 251 if self.verbose and self.quiet:
252 252 self.quiet = self.verbose = False
253 253 self._reportuntrusted = self.debugflag or self.configbool("ui",
254 254 "report_untrusted", True)
255 255 self.tracebackflag = self.configbool('ui', 'traceback', False)
256 256
257 257 if section in (None, 'trusted'):
258 258 # update trust information
259 259 self._trustusers.update(self.configlist('trusted', 'users'))
260 260 self._trustgroups.update(self.configlist('trusted', 'groups'))
261 261
262 262 def backupconfig(self, section, item):
263 263 return (self._ocfg.backup(section, item),
264 264 self._tcfg.backup(section, item),
265 265 self._ucfg.backup(section, item),)
266 266 def restoreconfig(self, data):
267 267 self._ocfg.restore(data[0])
268 268 self._tcfg.restore(data[1])
269 269 self._ucfg.restore(data[2])
270 270
271 271 def setconfig(self, section, name, value, source=''):
272 272 for cfg in (self._ocfg, self._tcfg, self._ucfg):
273 273 cfg.set(section, name, value, source)
274 274 self.fixconfig(section=section)
275 275
276 276 def _data(self, untrusted):
277 277 return untrusted and self._ucfg or self._tcfg
278 278
279 279 def configsource(self, section, name, untrusted=False):
280 280 return self._data(untrusted).source(section, name) or 'none'
281 281
282 282 def config(self, section, name, default=None, untrusted=False):
283 283 if isinstance(name, list):
284 284 alternates = name
285 285 else:
286 286 alternates = [name]
287 287
288 288 for n in alternates:
289 289 value = self._data(untrusted).get(section, n, None)
290 290 if value is not None:
291 291 name = n
292 292 break
293 293 else:
294 294 value = default
295 295
296 296 if self.debugflag and not untrusted and self._reportuntrusted:
297 297 for n in alternates:
298 298 uvalue = self._ucfg.get(section, n)
299 299 if uvalue is not None and uvalue != value:
300 300 self.debug("ignoring untrusted configuration option "
301 301 "%s.%s = %s\n" % (section, n, uvalue))
302 302 return value
303 303
304 304 def configsuboptions(self, section, name, default=None, untrusted=False):
305 305 """Get a config option and all sub-options.
306 306
307 307 Some config options have sub-options that are declared with the
308 308 format "key:opt = value". This method is used to return the main
309 309 option and all its declared sub-options.
310 310
311 311 Returns a 2-tuple of ``(option, sub-options)``, where ``sub-options``
312 312 is a dict of defined sub-options where keys and values are strings.
313 313 """
314 314 data = self._data(untrusted)
315 315 main = data.get(section, name, default)
316 316 if self.debugflag and not untrusted and self._reportuntrusted:
317 317 uvalue = self._ucfg.get(section, name)
318 318 if uvalue is not None and uvalue != main:
319 319 self.debug('ignoring untrusted configuration option '
320 320 '%s.%s = %s\n' % (section, name, uvalue))
321 321
322 322 sub = {}
323 323 prefix = '%s:' % name
324 324 for k, v in data.items(section):
325 325 if k.startswith(prefix):
326 326 sub[k[len(prefix):]] = v
327 327
328 328 if self.debugflag and not untrusted and self._reportuntrusted:
329 329 for k, v in sub.items():
330 330 uvalue = self._ucfg.get(section, '%s:%s' % (name, k))
331 331 if uvalue is not None and uvalue != v:
332 332 self.debug('ignoring untrusted configuration option '
333 333 '%s:%s.%s = %s\n' % (section, name, k, uvalue))
334 334
335 335 return main, sub
336 336
337 337 def configpath(self, section, name, default=None, untrusted=False):
338 338 'get a path config item, expanded relative to repo root or config file'
339 339 v = self.config(section, name, default, untrusted)
340 340 if v is None:
341 341 return None
342 342 if not os.path.isabs(v) or "://" not in v:
343 343 src = self.configsource(section, name, untrusted)
344 344 if ':' in src:
345 345 base = os.path.dirname(src.rsplit(':')[0])
346 346 v = os.path.join(base, os.path.expanduser(v))
347 347 return v
348 348
349 349 def configbool(self, section, name, default=False, untrusted=False):
350 350 """parse a configuration element as a boolean
351 351
352 352 >>> u = ui(); s = 'foo'
353 353 >>> u.setconfig(s, 'true', 'yes')
354 354 >>> u.configbool(s, 'true')
355 355 True
356 356 >>> u.setconfig(s, 'false', 'no')
357 357 >>> u.configbool(s, 'false')
358 358 False
359 359 >>> u.configbool(s, 'unknown')
360 360 False
361 361 >>> u.configbool(s, 'unknown', True)
362 362 True
363 363 >>> u.setconfig(s, 'invalid', 'somevalue')
364 364 >>> u.configbool(s, 'invalid')
365 365 Traceback (most recent call last):
366 366 ...
367 367 ConfigError: foo.invalid is not a boolean ('somevalue')
368 368 """
369 369
370 370 v = self.config(section, name, None, untrusted)
371 371 if v is None:
372 372 return default
373 373 if isinstance(v, bool):
374 374 return v
375 375 b = util.parsebool(v)
376 376 if b is None:
377 377 raise error.ConfigError(_("%s.%s is not a boolean ('%s')")
378 378 % (section, name, v))
379 379 return b
380 380
381 381 def configint(self, section, name, default=None, untrusted=False):
382 382 """parse a configuration element as an integer
383 383
384 384 >>> u = ui(); s = 'foo'
385 385 >>> u.setconfig(s, 'int1', '42')
386 386 >>> u.configint(s, 'int1')
387 387 42
388 388 >>> u.setconfig(s, 'int2', '-42')
389 389 >>> u.configint(s, 'int2')
390 390 -42
391 391 >>> u.configint(s, 'unknown', 7)
392 392 7
393 393 >>> u.setconfig(s, 'invalid', 'somevalue')
394 394 >>> u.configint(s, 'invalid')
395 395 Traceback (most recent call last):
396 396 ...
397 397 ConfigError: foo.invalid is not an integer ('somevalue')
398 398 """
399 399
400 400 v = self.config(section, name, None, untrusted)
401 401 if v is None:
402 402 return default
403 403 try:
404 404 return int(v)
405 405 except ValueError:
406 406 raise error.ConfigError(_("%s.%s is not an integer ('%s')")
407 407 % (section, name, v))
408 408
409 409 def configbytes(self, section, name, default=0, untrusted=False):
410 410 """parse a configuration element as a quantity in bytes
411 411
412 412 Units can be specified as b (bytes), k or kb (kilobytes), m or
413 413 mb (megabytes), g or gb (gigabytes).
414 414
415 415 >>> u = ui(); s = 'foo'
416 416 >>> u.setconfig(s, 'val1', '42')
417 417 >>> u.configbytes(s, 'val1')
418 418 42
419 419 >>> u.setconfig(s, 'val2', '42.5 kb')
420 420 >>> u.configbytes(s, 'val2')
421 421 43520
422 422 >>> u.configbytes(s, 'unknown', '7 MB')
423 423 7340032
424 424 >>> u.setconfig(s, 'invalid', 'somevalue')
425 425 >>> u.configbytes(s, 'invalid')
426 426 Traceback (most recent call last):
427 427 ...
428 428 ConfigError: foo.invalid is not a byte quantity ('somevalue')
429 429 """
430 430
431 431 value = self.config(section, name)
432 432 if value is None:
433 433 if not isinstance(default, str):
434 434 return default
435 435 value = default
436 436 try:
437 437 return util.sizetoint(value)
438 438 except error.ParseError:
439 439 raise error.ConfigError(_("%s.%s is not a byte quantity ('%s')")
440 440 % (section, name, value))
441 441
442 442 def configlist(self, section, name, default=None, untrusted=False):
443 443 """parse a configuration element as a list of comma/space separated
444 444 strings
445 445
446 446 >>> u = ui(); s = 'foo'
447 447 >>> u.setconfig(s, 'list1', 'this,is "a small" ,test')
448 448 >>> u.configlist(s, 'list1')
449 449 ['this', 'is', 'a small', 'test']
450 450 """
451 451
452 452 def _parse_plain(parts, s, offset):
453 453 whitespace = False
454 454 while offset < len(s) and (s[offset].isspace() or s[offset] == ','):
455 455 whitespace = True
456 456 offset += 1
457 457 if offset >= len(s):
458 458 return None, parts, offset
459 459 if whitespace:
460 460 parts.append('')
461 461 if s[offset] == '"' and not parts[-1]:
462 462 return _parse_quote, parts, offset + 1
463 463 elif s[offset] == '"' and parts[-1][-1] == '\\':
464 464 parts[-1] = parts[-1][:-1] + s[offset]
465 465 return _parse_plain, parts, offset + 1
466 466 parts[-1] += s[offset]
467 467 return _parse_plain, parts, offset + 1
468 468
469 469 def _parse_quote(parts, s, offset):
470 470 if offset < len(s) and s[offset] == '"': # ""
471 471 parts.append('')
472 472 offset += 1
473 473 while offset < len(s) and (s[offset].isspace() or
474 474 s[offset] == ','):
475 475 offset += 1
476 476 return _parse_plain, parts, offset
477 477
478 478 while offset < len(s) and s[offset] != '"':
479 479 if (s[offset] == '\\' and offset + 1 < len(s)
480 480 and s[offset + 1] == '"'):
481 481 offset += 1
482 482 parts[-1] += '"'
483 483 else:
484 484 parts[-1] += s[offset]
485 485 offset += 1
486 486
487 487 if offset >= len(s):
488 488 real_parts = _configlist(parts[-1])
489 489 if not real_parts:
490 490 parts[-1] = '"'
491 491 else:
492 492 real_parts[0] = '"' + real_parts[0]
493 493 parts = parts[:-1]
494 494 parts.extend(real_parts)
495 495 return None, parts, offset
496 496
497 497 offset += 1
498 498 while offset < len(s) and s[offset] in [' ', ',']:
499 499 offset += 1
500 500
501 501 if offset < len(s):
502 502 if offset + 1 == len(s) and s[offset] == '"':
503 503 parts[-1] += '"'
504 504 offset += 1
505 505 else:
506 506 parts.append('')
507 507 else:
508 508 return None, parts, offset
509 509
510 510 return _parse_plain, parts, offset
511 511
512 512 def _configlist(s):
513 513 s = s.rstrip(' ,')
514 514 if not s:
515 515 return []
516 516 parser, parts, offset = _parse_plain, [''], 0
517 517 while parser:
518 518 parser, parts, offset = parser(parts, s, offset)
519 519 return parts
520 520
521 521 result = self.config(section, name, untrusted=untrusted)
522 522 if result is None:
523 523 result = default or []
524 524 if isinstance(result, basestring):
525 525 result = _configlist(result.lstrip(' ,\n'))
526 526 if result is None:
527 527 result = default or []
528 528 return result
529 529
530 530 def hasconfig(self, section, name, untrusted=False):
531 531 return self._data(untrusted).hasitem(section, name)
532 532
533 533 def has_section(self, section, untrusted=False):
534 534 '''tell whether section exists in config.'''
535 535 return section in self._data(untrusted)
536 536
537 537 def configitems(self, section, untrusted=False, ignoresub=False):
538 538 items = self._data(untrusted).items(section)
539 539 if ignoresub:
540 540 newitems = {}
541 541 for k, v in items:
542 542 if ':' not in k:
543 543 newitems[k] = v
544 544 items = newitems.items()
545 545 if self.debugflag and not untrusted and self._reportuntrusted:
546 546 for k, v in self._ucfg.items(section):
547 547 if self._tcfg.get(section, k) != v:
548 548 self.debug("ignoring untrusted configuration option "
549 549 "%s.%s = %s\n" % (section, k, v))
550 550 return items
551 551
552 552 def walkconfig(self, untrusted=False):
553 553 cfg = self._data(untrusted)
554 554 for section in cfg.sections():
555 555 for name, value in self.configitems(section, untrusted):
556 556 yield section, name, value
557 557
558 558 def plain(self, feature=None):
559 559 '''is plain mode active?
560 560
561 561 Plain mode means that all configuration variables which affect
562 562 the behavior and output of Mercurial should be
563 563 ignored. Additionally, the output should be stable,
564 564 reproducible and suitable for use in scripts or applications.
565 565
566 566 The only way to trigger plain mode is by setting either the
567 567 `HGPLAIN' or `HGPLAINEXCEPT' environment variables.
568 568
569 569 The return value can either be
570 570 - False if HGPLAIN is not set, or feature is in HGPLAINEXCEPT
571 571 - True otherwise
572 572 '''
573 573 if ('HGPLAIN' not in encoding.environ and
574 574 'HGPLAINEXCEPT' not in encoding.environ):
575 575 return False
576 576 exceptions = encoding.environ.get('HGPLAINEXCEPT',
577 577 '').strip().split(',')
578 578 if feature and exceptions:
579 579 return feature not in exceptions
580 580 return True
581 581
582 582 def username(self):
583 583 """Return default username to be used in commits.
584 584
585 585 Searched in this order: $HGUSER, [ui] section of hgrcs, $EMAIL;
586 586 searching stops at the first of these that is set.
587 587 If not found and ui.askusername is True, ask the user, else use
588 588 ($LOGNAME or $USER or $LNAME or $USERNAME) + "@full.hostname".
589 589 """
590 590 user = encoding.environ.get("HGUSER")
591 591 if user is None:
592 592 user = self.config("ui", ["username", "user"])
593 593 if user is not None:
594 594 user = os.path.expandvars(user)
595 595 if user is None:
596 596 user = encoding.environ.get("EMAIL")
597 597 if user is None and self.configbool("ui", "askusername"):
598 598 user = self.prompt(_("enter a commit username:"), default=None)
599 599 if user is None and not self.interactive():
600 600 try:
601 601 user = '%s@%s' % (util.getuser(), socket.getfqdn())
602 602 self.warn(_("no username found, using '%s' instead\n") % user)
603 603 except KeyError:
604 604 pass
605 605 if not user:
606 606 raise error.Abort(_('no username supplied'),
607 607 hint=_("use 'hg config --edit' "
608 608 'to set your username'))
609 609 if "\n" in user:
610 610 raise error.Abort(_("username %s contains a newline\n")
611 611 % repr(user))
612 612 return user
613 613
614 614 def shortuser(self, user):
615 615 """Return a short representation of a user name or email address."""
616 616 if not self.verbose:
617 617 user = util.shortuser(user)
618 618 return user
619 619
620 620 def expandpath(self, loc, default=None):
621 621 """Return repository location relative to cwd or from [paths]"""
622 622 try:
623 623 p = self.paths.getpath(loc)
624 624 if p:
625 625 return p.rawloc
626 626 except error.RepoError:
627 627 pass
628 628
629 629 if default:
630 630 try:
631 631 p = self.paths.getpath(default)
632 632 if p:
633 633 return p.rawloc
634 634 except error.RepoError:
635 635 pass
636 636
637 637 return loc
638 638
639 639 @util.propertycache
640 640 def paths(self):
641 641 return paths(self)
642 642
643 643 def pushbuffer(self, error=False, subproc=False, labeled=False):
644 644 """install a buffer to capture standard output of the ui object
645 645
646 646 If error is True, the error output will be captured too.
647 647
648 648 If subproc is True, output from subprocesses (typically hooks) will be
649 649 captured too.
650 650
651 651 If labeled is True, any labels associated with buffered
652 652 output will be handled. By default, this has no effect
653 653 on the output returned, but extensions and GUI tools may
654 654 handle this argument and returned styled output. If output
655 655 is being buffered so it can be captured and parsed or
656 656 processed, labeled should not be set to True.
657 657 """
658 658 self._buffers.append([])
659 659 self._bufferstates.append((error, subproc, labeled))
660 660 self._bufferapplylabels = labeled
661 661
662 662 def popbuffer(self):
663 663 '''pop the last buffer and return the buffered output'''
664 664 self._bufferstates.pop()
665 665 if self._bufferstates:
666 666 self._bufferapplylabels = self._bufferstates[-1][2]
667 667 else:
668 668 self._bufferapplylabels = None
669 669
670 670 return "".join(self._buffers.pop())
671 671
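
The buffer stack is easiest to follow end to end; a small illustrative session using only the methods defined in this class:

u = ui()
u.pushbuffer()               # write() now appends to the top buffer
u.write('hello ', 'world\n')
captured = u.popbuffer()     # -> 'hello world\n'
u.write(captured)            # this time it really goes to u.fout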
672 672 def write(self, *args, **opts):
673 673 '''write args to output
674 674
675 675 By default, this method simply writes to the buffer or stdout,
676 676 but extensions or GUI tools may override this method,
677 677 write_err(), popbuffer(), and label() to style output from
678 678 various parts of hg.
679 679
680 680 An optional keyword argument, "label", can be passed in.
681 681 This should be a string containing label names separated by
682 682 space. Label names take the form of "topic.type". For example,
683 683 ui.debug() issues a label of "ui.debug".
684 684
685 685 When labeling output for a specific command, a label of
686 686 "cmdname.type" is recommended. For example, status issues
687 687 a label of "status.modified" for modified files.
688 688 '''
689 689 if self._buffers and not opts.get('prompt', False):
690 690 self._buffers[-1].extend(a for a in args)
691 691 else:
692 692 self._progclear()
693 693 for a in args:
694 694 self.fout.write(a)
695 695
696 696 def write_err(self, *args, **opts):
697 697 self._progclear()
698 698 try:
699 699 if self._bufferstates and self._bufferstates[-1][0]:
700 700 return self.write(*args, **opts)
701 701 if not getattr(self.fout, 'closed', False):
702 702 self.fout.flush()
703 703 for a in args:
704 704 self.ferr.write(a)
705 705 # stderr may be buffered under win32 when redirected to files,
706 706 # including stdout.
707 707 if not getattr(self.ferr, 'closed', False):
708 708 self.ferr.flush()
709 709 except IOError as inst:
710 710 if inst.errno not in (errno.EPIPE, errno.EIO, errno.EBADF):
711 711 raise
712 712
713 713 def flush(self):
714 714 try: self.fout.flush()
715 715 except (IOError, ValueError): pass
716 716 try: self.ferr.flush()
717 717 except (IOError, ValueError): pass
718 718
719 719 def _isatty(self, fh):
720 720 if self.configbool('ui', 'nontty', False):
721 721 return False
722 722 return util.isatty(fh)
723 723
724 724 def interface(self, feature):
725 725 """what interface to use for interactive console features?
726 726
727 727 The interface is controlled by the value of `ui.interface` but also by
728 728 the value of feature-specific configuration. For example:
729 729
730 730 ui.interface.histedit = text
731 731 ui.interface.chunkselector = curses
732 732
733 733 Here the features are "histedit" and "chunkselector".
734 734
735 735 The configuration above means that the default interface for commands
736 736 is curses, the interface for histedit is text, and the interface for
737 737 chunk selection is crecord (the best curses interface available).
738 738
739 739 Consider the following example:
740 740 ui.interface = curses
741 741 ui.interface.histedit = text
742 742
743 743 Then histedit will use the text interface and chunkselector will use
744 744 the default curses interface (crecord at the moment).
745 745 """
746 746 alldefaults = frozenset(["text", "curses"])
747 747
748 748 featureinterfaces = {
749 749 "chunkselector": [
750 750 "text",
751 751 "curses",
752 752 ]
753 753 }
754 754
755 755 # Feature-specific interface
756 756 if feature not in featureinterfaces.keys():
757 757 # Programming error, not user error
758 758 raise ValueError("Unknown feature requested %s" % feature)
759 759
760 760 availableinterfaces = frozenset(featureinterfaces[feature])
761 761 if alldefaults > availableinterfaces:
762 762 # Programming error, not user error. We need a use case to
763 763 # define the right thing to do here.
764 764 raise ValueError(
765 765 "Feature %s does not handle all default interfaces" %
766 766 feature)
767 767
768 768 if self.plain():
769 769 return "text"
770 770
771 771 # Default interface for all the features
772 772 defaultinterface = "text"
773 773 i = self.config("ui", "interface", None)
774 774 if i in alldefaults:
775 775 defaultinterface = i
776 776
777 777 choseninterface = defaultinterface
778 778 f = self.config("ui", "interface.%s" % feature, None)
779 779 if f in availableinterfaces:
780 780 choseninterface = f
781 781
782 782 if i is not None and defaultinterface != i:
783 783 if f is not None:
784 784 self.warn(_("invalid value for ui.interface: %s\n") %
785 785 (i,))
786 786 else:
787 787 self.warn(_("invalid value for ui.interface: %s (using %s)\n") %
788 788 (i, choseninterface))
789 789 if f is not None and choseninterface != f:
790 790 self.warn(_("invalid value for ui.interface.%s: %s (using %s)\n") %
791 791 (feature, f, choseninterface))
792 792
793 793 return choseninterface
794 794
795 795 def interactive(self):
796 796 '''is interactive input allowed?
797 797
798 798 An interactive session is a session where input can be reasonably read
799 799 from `sys.stdin'. If this function returns false, any attempt to read
800 800 from stdin should fail with an error, unless a sensible default has been
801 801 specified.
802 802
803 803 Interactiveness is triggered by the value of the `ui.interactive'
804 804 configuration variable or - if it is unset - when `sys.stdin' points
805 805 to a terminal device.
806 806
807 807 This function refers to input only; for output, see `ui.formatted()'.
808 808 '''
809 809 i = self.configbool("ui", "interactive", None)
810 810 if i is None:
811 811 # some environments replace stdin without implementing isatty
812 812 # usually those are non-interactive
813 813 return self._isatty(self.fin)
814 814
815 815 return i
816 816
817 817 def termwidth(self):
818 818 '''how wide is the terminal in columns?
819 819 '''
820 820 if 'COLUMNS' in encoding.environ:
821 821 try:
822 822 return int(encoding.environ['COLUMNS'])
823 823 except ValueError:
824 824 pass
825 825 return scmutil.termsize(self)[0]
826 826
827 827 def formatted(self):
828 828 '''should formatted output be used?
829 829
830 830 It is often desirable to format the output to suit the output medium.
831 831 Examples of this are truncating long lines or colorizing messages.
832 832 However, this is often not desirable when piping output into other
833 833 utilities, e.g. `grep'.
834 834
835 835 Formatted output is triggered by the value of the `ui.formatted'
836 836 configuration variable or - if it is unset - when `sys.stdout' points
837 837 to a terminal device. Please note that `ui.formatted' should be
838 838 considered an implementation detail; it is not intended for use outside
839 839 Mercurial or its extensions.
840 840
841 841 This function refers to output only; for input, see `ui.interactive()'.
842 842 This function always returns false when in plain mode, see `ui.plain()'.
843 843 '''
844 844 if self.plain():
845 845 return False
846 846
847 847 i = self.configbool("ui", "formatted", None)
848 848 if i is None:
849 849 # some environments replace stdout without implementing isatty
850 850 # usually those are non-interactive
851 851 return self._isatty(self.fout)
852 852
853 853 return i
854 854
855 855 def _readline(self, prompt=''):
856 856 if self._isatty(self.fin):
857 857 try:
858 858 # magically add command line editing support, where
859 859 # available
860 860 import readline
861 861 # force demandimport to really load the module
862 862 readline.read_history_file
863 863 # windows sometimes raises something other than ImportError
864 864 except Exception:
865 865 pass
866 866
867 867 # call write() so output goes through subclassed implementation
868 868 # e.g. color extension on Windows
869 869 self.write(prompt, prompt=True)
870 870
871 871 # instead of trying to emulate raw_input, swap (self.fin,
872 872 # self.fout) with (sys.stdin, sys.stdout)
873 873 oldin = sys.stdin
874 874 oldout = sys.stdout
875 875 sys.stdin = self.fin
876 876 sys.stdout = self.fout
877 877 # prompt ' ' must exist; otherwise readline may delete entire line
878 878 # - http://bugs.python.org/issue12833
879 879 line = raw_input(' ')
880 880 sys.stdin = oldin
881 881 sys.stdout = oldout
882 882
883 883 # When stdin is in binary mode on Windows, it can cause
884 884 # raw_input() to emit an extra trailing carriage return
885 885 if os.linesep == '\r\n' and line and line[-1] == '\r':
886 886 line = line[:-1]
887 887 return line
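
The stream swap in _readline() is a reusable Python 2 trick for borrowing raw_input()'s line editing on arbitrary streams; a standalone sketch, here hardened with try/finally (which the method above omits):

import sys

def readline_on(fin, fout):
    oldin, oldout = sys.stdin, sys.stdout
    sys.stdin, sys.stdout = fin, fout
    try:
        # prompt ' ' must be non-empty, or readline may delete the
        # entire line (http://bugs.python.org/issue12833)
        return raw_input(' ')
    finally:
        sys.stdin, sys.stdout = oldin, oldout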
888 888
889 889 def prompt(self, msg, default="y"):
890 890 """Prompt user with msg, read response.
891 891 If ui is not interactive, the default is returned.
892 892 """
893 893 if not self.interactive():
894 894 self.write(msg, ' ', default or '', "\n")
895 895 return default
896 896 try:
897 897 r = self._readline(self.label(msg, 'ui.prompt'))
898 898 if not r:
899 899 r = default
900 900 if self.configbool('ui', 'promptecho'):
901 901 self.write(r, "\n")
902 902 return r
903 903 except EOFError:
904 904 raise error.ResponseExpected()
905 905
906 906 @staticmethod
907 907 def extractchoices(prompt):
908 908 """Extract prompt message and list of choices from specified prompt.
909 909
910 910 This returns tuple "(message, choices)", and "choices" is the
911 911 list of tuple "(response character, text without &)".
912 912
913 913 >>> ui.extractchoices("awake? $$ &Yes $$ &No")
914 914 ('awake? ', [('y', 'Yes'), ('n', 'No')])
915 915 >>> ui.extractchoices("line\\nbreak? $$ &Yes $$ &No")
916 916 ('line\\nbreak? ', [('y', 'Yes'), ('n', 'No')])
917 917 >>> ui.extractchoices("want lots of $$money$$?$$Ye&s$$N&o")
918 918 ('want lots of $$money$$?', [('s', 'Yes'), ('o', 'No')])
919 919 """
920 920
921 921 # Sadly, the prompt string may have been built with a filename
922 922 # containing "$$" so let's try to find the first valid-looking
923 923 # prompt to start parsing. Sadly, we also can't rely on
924 924 # choices containing spaces, ASCII, or basically anything
925 925 # except an ampersand followed by a character.
926 926 m = re.match(r'(?s)(.+?)\$\$([^\$]*&[^ \$].*)', prompt)
927 927 msg = m.group(1)
928 928 choices = [p.strip(' ') for p in m.group(2).split('$$')]
929 929 return (msg,
930 930 [(s[s.index('&') + 1].lower(), s.replace('&', '', 1))
931 931 for s in choices])
932 932
933 933 def promptchoice(self, prompt, default=0):
934 934 """Prompt user with a message, read response, and ensure it matches
935 935 one of the provided choices. The prompt is formatted as follows:
936 936
937 937 "would you like fries with that (Yn)? $$ &Yes $$ &No"
938 938
939 939 The index of the choice is returned. Responses are case
940 940 insensitive. If ui is not interactive, the default is
941 941 returned.
942 942 """
943 943
944 944 msg, choices = self.extractchoices(prompt)
945 945 resps = [r for r, t in choices]
946 946 while True:
947 947 r = self.prompt(msg, resps[default])
948 948 if r.lower() in resps:
949 949 return resps.index(r.lower())
950 950 self.write(_("unrecognized response\n"))
951 951
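A minimal usage sketch (prompt text and handling are hypothetical): the &-marked letters become the accepted responses, and the return value is an index into them.

    idx = ui.promptchoice("apply change (Yn)? $$ &Yes $$ &No", default=0)
    if idx == 0:
        pass   # hypothetical: user chose 'Yes' (or took the default)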
952 952 def getpass(self, prompt=None, default=None):
953 953 if not self.interactive():
954 954 return default
955 955 try:
956 956 self.write_err(self.label(prompt or _('password: '), 'ui.prompt'))
957 957 # disable getpass() only if explicitly specified. it's still valid
958 958 # to interact with tty even if fin is not a tty.
959 959 if self.configbool('ui', 'nontty'):
960 960 return self.fin.readline().rstrip('\n')
961 961 else:
962 962 return getpass.getpass('')
963 963 except EOFError:
964 964 raise error.ResponseExpected()
965 965 def status(self, *msg, **opts):
966 966 '''write status message to output (if ui.quiet is False)
967 967
968 968 This adds an output label of "ui.status".
969 969 '''
970 970 if not self.quiet:
971 971 opts['label'] = opts.get('label', '') + ' ui.status'
972 972 self.write(*msg, **opts)
973 973 def warn(self, *msg, **opts):
974 974 '''write warning message to output (stderr)
975 975
976 976 This adds an output label of "ui.warning".
977 977 '''
978 978 opts['label'] = opts.get('label', '') + ' ui.warning'
979 979 self.write_err(*msg, **opts)
980 980 def note(self, *msg, **opts):
981 981 '''write note to output (if ui.verbose is True)
982 982
983 983 This adds an output label of "ui.note".
984 984 '''
985 985 if self.verbose:
986 986 opts['label'] = opts.get('label', '') + ' ui.note'
987 987 self.write(*msg, **opts)
988 988 def debug(self, *msg, **opts):
989 989 '''write debug message to output (if ui.debugflag is True)
990 990
991 991 This adds an output label of "ui.debug".
992 992 '''
993 993 if self.debugflag:
994 994 opts['label'] = opts.get('label', '') + ' ui.debug'
995 995 self.write(*msg, **opts)
996 996
997 997 def edit(self, text, user, extra=None, editform=None, pending=None):
998 998 extra_defaults = {
999 999 'prefix': 'editor',
1000 1000 'suffix': '.txt',
1001 1001 }
1002 1002 if extra is not None:
1003 1003 extra_defaults.update(extra)
1004 1004 extra = extra_defaults
1005 1005 (fd, name) = tempfile.mkstemp(prefix='hg-' + extra['prefix'] + '-',
1006 1006 suffix=extra['suffix'], text=True)
1007 1007 try:
1008 1008 f = os.fdopen(fd, "w")
1009 1009 f.write(text)
1010 1010 f.close()
1011 1011
1012 1012 environ = {'HGUSER': user}
1013 1013 if 'transplant_source' in extra:
1014 1014 environ.update({'HGREVISION': hex(extra['transplant_source'])})
1015 1015 for label in ('intermediate-source', 'source', 'rebase_source'):
1016 1016 if label in extra:
1017 1017 environ.update({'HGREVISION': extra[label]})
1018 1018 break
1019 1019 if editform:
1020 1020 environ.update({'HGEDITFORM': editform})
1021 1021 if pending:
1022 1022 environ.update({'HG_PENDING': pending})
1023 1023
1024 1024 editor = self.geteditor()
1025 1025
1026 1026 self.system("%s \"%s\"" % (editor, name),
1027 1027 environ=environ,
1028 1028 onerr=error.Abort, errprefix=_("edit failed"))
1029 1029
1030 1030 f = open(name)
1031 1031 t = f.read()
1032 1032 f.close()
1033 1033 finally:
1034 1034 os.unlink(name)
1035 1035
1036 1036 return t
1037 1037
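A sketch of the editor round-trip this method performs (message, user, and editform values are hypothetical):

    # writes the text to a temp file, spawns the configured editor on it,
    # and returns whatever the user saved
    text = ui.edit('draft commit message\n', user='alice',
                   editform='commit.normal')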
1038 1038 def system(self, cmd, environ=None, cwd=None, onerr=None, errprefix=None):
1039 1039 '''execute shell command with appropriate output stream. command
1040 1040 output will be redirected if fout is not stdout.
1041 1041 '''
1042 1042 out = self.fout
1043 1043 if any(s[1] for s in self._bufferstates):
1044 1044 out = self
1045 1045 return util.system(cmd, environ=environ, cwd=cwd, onerr=onerr,
1046 1046 errprefix=errprefix, out=out)
1047 1047
1048 1048 def traceback(self, exc=None, force=False):
1049 1049 '''print exception traceback if traceback printing enabled or forced.
1050 1050 only to be called from an exception handler. returns true if a
1051 1051 traceback was printed.'''
1052 1052 if self.tracebackflag or force:
1053 1053 if exc is None:
1054 1054 exc = sys.exc_info()
1055 1055 cause = getattr(exc[1], 'cause', None)
1056 1056
1057 1057 if cause is not None:
1058 1058 causetb = traceback.format_tb(cause[2])
1059 1059 exctb = traceback.format_tb(exc[2])
1060 1060 exconly = traceback.format_exception_only(cause[0], cause[1])
1061 1061
1062 1062 # exclude frame where 'exc' was chained and rethrown from exctb
1063 1063 self.write_err('Traceback (most recent call last):\n',
1064 1064 ''.join(exctb[:-1]),
1065 1065 ''.join(causetb),
1066 1066 ''.join(exconly))
1067 1067 else:
1068 1068 output = traceback.format_exception(exc[0], exc[1], exc[2])
1069 1069 self.write_err(''.join(output))
1070 1070 return self.tracebackflag or force
1071 1071
1072 1072 def geteditor(self):
1073 1073 '''return editor to use'''
1074 1074 if sys.platform == 'plan9':
1075 1075 # vi is the MIPS instruction simulator on Plan 9. We
1076 1076 # instead default to E to plumb commit messages to
1077 1077 # avoid confusion.
1078 1078 editor = 'E'
1079 1079 else:
1080 1080 editor = 'vi'
1081 1081 return (encoding.environ.get("HGEDITOR") or
1082 1082 self.config("ui", "editor") or
1083 1083 encoding.environ.get("VISUAL") or
1084 1084 encoding.environ.get("EDITOR", editor))
1085 1085
1086 1086 @util.propertycache
1087 1087 def _progbar(self):
1088 1088 """setup the progbar singleton to the ui object"""
1089 1089 if (self.quiet or self.debugflag
1090 1090 or self.configbool('progress', 'disable', False)
1091 1091 or not progress.shouldprint(self)):
1092 1092 return None
1093 1093 return getprogbar(self)
1094 1094
1095 1095 def _progclear(self):
1096 1096 """clear progress bar output if any. use it before any output"""
1097 1097 if '_progbar' not in vars(self): # nothing loaded yet
1098 1098 return
1099 1099 if self._progbar is not None and self._progbar.printed:
1100 1100 self._progbar.clear()
1101 1101
1102 1102 def progress(self, topic, pos, item="", unit="", total=None):
1103 1103 '''show a progress message
1104 1104
1105 1105 By default a textual progress bar will be displayed if an operation
1106 1106 takes too long. 'topic' is the current operation, 'item' is a
1107 1107 non-numeric marker of the current position (e.g. the currently
1108 1108 in-process file), 'pos' is the current numeric position (e.g.
1109 1109 revision, bytes, etc.), unit is a corresponding unit label,
1110 1110 and total is the highest expected pos.
1111 1111
1112 1112 Multiple nested topics may be active at a time.
1113 1113
1114 1114 All topics should be marked closed by setting pos to None at
1115 1115 termination.
1116 1116 '''
1117 1117 if self._progbar is not None:
1118 1118 self._progbar.progress(topic, pos, item=item, unit=unit,
1119 1119 total=total)
1120 1120 if pos is None or not self.configbool('progress', 'debug'):
1121 1121 return
1122 1122
1123 1123 if unit:
1124 1124 unit = ' ' + unit
1125 1125 if item:
1126 1126 item = ' ' + item
1127 1127
1128 1128 if total:
1129 1129 pct = 100.0 * pos / total
1130 1130 self.debug('%s:%s %s/%s%s (%4.2f%%)\n'
1131 1131 % (topic, item, pos, total, unit, pct))
1132 1132 else:
1133 1133 self.debug('%s:%s %s%s\n' % (topic, item, pos, unit))
1134 1134
1135 1135 def log(self, service, *msg, **opts):
1136 1136 '''hook for logging facility extensions
1137 1137
1138 1138 service should be a readily-identifiable subsystem, which will
1139 1139 allow filtering.
1140 1140
1141 1141 *msg should be a newline-terminated format string to log, and
1142 1142 then any values to %-format into that format string.
1143 1143
1144 1144 **opts currently has no defined meanings.
1145 1145 '''
1146 1146
1147 1147 def label(self, msg, label):
1148 1148 '''style msg based on supplied label
1149 1149
1150 1150 Like ui.write(), this just returns msg unchanged, but extensions
1151 1151 and GUI tools can override it to allow styling output without
1152 1152 writing it.
1153 1153
1154 1154 ui.write(s, 'label') is equivalent to
1155 1155 ui.write(ui.label(s, 'label')).
1156 1156 '''
1157 1157 return msg
1158 1158
1159 1159 def develwarn(self, msg, stacklevel=1, config=None):
1160 1160 """issue a developer warning message
1161 1161
1162 1162 Use 'stacklevel' to report the offender some layers further up in the
1163 1163 stack.
1164 1164 """
1165 1165 if not self.configbool('devel', 'all-warnings'):
1166 1166 if config is not None and not self.configbool('devel', config):
1167 1167 return
1168 1168 msg = 'devel-warn: ' + msg
1169 1169 stacklevel += 1 # get in develwarn
1170 1170 if self.tracebackflag:
1171 1171 util.debugstacktrace(msg, stacklevel, self.ferr, self.fout)
1172 1172 self.log('develwarn', '%s at:\n%s' %
1173 1173 (msg, ''.join(util.getstackframes(stacklevel))))
1174 1174 else:
1175 1175 curframe = inspect.currentframe()
1176 1176 calframe = inspect.getouterframes(curframe, 2)
1177 1177 self.write_err('%s at: %s:%s (%s)\n'
1178 1178 % ((msg,) + calframe[stacklevel][1:4]))
1179 1179 self.log('develwarn', '%s at: %s:%s (%s)\n',
1180 1180 msg, *calframe[stacklevel][1:4])
1181 1181 curframe = calframe = None # avoid cycles
1182 1182
1183 1183 def deprecwarn(self, msg, version):
1184 1184 """issue a deprecation warning
1185 1185
1186 1186 - msg: message explaining what is deprecated and how to upgrade,
1187 1187 - version: last version where the API will be supported,
1188 1188 """
1189 1189 if not (self.configbool('devel', 'all-warnings')
1190 1190 or self.configbool('devel', 'deprec-warn')):
1191 1191 return
1192 1192 msg += ("\n(compatibility will be dropped after Mercurial-%s,"
1193 1193 " update your code.)") % version
1194 1194 self.develwarn(msg, stacklevel=2, config='deprec-warn')
1195 1195
1196 1196 class paths(dict):
1197 1197 """Represents a collection of paths and their configs.
1198 1198
1199 1199 Data is initially derived from ui instances and the config files they have
1200 1200 loaded.
1201 1201 """
1202 1202 def __init__(self, ui):
1203 1203 dict.__init__(self)
1204 1204
1205 1205 for name, loc in ui.configitems('paths', ignoresub=True):
1206 1206 # No location is the same as not existing.
1207 1207 if not loc:
1208 1208 continue
1209 1209 loc, sub = ui.configsuboptions('paths', name)
1210 1210 self[name] = path(ui, name, rawloc=loc, suboptions=sub)
1211 1211
1212 1212 def getpath(self, name, default=None):
1213 1213 """Return a ``path`` from a string, falling back to default.
1214 1214
1215 1215 ``name`` can be a named path or a location. Locations are filesystem
1216 1216 paths or URIs.
1217 1217
1218 1218 Returns None if ``name`` is not a registered path, a URI, or a local
1219 1219 path to a repo.
1220 1220 """
1221 1221 # Only fall back to default if no path was requested.
1222 1222 if name is None:
1223 1223 if not default:
1224 1224 default = ()
1225 1225 elif not isinstance(default, (tuple, list)):
1226 1226 default = (default,)
1227 1227 for k in default:
1228 1228 try:
1229 1229 return self[k]
1230 1230 except KeyError:
1231 1231 continue
1232 1232 return None
1233 1233
1234 1234 # Most likely empty string.
1235 1235 # This may need to raise in the future.
1236 1236 if not name:
1237 1237 return None
1238 1238
1239 1239 try:
1240 1240 return self[name]
1241 1241 except KeyError:
1242 1242 # Try to resolve as a local path or URI.
1243 1243 try:
1244 1244 # We don't pass sub-options in, so no need to pass ui instance.
1245 1245 return path(None, None, rawloc=name)
1246 1246 except ValueError:
1247 1247 raise error.RepoError(_('repository %s does not exist') %
1248 1248 name)
1249 1249
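A sketch of the fallback behavior (the alias names are the conventional ones; the surrounding wiring is hypothetical):

    pathsobj = paths(ui)
    # with name=None, each default alias is tried in order
    p = pathsobj.getpath(None, default=('default-push', 'default'))
    if p is not None:
        dest = p.loc   # the resolved location string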
1250 1250 _pathsuboptions = {}
1251 1251
1252 1252 def pathsuboption(option, attr):
1253 1253 """Decorator used to declare a path sub-option.
1254 1254
1255 1255 Arguments are the sub-option name and the attribute it should set on
1256 1256 ``path`` instances.
1257 1257
1258 1258 The decorated function will receive as arguments a ``ui`` instance,
1259 1259 ``path`` instance, and the string value of this option from the config.
1260 1260 The function should return the value that will be set on the ``path``
1261 1261 instance.
1262 1262
1263 1263 This decorator can be used to perform additional verification of
1264 1264 sub-options and to change the type of sub-options.
1265 1265 """
1266 1266 def register(func):
1267 1267 _pathsuboptions[option] = (attr, func)
1268 1268 return func
1269 1269 return register
1270 1270
1271 1271 @pathsuboption('pushurl', 'pushloc')
1272 1272 def pushurlpathoption(ui, path, value):
1273 1273 u = util.url(value)
1274 1274 # Actually require a URL.
1275 1275 if not u.scheme:
1276 1276 ui.warn(_('(paths.%s:pushurl not a URL; ignoring)\n') % path.name)
1277 1277 return None
1278 1278
1279 1279 # Don't support the #foo syntax in the push URL to declare branch to
1280 1280 # push.
1281 1281 if u.fragment:
1282 1282 ui.warn(_('("#fragment" in paths.%s:pushurl not supported; '
1283 1283 'ignoring)\n') % path.name)
1284 1284 u.fragment = None
1285 1285
1286 1286 return str(u)
1287 1287
1288 1288 @pathsuboption('pushrev', 'pushrev')
1289 1289 def pushrevpathoption(ui, path, value):
1290 1290 return value
1291 1291
1292 1292 class path(object):
1293 1293 """Represents an individual path and its configuration."""
1294 1294
1295 1295 def __init__(self, ui, name, rawloc=None, suboptions=None):
1296 1296 """Construct a path from its config options.
1297 1297
1298 1298 ``ui`` is the ``ui`` instance the path is coming from.
1299 1299 ``name`` is the symbolic name of the path.
1300 1300 ``rawloc`` is the raw location, as defined in the config.
1301 1301 ``pushloc`` is the raw location pushes should be made to.
1302 1302
1303 1303 If ``name`` is not defined, we require that the location be a) a local
1304 1304 filesystem path with a .hg directory or b) a URL. If not,
1305 1305 ``ValueError`` is raised.
1306 1306 """
1307 1307 if not rawloc:
1308 1308 raise ValueError('rawloc must be defined')
1309 1309
1310 1310 # Locations may define branches via syntax <base>#<branch>.
1311 1311 u = util.url(rawloc)
1312 1312 branch = None
1313 1313 if u.fragment:
1314 1314 branch = u.fragment
1315 1315 u.fragment = None
1316 1316
1317 1317 self.url = u
1318 1318 self.branch = branch
1319 1319
1320 1320 self.name = name
1321 1321 self.rawloc = rawloc
1322 1322 self.loc = str(u)
1323 1323
1324 1324 # When given a raw location but not a symbolic name, check that
1325 1325 # the location is valid.
1326 1326 if not name and not u.scheme and not self._isvalidlocalpath(self.loc):
1327 1327 raise ValueError('location is not a URL or path to a local '
1328 1328 'repo: %s' % rawloc)
1329 1329
1330 1330 suboptions = suboptions or {}
1331 1331
1332 1332 # Now process the sub-options. If a sub-option is registered, its
1333 1333 # attribute will always be present. The value will be None if there
1334 1334 # was no valid sub-option.
1335 1335 for suboption, (attr, func) in _pathsuboptions.iteritems():
1336 1336 if suboption not in suboptions:
1337 1337 setattr(self, attr, None)
1338 1338 continue
1339 1339
1340 1340 value = func(ui, self, suboptions[suboption])
1341 1341 setattr(self, attr, value)
1342 1342
1343 1343 def _isvalidlocalpath(self, path):
1344 1344 """Returns True if the given path is a potentially valid repository.
1345 1345 This is its own function so that extensions can change the definition of
1346 1346 'valid' in this case (like when pulling from a git repo into a hg
1347 1347 one)."""
1348 1348 return os.path.isdir(os.path.join(path, '.hg'))
1349 1349
1350 1350 @property
1351 1351 def suboptions(self):
1352 1352 """Return sub-options and their values for this path.
1353 1353
1354 1354 This is intended to be used for presentation purposes.
1355 1355 """
1356 1356 d = {}
1357 1357 for subopt, (attr, _func) in _pathsuboptions.iteritems():
1358 1358 value = getattr(self, attr)
1359 1359 if value is not None:
1360 1360 d[subopt] = value
1361 1361 return d
1362 1362
1363 1363 # we instantiate one globally shared progress bar to avoid
1364 1364 # competing progress bars when multiple UI objects get created
1365 1365 _progresssingleton = None
1366 1366
1367 1367 def getprogbar(ui):
1368 1368 global _progresssingleton
1369 1369 if _progresssingleton is None:
1370 1370 # passing 'ui' object to the singleton is fishy,
1371 1371 # this is how the extension used to work but feel free to rework it.
1372 1372 _progresssingleton = progress.progbar(ui)
1373 1373 return _progresssingleton
@@ -1,3242 +1,3242 b''
1 1 # util.py - Mercurial utility functions and platform specific implementations
2 2 #
3 3 # Copyright 2005 K. Thananchayan <thananck@yahoo.com>
4 4 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
5 5 # Copyright 2006 Vadim Gelfer <vadim.gelfer@gmail.com>
6 6 #
7 7 # This software may be used and distributed according to the terms of the
8 8 # GNU General Public License version 2 or any later version.
9 9
10 10 """Mercurial utility functions and platform specific implementations.
11 11
12 12 This contains helper routines that are independent of the SCM core and
13 13 hide platform-specific details from the core.
14 14 """
15 15
16 16 from __future__ import absolute_import
17 17
18 18 import bz2
19 19 import calendar
20 20 import collections
21 21 import datetime
22 22 import errno
23 23 import gc
24 24 import hashlib
25 25 import imp
26 26 import os
27 27 import platform as pyplatform
28 28 import re as remod
29 29 import shutil
30 30 import signal
31 31 import socket
32 32 import stat
33 33 import string
34 34 import subprocess
35 35 import sys
36 36 import tempfile
37 37 import textwrap
38 38 import time
39 39 import traceback
40 40 import zlib
41 41
42 42 from . import (
43 43 encoding,
44 44 error,
45 45 i18n,
46 46 osutil,
47 47 parsers,
48 48 pycompat,
49 49 )
50 50
51 51 empty = pycompat.empty
52 52 httplib = pycompat.httplib
53 53 httpserver = pycompat.httpserver
54 54 pickle = pycompat.pickle
55 55 queue = pycompat.queue
56 56 socketserver = pycompat.socketserver
57 57 stderr = pycompat.stderr
58 58 stdin = pycompat.stdin
59 59 stdout = pycompat.stdout
60 60 stringio = pycompat.stringio
61 61 urlerr = pycompat.urlerr
62 62 urlparse = pycompat.urlparse
63 63 urlreq = pycompat.urlreq
64 64 xmlrpclib = pycompat.xmlrpclib
65 65
66 66 if os.name == 'nt':
67 67 from . import windows as platform
68 68 stdout = platform.winstdout(pycompat.stdout)
69 69 else:
70 70 from . import posix as platform
71 71
72 72 _ = i18n._
73 73
74 74 bindunixsocket = platform.bindunixsocket
75 75 cachestat = platform.cachestat
76 76 checkexec = platform.checkexec
77 77 checklink = platform.checklink
78 78 copymode = platform.copymode
79 79 executablepath = platform.executablepath
80 80 expandglobs = platform.expandglobs
81 81 explainexit = platform.explainexit
82 82 findexe = platform.findexe
83 83 gethgcmd = platform.gethgcmd
84 84 getuser = platform.getuser
85 85 getpid = os.getpid
86 86 groupmembers = platform.groupmembers
87 87 groupname = platform.groupname
88 88 hidewindow = platform.hidewindow
89 89 isexec = platform.isexec
90 90 isowner = platform.isowner
91 91 localpath = platform.localpath
92 92 lookupreg = platform.lookupreg
93 93 makedir = platform.makedir
94 94 nlinks = platform.nlinks
95 95 normpath = platform.normpath
96 96 normcase = platform.normcase
97 97 normcasespec = platform.normcasespec
98 98 normcasefallback = platform.normcasefallback
99 99 openhardlinks = platform.openhardlinks
100 100 oslink = platform.oslink
101 101 parsepatchoutput = platform.parsepatchoutput
102 102 pconvert = platform.pconvert
103 103 poll = platform.poll
104 104 popen = platform.popen
105 105 posixfile = platform.posixfile
106 106 quotecommand = platform.quotecommand
107 107 readpipe = platform.readpipe
108 108 rename = platform.rename
109 109 removedirs = platform.removedirs
110 110 samedevice = platform.samedevice
111 111 samefile = platform.samefile
112 112 samestat = platform.samestat
113 113 setbinary = platform.setbinary
114 114 setflags = platform.setflags
115 115 setsignalhandler = platform.setsignalhandler
116 116 shellquote = platform.shellquote
117 117 spawndetached = platform.spawndetached
118 118 split = platform.split
119 119 sshargs = platform.sshargs
120 120 statfiles = getattr(osutil, 'statfiles', platform.statfiles)
121 121 statisexec = platform.statisexec
122 122 statislink = platform.statislink
123 123 testpid = platform.testpid
124 124 umask = platform.umask
125 125 unlink = platform.unlink
126 126 unlinkpath = platform.unlinkpath
127 127 username = platform.username
128 128
129 129 # Python compatibility
130 130
131 131 _notset = object()
132 132
133 133 # disable Python's problematic floating point timestamps (issue4836)
134 134 # (Python hypocritically says you shouldn't change this behavior in
135 135 # libraries, and sure enough Mercurial is not a library.)
136 136 os.stat_float_times(False)
137 137
138 138 def safehasattr(thing, attr):
139 139 return getattr(thing, attr, _notset) is not _notset
140 140
141 141 DIGESTS = {
142 142 'md5': hashlib.md5,
143 143 'sha1': hashlib.sha1,
144 144 'sha512': hashlib.sha512,
145 145 }
146 146 # List of digest types from strongest to weakest
147 147 DIGESTS_BY_STRENGTH = ['sha512', 'sha1', 'md5']
148 148
149 149 for k in DIGESTS_BY_STRENGTH:
150 150 assert k in DIGESTS
151 151
152 152 class digester(object):
153 153 """helper to compute digests.
154 154
155 155 This helper can be used to compute one or more digests given their name.
156 156
157 157 >>> d = digester(['md5', 'sha1'])
158 158 >>> d.update('foo')
159 159 >>> [k for k in sorted(d)]
160 160 ['md5', 'sha1']
161 161 >>> d['md5']
162 162 'acbd18db4cc2f85cedef654fccc4a4d8'
163 163 >>> d['sha1']
164 164 '0beec7b5ea3f0fdbc95d0dd47f3c5bc275da8a33'
165 165 >>> digester.preferred(['md5', 'sha1'])
166 166 'sha1'
167 167 """
168 168
169 169 def __init__(self, digests, s=''):
170 170 self._hashes = {}
171 171 for k in digests:
172 172 if k not in DIGESTS:
173 173 raise Abort(_('unknown digest type: %s') % k)
174 174 self._hashes[k] = DIGESTS[k]()
175 175 if s:
176 176 self.update(s)
177 177
178 178 def update(self, data):
179 179 for h in self._hashes.values():
180 180 h.update(data)
181 181
182 182 def __getitem__(self, key):
183 183 if key not in DIGESTS:
184 184 raise Abort(_('unknown digest type: %s') % key)
185 185 return self._hashes[key].hexdigest()
186 186
187 187 def __iter__(self):
188 188 return iter(self._hashes)
189 189
190 190 @staticmethod
191 191 def preferred(supported):
192 192 """returns the strongest digest type in both supported and DIGESTS."""
193 193
194 194 for k in DIGESTS_BY_STRENGTH:
195 195 if k in supported:
196 196 return k
197 197 return None
198 198
199 199 class digestchecker(object):
200 200 """file handle wrapper that additionally checks content against a given
201 201 size and digests.
202 202
203 203 d = digestchecker(fh, size, {'md5': '...'})
204 204
205 205 When multiple digests are given, all of them are validated.
206 206 """
207 207
208 208 def __init__(self, fh, size, digests):
209 209 self._fh = fh
210 210 self._size = size
211 211 self._got = 0
212 212 self._digests = dict(digests)
213 213 self._digester = digester(self._digests.keys())
214 214
215 215 def read(self, length=-1):
216 216 content = self._fh.read(length)
217 217 self._digester.update(content)
218 218 self._got += len(content)
219 219 return content
220 220
221 221 def validate(self):
222 222 if self._size != self._got:
223 223 raise Abort(_('size mismatch: expected %d, got %d') %
224 224 (self._size, self._got))
225 225 for k, v in self._digests.items():
226 226 if v != self._digester[k]:
227 227 # i18n: first parameter is a digest name
228 228 raise Abort(_('%s mismatch: expected %s, got %s') %
229 229 (k, v, self._digester[k]))
230 230
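A sketch of the intended wiring (file name, expected size, and digest value are hypothetical):

    fh = open('bundle.hg', 'rb')                              # hypothetical
    checked = digestchecker(fh, size=expectedsize,            # hypothetical
                            digests={'sha1': expectedsha1})   # hypothetical
    while checked.read(32768):
        pass                 # consume the stream through the wrapper
    checked.validate()       # raises Abort on size or digest mismatch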
231 231 try:
232 232 buffer = buffer
233 233 except NameError:
234 234 if not pycompat.ispy3:
235 235 def buffer(sliceable, offset=0):
236 236 return sliceable[offset:]
237 237 else:
238 238 def buffer(sliceable, offset=0):
239 239 return memoryview(sliceable)[offset:]
240 240
241 241 closefds = os.name == 'posix'
242 242
243 243 _chunksize = 4096
244 244
245 245 class bufferedinputpipe(object):
246 246 """a manually buffered input pipe
247 247
248 248 Python will not let us use buffered IO and lazy reading with 'polling' at
249 249 the same time. We cannot probe the buffer state and select will not detect
250 250 that data are ready to read if they are already buffered.
251 251
252 252 This class lets us work around that by implementing its own buffering
253 253 (allowing efficient readline) while offering a way to know if the buffer is
254 254 empty from the output (allowing collaboration of the buffer with polling).
255 255
256 256 This class lives in the 'util' module because it makes use of the 'os'
257 257 module from the python stdlib.
258 258 """
259 259
260 260 def __init__(self, input):
261 261 self._input = input
262 262 self._buffer = []
263 263 self._eof = False
264 264 self._lenbuf = 0
265 265
266 266 @property
267 267 def hasbuffer(self):
268 268 """True is any data is currently buffered
269 269
270 270 This will be used externally a pre-step for polling IO. If there is
271 271 already data then no polling should be set in place."""
272 272 return bool(self._buffer)
273 273
274 274 @property
275 275 def closed(self):
276 276 return self._input.closed
277 277
278 278 def fileno(self):
279 279 return self._input.fileno()
280 280
281 281 def close(self):
282 282 return self._input.close()
283 283
284 284 def read(self, size):
285 285 while (not self._eof) and (self._lenbuf < size):
286 286 self._fillbuffer()
287 287 return self._frombuffer(size)
288 288
289 289 def readline(self, *args, **kwargs):
290 290 if 1 < len(self._buffer):
291 291 # this should not happen because both read and readline end with a
292 292 # _frombuffer call that collapses it.
293 293 self._buffer = [''.join(self._buffer)]
294 294 self._lenbuf = len(self._buffer[0])
295 295 lfi = -1
296 296 if self._buffer:
297 297 lfi = self._buffer[-1].find('\n')
298 298 while (not self._eof) and lfi < 0:
299 299 self._fillbuffer()
300 300 if self._buffer:
301 301 lfi = self._buffer[-1].find('\n')
302 302 size = lfi + 1
303 303 if lfi < 0: # end of file
304 304 size = self._lenbuf
305 305 elif 1 < len(self._buffer):
306 306 # we need to take previous chunks into account
307 307 size += self._lenbuf - len(self._buffer[-1])
308 308 return self._frombuffer(size)
309 309
310 310 def _frombuffer(self, size):
311 311 """return at most 'size' data from the buffer
312 312
313 313 The data are removed from the buffer."""
314 314 if size == 0 or not self._buffer:
315 315 return ''
316 316 buf = self._buffer[0]
317 317 if 1 < len(self._buffer):
318 318 buf = ''.join(self._buffer)
319 319
320 320 data = buf[:size]
321 321 buf = buf[len(data):]
322 322 if buf:
323 323 self._buffer = [buf]
324 324 self._lenbuf = len(buf)
325 325 else:
326 326 self._buffer = []
327 327 self._lenbuf = 0
328 328 return data
329 329
330 330 def _fillbuffer(self):
331 331 """read data to the buffer"""
332 332 data = os.read(self._input.fileno(), _chunksize)
333 333 if not data:
334 334 self._eof = True
335 335 else:
336 336 self._lenbuf += len(data)
337 337 self._buffer.append(data)
338 338
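A rough sketch of the intended collaboration with polling (rawpipe is a hypothetical OS-level pipe; poll is this module's platform wrapper):

    bufpipe = bufferedinputpipe(rawpipe)
    if not bufpipe.hasbuffer:
        poll([bufpipe.fileno()])   # only block when our buffer is empty
    line = bufpipe.readline()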
339 339 def popen2(cmd, env=None, newlines=False):
340 340 # Setting bufsize to -1 lets the system decide the buffer size.
341 341 # The default for bufsize is 0, meaning unbuffered. This leads to
342 342 # poor performance on Mac OS X: http://bugs.python.org/issue4194
343 343 p = subprocess.Popen(cmd, shell=True, bufsize=-1,
344 344 close_fds=closefds,
345 345 stdin=subprocess.PIPE, stdout=subprocess.PIPE,
346 346 universal_newlines=newlines,
347 347 env=env)
348 348 return p.stdin, p.stdout
349 349
350 350 def popen3(cmd, env=None, newlines=False):
351 351 stdin, stdout, stderr, p = popen4(cmd, env, newlines)
352 352 return stdin, stdout, stderr
353 353
354 354 def popen4(cmd, env=None, newlines=False, bufsize=-1):
355 355 p = subprocess.Popen(cmd, shell=True, bufsize=bufsize,
356 356 close_fds=closefds,
357 357 stdin=subprocess.PIPE, stdout=subprocess.PIPE,
358 358 stderr=subprocess.PIPE,
359 359 universal_newlines=newlines,
360 360 env=env)
361 361 return p.stdin, p.stdout, p.stderr, p
362 362
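A minimal usage sketch for the pipe helpers (the command is illustrative, assuming a POSIX 'sort'):

    fin, fout = popen2('sort')
    fin.write('b\na\n')
    fin.close()
    data = fout.read()   # 'a\nb\n'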
363 363 def version():
364 364 """Return version information if available."""
365 365 try:
366 366 from . import __version__
367 367 return __version__.version
368 368 except ImportError:
369 369 return 'unknown'
370 370
371 371 def versiontuple(v=None, n=4):
372 372 """Parses a Mercurial version string into an N-tuple.
373 373
374 374 The version string to be parsed is specified with the ``v`` argument.
375 375 If it isn't defined, the current Mercurial version string will be parsed.
376 376
377 377 ``n`` can be 2, 3, or 4. Here is how some version strings map to
378 378 returned values:
379 379
380 380 >>> v = '3.6.1+190-df9b73d2d444'
381 381 >>> versiontuple(v, 2)
382 382 (3, 6)
383 383 >>> versiontuple(v, 3)
384 384 (3, 6, 1)
385 385 >>> versiontuple(v, 4)
386 386 (3, 6, 1, '190-df9b73d2d444')
387 387
388 388 >>> versiontuple('3.6.1+190-df9b73d2d444+20151118')
389 389 (3, 6, 1, '190-df9b73d2d444+20151118')
390 390
391 391 >>> v = '3.6'
392 392 >>> versiontuple(v, 2)
393 393 (3, 6)
394 394 >>> versiontuple(v, 3)
395 395 (3, 6, None)
396 396 >>> versiontuple(v, 4)
397 397 (3, 6, None, None)
398 398
399 399 >>> v = '3.9-rc'
400 400 >>> versiontuple(v, 2)
401 401 (3, 9)
402 402 >>> versiontuple(v, 3)
403 403 (3, 9, None)
404 404 >>> versiontuple(v, 4)
405 405 (3, 9, None, 'rc')
406 406
407 407 >>> v = '3.9-rc+2-02a8fea4289b'
408 408 >>> versiontuple(v, 2)
409 409 (3, 9)
410 410 >>> versiontuple(v, 3)
411 411 (3, 9, None)
412 412 >>> versiontuple(v, 4)
413 413 (3, 9, None, 'rc+2-02a8fea4289b')
414 414 """
415 415 if not v:
416 416 v = version()
417 417 parts = remod.split('[\+-]', v, 1)
418 418 if len(parts) == 1:
419 419 vparts, extra = parts[0], None
420 420 else:
421 421 vparts, extra = parts
422 422
423 423 vints = []
424 424 for i in vparts.split('.'):
425 425 try:
426 426 vints.append(int(i))
427 427 except ValueError:
428 428 break
429 429 # (3, 6) -> (3, 6, None)
430 430 while len(vints) < 3:
431 431 vints.append(None)
432 432
433 433 if n == 2:
434 434 return (vints[0], vints[1])
435 435 if n == 3:
436 436 return (vints[0], vints[1], vints[2])
437 437 if n == 4:
438 438 return (vints[0], vints[1], vints[2], extra)
439 439
440 440 # used by parsedate
441 441 defaultdateformats = (
442 442 '%Y-%m-%dT%H:%M:%S', # the 'real' ISO8601
443 443 '%Y-%m-%dT%H:%M', # without seconds
444 444 '%Y-%m-%dT%H%M%S', # another awful but legal variant without :
445 445 '%Y-%m-%dT%H%M', # without seconds
446 446 '%Y-%m-%d %H:%M:%S', # our common legal variant
447 447 '%Y-%m-%d %H:%M', # without seconds
448 448 '%Y-%m-%d %H%M%S', # without :
449 449 '%Y-%m-%d %H%M', # without seconds
450 450 '%Y-%m-%d %I:%M:%S%p',
451 451 '%Y-%m-%d %H:%M',
452 452 '%Y-%m-%d %I:%M%p',
453 453 '%Y-%m-%d',
454 454 '%m-%d',
455 455 '%m/%d',
456 456 '%m/%d/%y',
457 457 '%m/%d/%Y',
458 458 '%a %b %d %H:%M:%S %Y',
459 459 '%a %b %d %I:%M:%S%p %Y',
460 460 '%a, %d %b %Y %H:%M:%S', # GNU coreutils "/bin/date --rfc-2822"
461 461 '%b %d %H:%M:%S %Y',
462 462 '%b %d %I:%M:%S%p %Y',
463 463 '%b %d %H:%M:%S',
464 464 '%b %d %I:%M:%S%p',
465 465 '%b %d %H:%M',
466 466 '%b %d %I:%M%p',
467 467 '%b %d %Y',
468 468 '%b %d',
469 469 '%H:%M:%S',
470 470 '%I:%M:%S%p',
471 471 '%H:%M',
472 472 '%I:%M%p',
473 473 )
474 474
475 475 extendeddateformats = defaultdateformats + (
476 476 "%Y",
477 477 "%Y-%m",
478 478 "%b",
479 479 "%b %Y",
480 480 )
481 481
482 482 def cachefunc(func):
483 483 '''cache the result of function calls'''
484 484 # XXX doesn't handle keywords args
485 485 if func.__code__.co_argcount == 0:
486 486 cache = []
487 487 def f():
488 488 if len(cache) == 0:
489 489 cache.append(func())
490 490 return cache[0]
491 491 return f
492 492 cache = {}
493 493 if func.__code__.co_argcount == 1:
494 494 # we gain a small amount of time because
495 495 # we don't need to pack/unpack the list
496 496 def f(arg):
497 497 if arg not in cache:
498 498 cache[arg] = func(arg)
499 499 return cache[arg]
500 500 else:
501 501 def f(*args):
502 502 if args not in cache:
503 503 cache[args] = func(*args)
504 504 return cache[args]
505 505
506 506 return f
507 507
508 508 class sortdict(dict):
509 509 '''a simple insertion-ordered dictionary'''
510 510 def __init__(self, data=None):
511 511 self._list = []
512 512 if data:
513 513 self.update(data)
514 514 def copy(self):
515 515 return sortdict(self)
516 516 def __setitem__(self, key, val):
517 517 if key in self:
518 518 self._list.remove(key)
519 519 self._list.append(key)
520 520 dict.__setitem__(self, key, val)
521 521 def __iter__(self):
522 522 return self._list.__iter__()
523 523 def update(self, src):
524 524 if isinstance(src, dict):
525 525 src = src.iteritems()
526 526 for k, v in src:
527 527 self[k] = v
528 528 def clear(self):
529 529 dict.clear(self)
530 530 self._list = []
531 531 def items(self):
532 532 return [(k, self[k]) for k in self._list]
533 533 def __delitem__(self, key):
534 534 dict.__delitem__(self, key)
535 535 self._list.remove(key)
536 536 def pop(self, key, *args, **kwargs):
537 537 dict.pop(self, key, *args, **kwargs)
538 538 try:
539 539 self._list.remove(key)
540 540 except ValueError:
541 541 pass
542 542 def keys(self):
543 543 return self._list
544 544 def iterkeys(self):
545 545 return self._list.__iter__()
546 546 def iteritems(self):
547 547 for k in self._list:
548 548 yield k, self[k]
549 549 def insert(self, index, key, val):
550 550 self._list.insert(index, key)
551 551 dict.__setitem__(self, key, val)
552 552 def __repr__(self):
553 553 if not self:
554 554 return '%s()' % self.__class__.__name__
555 555 return '%s(%r)' % (self.__class__.__name__, self.items())
556 556
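A quick sketch of the ordering semantics:

    d = sortdict()
    d['b'] = 1
    d['a'] = 2
    d.keys()     # ['b', 'a'] -- insertion order, not key order
    d['b'] = 3
    d.keys()     # ['a', 'b'] -- re-setting a key moves it to the end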
557 557 class _lrucachenode(object):
558 558 """A node in a doubly linked list.
559 559
560 560 Holds a reference to nodes on either side as well as a key-value
561 561 pair for the dictionary entry.
562 562 """
563 563 __slots__ = (u'next', u'prev', u'key', u'value')
564 564
565 565 def __init__(self):
566 566 self.next = None
567 567 self.prev = None
568 568
569 569 self.key = _notset
570 570 self.value = None
571 571
572 572 def markempty(self):
573 573 """Mark the node as emptied."""
574 574 self.key = _notset
575 575
576 576 class lrucachedict(object):
577 577 """Dict that caches most recent accesses and sets.
578 578
579 579 The dict consists of an actual backing dict - indexed by original
580 580 key - and a doubly linked circular list defining the order of entries in
581 581 the cache.
582 582
583 583 The head node is the newest entry in the cache. If the cache is full,
584 584 we recycle head.prev and make it the new head. Cache accesses result in
585 585 the node being moved to before the existing head and being marked as the
586 586 new head node.
587 587 """
588 588 def __init__(self, max):
589 589 self._cache = {}
590 590
591 591 self._head = head = _lrucachenode()
592 592 head.prev = head
593 593 head.next = head
594 594 self._size = 1
595 595 self._capacity = max
596 596
597 597 def __len__(self):
598 598 return len(self._cache)
599 599
600 600 def __contains__(self, k):
601 601 return k in self._cache
602 602
603 603 def __iter__(self):
604 604 # We don't have to iterate in cache order, but why not.
605 605 n = self._head
606 606 for i in range(len(self._cache)):
607 607 yield n.key
608 608 n = n.next
609 609
610 610 def __getitem__(self, k):
611 611 node = self._cache[k]
612 612 self._movetohead(node)
613 613 return node.value
614 614
615 615 def __setitem__(self, k, v):
616 616 node = self._cache.get(k)
617 617 # Replace existing value and mark as newest.
618 618 if node is not None:
619 619 node.value = v
620 620 self._movetohead(node)
621 621 return
622 622
623 623 if self._size < self._capacity:
624 624 node = self._addcapacity()
625 625 else:
626 626 # Grab the last/oldest item.
627 627 node = self._head.prev
628 628
629 629 # At capacity. Kill the old entry.
630 630 if node.key is not _notset:
631 631 del self._cache[node.key]
632 632
633 633 node.key = k
634 634 node.value = v
635 635 self._cache[k] = node
636 636 # And mark it as newest entry. No need to adjust order since it
637 637 # is already self._head.prev.
638 638 self._head = node
639 639
640 640 def __delitem__(self, k):
641 641 node = self._cache.pop(k)
642 642 node.markempty()
643 643
644 644 # Temporarily mark as newest item before re-adjusting head to make
645 645 # this node the oldest item.
646 646 self._movetohead(node)
647 647 self._head = node.next
648 648
649 649 # Additional dict methods.
650 650
651 651 def get(self, k, default=None):
652 652 try:
653 653 return self._cache[k].value
654 654 except KeyError:
655 655 return default
656 656
657 657 def clear(self):
658 658 n = self._head
659 659 while n.key is not _notset:
660 660 n.markempty()
661 661 n = n.next
662 662
663 663 self._cache.clear()
664 664
665 665 def copy(self):
666 666 result = lrucachedict(self._capacity)
667 667 n = self._head.prev
668 668 # Iterate in oldest-to-newest order, so the copy has the right ordering
669 669 for i in range(len(self._cache)):
670 670 result[n.key] = n.value
671 671 n = n.prev
672 672 return result
673 673
674 674 def _movetohead(self, node):
675 675 """Mark a node as the newest, making it the new head.
676 676
677 677 When a node is accessed, it becomes the freshest entry in the LRU
678 678 list, which is denoted by self._head.
679 679
680 680 Visually, let's make ``N`` the new head node (* denotes head):
681 681
682 682 previous/oldest <-> head <-> next/next newest
683 683
684 684 ----<->--- A* ---<->-----
685 685 | |
686 686 E <-> D <-> N <-> C <-> B
687 687
688 688 To:
689 689
690 690 ----<->--- N* ---<->-----
691 691 | |
692 692 E <-> D <-> C <-> B <-> A
693 693
694 694 This requires the following moves:
695 695
696 696 C.next = D (node.prev.next = node.next)
697 697 D.prev = C (node.next.prev = node.prev)
698 698 E.next = N (head.prev.next = node)
699 699 N.prev = E (node.prev = head.prev)
700 700 N.next = A (node.next = head)
701 701 A.prev = N (head.prev = node)
702 702 """
703 703 head = self._head
704 704 # C.next = D
705 705 node.prev.next = node.next
706 706 # D.prev = C
707 707 node.next.prev = node.prev
708 708 # N.prev = E
709 709 node.prev = head.prev
710 710 # N.next = A
711 711 # It is tempting to do just "head" here, however if node is
712 712 # adjacent to head, this will do bad things.
713 713 node.next = head.prev.next
714 714 # E.next = N
715 715 node.next.prev = node
716 716 # A.prev = N
717 717 node.prev.next = node
718 718
719 719 self._head = node
720 720
721 721 def _addcapacity(self):
722 722 """Add a node to the circular linked list.
723 723
724 724 The new node is inserted before the head node.
725 725 """
726 726 head = self._head
727 727 node = _lrucachenode()
728 728 head.prev.next = node
729 729 node.prev = head.prev
730 730 node.next = head
731 731 head.prev = node
732 732 self._size += 1
733 733 return node
734 734
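A small sketch of the eviction behavior:

    cache = lrucachedict(2)
    cache['a'] = 1
    cache['b'] = 2
    cache['a']       # touch 'a'; 'b' becomes the oldest entry
    cache['c'] = 3   # at capacity: 'b' is evicted
    'b' in cache     # False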
735 735 def lrucachefunc(func):
736 736 '''cache most recent results of function calls'''
737 737 cache = {}
738 738 order = collections.deque()
739 739 if func.__code__.co_argcount == 1:
740 740 def f(arg):
741 741 if arg not in cache:
742 742 if len(cache) > 20:
743 743 del cache[order.popleft()]
744 744 cache[arg] = func(arg)
745 745 else:
746 746 order.remove(arg)
747 747 order.append(arg)
748 748 return cache[arg]
749 749 else:
750 750 def f(*args):
751 751 if args not in cache:
752 752 if len(cache) > 20:
753 753 del cache[order.popleft()]
754 754 cache[args] = func(*args)
755 755 else:
756 756 order.remove(args)
757 757 order.append(args)
758 758 return cache[args]
759 759
760 760 return f
761 761
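A sketch of memoizing an expensive one-argument function (the wrapped computation is hypothetical):

    @lrucachefunc
    def ancestors(rev):
        return computeancestors(rev)   # hypothetical expensive helper
    ancestors(5)   # computed and cached
    ancestors(5)   # served from the 20-entry cache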
762 762 class propertycache(object):
763 763 def __init__(self, func):
764 764 self.func = func
765 765 self.name = func.__name__
766 766 def __get__(self, obj, type=None):
767 767 result = self.func(obj)
768 768 self.cachevalue(obj, result)
769 769 return result
770 770
771 771 def cachevalue(self, obj, value):
772 772 # __dict__ assignment required to bypass __setattr__ (eg: repoview)
773 773 obj.__dict__[self.name] = value
774 774
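A sketch of the descriptor in use (the class and loader are hypothetical):

    class repoobj(object):
        @propertycache
        def changelog(self):
            return loadchangelog(self)   # hypothetical; runs only once
    r = repoobj()
    r.changelog   # computed, then stored in r.__dict__
    r.changelog   # plain attribute lookup now; __get__ is bypassed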
775 775 def pipefilter(s, cmd):
776 776 '''filter string S through command CMD, returning its output'''
777 777 p = subprocess.Popen(cmd, shell=True, close_fds=closefds,
778 778 stdin=subprocess.PIPE, stdout=subprocess.PIPE)
779 779 pout, perr = p.communicate(s)
780 780 return pout
781 781
782 782 def tempfilter(s, cmd):
783 783 '''filter string S through a pair of temporary files with CMD.
784 784 CMD is used as a template to create the real command to be run,
785 785 with the strings INFILE and OUTFILE replaced by the real names of
786 786 the temporary files generated.'''
787 787 inname, outname = None, None
788 788 try:
789 789 infd, inname = tempfile.mkstemp(prefix='hg-filter-in-')
790 790 fp = os.fdopen(infd, 'wb')
791 791 fp.write(s)
792 792 fp.close()
793 793 outfd, outname = tempfile.mkstemp(prefix='hg-filter-out-')
794 794 os.close(outfd)
795 795 cmd = cmd.replace('INFILE', inname)
796 796 cmd = cmd.replace('OUTFILE', outname)
797 797 code = os.system(cmd)
798 798 if sys.platform == 'OpenVMS' and code & 1:
799 799 code = 0
800 800 if code:
801 801 raise Abort(_("command '%s' failed: %s") %
802 802 (cmd, explainexit(code)))
803 803 return readfile(outname)
804 804 finally:
805 805 try:
806 806 if inname:
807 807 os.unlink(inname)
808 808 except OSError:
809 809 pass
810 810 try:
811 811 if outname:
812 812 os.unlink(outname)
813 813 except OSError:
814 814 pass
815 815
816 816 filtertable = {
817 817 'tempfile:': tempfilter,
818 818 'pipe:': pipefilter,
819 819 }
820 820
821 821 def filter(s, cmd):
822 822 "filter a string through a command that transforms its input to its output"
823 823 for name, fn in filtertable.iteritems():
824 824 if cmd.startswith(name):
825 825 return fn(s, cmd[len(name):].lstrip())
826 826 return pipefilter(s, cmd)
827 827
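Two illustrative invocations, one per filter type (the commands are examples only):

    filter('some text\n', 'pipe: tr a-z A-Z')    # stream through a pipe
    filter('some text\n', 'tempfile: sed s/a/b/ INFILE > OUTFILE')
    filter('some text\n', 'tr a-z A-Z')          # no prefix: pipefilter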
828 828 def binary(s):
829 829 """return true if a string is binary data"""
830 830 return bool(s and '\0' in s)
831 831
832 832 def increasingchunks(source, min=1024, max=65536):
833 833 '''return no less than min bytes per chunk while data remains,
834 834 doubling min after each chunk until it reaches max'''
835 835 def log2(x):
836 836 if not x:
837 837 return 0
838 838 i = 0
839 839 while x:
840 840 x >>= 1
841 841 i += 1
842 842 return i - 1
843 843
844 844 buf = []
845 845 blen = 0
846 846 for chunk in source:
847 847 buf.append(chunk)
848 848 blen += len(chunk)
849 849 if blen >= min:
850 850 if min < max:
851 851 min = min << 1
852 852 nmin = 1 << log2(blen)
853 853 if nmin > min:
854 854 min = nmin
855 855 if min > max:
856 856 min = max
857 857 yield ''.join(buf)
858 858 blen = 0
859 859 buf = []
860 860 if buf:
861 861 yield ''.join(buf)
862 862
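Worked through on small numbers (a sketch; the grouping follows the doubling rule above):

    chunks = iter(['a' * 400] * 10)
    sizes = [len(c) for c in increasingchunks(chunks, min=1024, max=4096)]
    # sizes == [1200, 2400, 400]: yield once the buffer reaches 1024,
    # the threshold doubles to 2048, and the final partial buffer flushes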
863 863 Abort = error.Abort
864 864
865 865 def always(fn):
866 866 return True
867 867
868 868 def never(fn):
869 869 return False
870 870
871 871 def nogc(func):
872 872 """disable garbage collector
873 873
874 874 Python's garbage collector triggers a GC each time a certain number of
875 875 container objects (the number being defined by gc.get_threshold()) are
876 876 allocated even when marked not to be tracked by the collector. Tracking has
877 877 no effect on when GCs are triggered, only on what objects the GC looks
878 878 into. As a workaround, disable GC while building complex (huge)
879 879 containers.
880 880
881 881 This garbage collector issue has been fixed in Python 2.7.
882 882 """
883 883 if sys.version_info >= (2, 7):
884 884 return func
885 885 def wrapper(*args, **kwargs):
886 886 gcenabled = gc.isenabled()
887 887 gc.disable()
888 888 try:
889 889 return func(*args, **kwargs)
890 890 finally:
891 891 if gcenabled:
892 892 gc.enable()
893 893 return wrapper
894 894
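A sketch of the decorator in use (the builder is hypothetical):

    @nogc
    def buildhugedict(items):
        return dict(items)   # GC stays disabled for the duration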
895 895 def pathto(root, n1, n2):
896 896 '''return the relative path from one place to another.
897 897 root should use os.sep to separate directories
898 898 n1 should use os.sep to separate directories
899 899 n2 should use "/" to separate directories
900 900 returns an os.sep-separated path.
901 901
902 902 If n1 is a relative path, it's assumed it's
903 903 relative to root.
904 904 n2 should always be relative to root.
905 905 '''
906 906 if not n1:
907 907 return localpath(n2)
908 908 if os.path.isabs(n1):
909 909 if os.path.splitdrive(root)[0] != os.path.splitdrive(n1)[0]:
910 910 return os.path.join(root, localpath(n2))
911 911 n2 = '/'.join((pconvert(root), n2))
912 912 a, b = splitpath(n1), n2.split('/')
913 913 a.reverse()
914 914 b.reverse()
915 915 while a and b and a[-1] == b[-1]:
916 916 a.pop()
917 917 b.pop()
918 918 b.reverse()
919 919 return os.sep.join((['..'] * len(a)) + b) or '.'
920 920
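Two worked examples on POSIX (where os.sep is '/'):

    pathto('/repo', 'a/b', 'a/c/f')   # -> '../c/f'
    pathto('/repo', None, 'a/b')      # -> 'a/b' (no starting point)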
921 921 def mainfrozen():
922 922 """return True if we are a frozen executable.
923 923
924 924 The code supports py2exe (most common, Windows only) and tools/freeze
925 925 (portable, not much used).
926 926 """
927 927 return (safehasattr(sys, "frozen") or # new py2exe
928 928 safehasattr(sys, "importers") or # old py2exe
929 929 imp.is_frozen(u"__main__")) # tools/freeze
930 930
931 931 # the location of data files matching the source code
932 932 if mainfrozen() and getattr(sys, 'frozen', None) != 'macosx_app':
933 933 # executable version (py2exe) doesn't support __file__
934 934 datapath = os.path.dirname(sys.executable)
935 935 else:
936 936 datapath = os.path.dirname(__file__)
937 937
938 938 if not isinstance(datapath, bytes):
939 939 datapath = pycompat.fsencode(datapath)
940 940
941 941 i18n.setdatapath(datapath)
942 942
943 943 _hgexecutable = None
944 944
945 945 def hgexecutable():
946 946 """return location of the 'hg' executable.
947 947
948 948 Defaults to $HG or 'hg' in the search path.
949 949 """
950 950 if _hgexecutable is None:
951 951 hg = os.environ.get('HG')
952 952 mainmod = sys.modules['__main__']
953 953 if hg:
954 954 _sethgexecutable(hg)
955 955 elif mainfrozen():
956 956 if getattr(sys, 'frozen', None) == 'macosx_app':
957 957 # Env variable set by py2app
958 958 _sethgexecutable(os.environ['EXECUTABLEPATH'])
959 959 else:
960 960 _sethgexecutable(sys.executable)
961 961 elif os.path.basename(getattr(mainmod, '__file__', '')) == 'hg':
962 962 _sethgexecutable(mainmod.__file__)
963 963 else:
964 964 exe = findexe('hg') or os.path.basename(sys.argv[0])
965 965 _sethgexecutable(exe)
966 966 return _hgexecutable
967 967
968 968 def _sethgexecutable(path):
969 969 """set location of the 'hg' executable"""
970 970 global _hgexecutable
971 971 _hgexecutable = path
972 972
973 973 def _isstdout(f):
974 974 fileno = getattr(f, 'fileno', None)
975 975 return fileno and fileno() == sys.__stdout__.fileno()
976 976
977 977 def system(cmd, environ=None, cwd=None, onerr=None, errprefix=None, out=None):
978 978 '''enhanced shell command execution.
979 979 run with environment maybe modified, maybe in different dir.
980 980
981 981 if command fails and onerr is None, return status, else raise onerr
982 982 object as exception.
983 983
984 984 if out is specified, it is assumed to be a file-like object that has a
985 985 write() method. stdout and stderr will be redirected to out.'''
986 986 if environ is None:
987 987 environ = {}
988 988 try:
989 sys.stdout.flush()
989 stdout.flush()
990 990 except Exception:
991 991 pass
992 992 def py2shell(val):
993 993 'convert python object into string that is useful to shell'
994 994 if val is None or val is False:
995 995 return '0'
996 996 if val is True:
997 997 return '1'
998 998 return str(val)
999 999 origcmd = cmd
1000 1000 cmd = quotecommand(cmd)
1001 1001 if sys.platform == 'plan9' and (sys.version_info[0] == 2
1002 1002 and sys.version_info[1] < 7):
1003 1003 # subprocess kludge to work around issues in half-baked Python
1004 1004 # ports, notably bichued/python:
1005 1005 if not cwd is None:
1006 1006 os.chdir(cwd)
1007 1007 rc = os.system(cmd)
1008 1008 else:
1009 1009 env = dict(os.environ)
1010 1010 env.update((k, py2shell(v)) for k, v in environ.iteritems())
1011 1011 env['HG'] = hgexecutable()
1012 1012 if out is None or _isstdout(out):
1013 1013 rc = subprocess.call(cmd, shell=True, close_fds=closefds,
1014 1014 env=env, cwd=cwd)
1015 1015 else:
1016 1016 proc = subprocess.Popen(cmd, shell=True, close_fds=closefds,
1017 1017 env=env, cwd=cwd, stdout=subprocess.PIPE,
1018 1018 stderr=subprocess.STDOUT)
1019 1019 for line in iter(proc.stdout.readline, ''):
1020 1020 out.write(line)
1021 1021 proc.wait()
1022 1022 rc = proc.returncode
1023 1023 if sys.platform == 'OpenVMS' and rc & 1:
1024 1024 rc = 0
1025 1025 if rc and onerr:
1026 1026 errmsg = '%s %s' % (os.path.basename(origcmd.split(None, 1)[0]),
1027 1027 explainexit(rc)[0])
1028 1028 if errprefix:
1029 1029 errmsg = '%s: %s' % (errprefix, errmsg)
1030 1030 raise onerr(errmsg)
1031 1031 return rc
1032 1032
1033 1033 def checksignature(func):
1034 1034 '''wrap a function with code to check for calling errors'''
1035 1035 def check(*args, **kwargs):
1036 1036 try:
1037 1037 return func(*args, **kwargs)
1038 1038 except TypeError:
1039 1039 if len(traceback.extract_tb(sys.exc_info()[2])) == 1:
1040 1040 raise error.SignatureError
1041 1041 raise
1042 1042
1043 1043 return check
1044 1044
1045 1045 def copyfile(src, dest, hardlink=False, copystat=False, checkambig=False):
1046 1046 '''copy a file, preserving mode and optionally other stat info like
1047 1047 atime/mtime
1048 1048
1049 1049 checkambig argument is used with filestat, and is useful only if
1050 1050 destination file is guarded by any lock (e.g. repo.lock or
1051 1051 repo.wlock).
1052 1052
1053 1053 copystat and checkambig should be exclusive.
1054 1054 '''
1055 1055 assert not (copystat and checkambig)
1056 1056 oldstat = None
1057 1057 if os.path.lexists(dest):
1058 1058 if checkambig:
1059 1059 oldstat = checkambig and filestat(dest)
1060 1060 unlink(dest)
1061 1061 # hardlinks are problematic on CIFS, quietly ignore this flag
1062 1062 # until we find a way to work around it cleanly (issue4546)
1063 1063 if False and hardlink:
1064 1064 try:
1065 1065 oslink(src, dest)
1066 1066 return
1067 1067 except (IOError, OSError):
1068 1068 pass # fall back to normal copy
1069 1069 if os.path.islink(src):
1070 1070 os.symlink(os.readlink(src), dest)
1071 1071 # copytime is ignored for symlinks, but in general copytime isn't needed
1072 1072 # for them anyway
1073 1073 else:
1074 1074 try:
1075 1075 shutil.copyfile(src, dest)
1076 1076 if copystat:
1077 1077 # copystat also copies mode
1078 1078 shutil.copystat(src, dest)
1079 1079 else:
1080 1080 shutil.copymode(src, dest)
1081 1081 if oldstat and oldstat.stat:
1082 1082 newstat = filestat(dest)
1083 1083 if newstat.isambig(oldstat):
1084 1084 # stat of copied file is ambiguous to original one
1085 1085 advanced = (oldstat.stat.st_mtime + 1) & 0x7fffffff
1086 1086 os.utime(dest, (advanced, advanced))
1087 1087 except shutil.Error as inst:
1088 1088 raise Abort(str(inst))
1089 1089
1090 1090 def copyfiles(src, dst, hardlink=None, progress=lambda t, pos: None):
1091 1091 """Copy a directory tree using hardlinks if possible."""
1092 1092 num = 0
1093 1093
1094 1094 if hardlink is None:
1095 1095 hardlink = (os.stat(src).st_dev ==
1096 1096 os.stat(os.path.dirname(dst)).st_dev)
1097 1097 if hardlink:
1098 1098 topic = _('linking')
1099 1099 else:
1100 1100 topic = _('copying')
1101 1101
1102 1102 if os.path.isdir(src):
1103 1103 os.mkdir(dst)
1104 1104 for name, kind in osutil.listdir(src):
1105 1105 srcname = os.path.join(src, name)
1106 1106 dstname = os.path.join(dst, name)
1107 1107 def nprog(t, pos):
1108 1108 if pos is not None:
1109 1109 return progress(t, pos + num)
1110 1110 hardlink, n = copyfiles(srcname, dstname, hardlink, progress=nprog)
1111 1111 num += n
1112 1112 else:
1113 1113 if hardlink:
1114 1114 try:
1115 1115 oslink(src, dst)
1116 1116 except (IOError, OSError):
1117 1117 hardlink = False
1118 1118 shutil.copy(src, dst)
1119 1119 else:
1120 1120 shutil.copy(src, dst)
1121 1121 num += 1
1122 1122 progress(topic, num)
1123 1123 progress(topic, None)
1124 1124
1125 1125 return hardlink, num
1126 1126
1127 1127 _winreservednames = '''con prn aux nul
1128 1128 com1 com2 com3 com4 com5 com6 com7 com8 com9
1129 1129 lpt1 lpt2 lpt3 lpt4 lpt5 lpt6 lpt7 lpt8 lpt9'''.split()
1130 1130 _winreservedchars = ':*?"<>|'
1131 1131 def checkwinfilename(path):
1132 1132 r'''Check that the base-relative path is a valid filename on Windows.
1133 1133 Returns None if the path is ok, or a UI string describing the problem.
1134 1134
1135 1135 >>> checkwinfilename("just/a/normal/path")
1136 1136 >>> checkwinfilename("foo/bar/con.xml")
1137 1137 "filename contains 'con', which is reserved on Windows"
1138 1138 >>> checkwinfilename("foo/con.xml/bar")
1139 1139 "filename contains 'con', which is reserved on Windows"
1140 1140 >>> checkwinfilename("foo/bar/xml.con")
1141 1141 >>> checkwinfilename("foo/bar/AUX/bla.txt")
1142 1142 "filename contains 'AUX', which is reserved on Windows"
1143 1143 >>> checkwinfilename("foo/bar/bla:.txt")
1144 1144 "filename contains ':', which is reserved on Windows"
1145 1145 >>> checkwinfilename("foo/bar/b\07la.txt")
1146 1146 "filename contains '\\x07', which is invalid on Windows"
1147 1147 >>> checkwinfilename("foo/bar/bla ")
1148 1148 "filename ends with ' ', which is not allowed on Windows"
1149 1149 >>> checkwinfilename("../bar")
1150 1150 >>> checkwinfilename("foo\\")
1151 1151 "filename ends with '\\', which is invalid on Windows"
1152 1152 >>> checkwinfilename("foo\\/bar")
1153 1153 "directory name ends with '\\', which is invalid on Windows"
1154 1154 '''
1155 1155 if path.endswith('\\'):
1156 1156 return _("filename ends with '\\', which is invalid on Windows")
1157 1157 if '\\/' in path:
1158 1158 return _("directory name ends with '\\', which is invalid on Windows")
1159 1159 for n in path.replace('\\', '/').split('/'):
1160 1160 if not n:
1161 1161 continue
1162 1162 for c in n:
1163 1163 if c in _winreservedchars:
1164 1164 return _("filename contains '%s', which is reserved "
1165 1165 "on Windows") % c
1166 1166 if ord(c) <= 31:
1167 1167 return _("filename contains %r, which is invalid "
1168 1168 "on Windows") % c
1169 1169 base = n.split('.')[0]
1170 1170 if base and base.lower() in _winreservednames:
1171 1171 return _("filename contains '%s', which is reserved "
1172 1172 "on Windows") % base
1173 1173 t = n[-1]
1174 1174 if t in '. ' and n not in '..':
1175 1175 return _("filename ends with '%s', which is not allowed "
1176 1176 "on Windows") % t
1177 1177
1178 1178 if os.name == 'nt':
1179 1179 checkosfilename = checkwinfilename
1180 1180 else:
1181 1181 checkosfilename = platform.checkosfilename
1182 1182
1183 1183 def makelock(info, pathname):
1184 1184 try:
1185 1185 return os.symlink(info, pathname)
1186 1186 except OSError as why:
1187 1187 if why.errno == errno.EEXIST:
1188 1188 raise
1189 1189 except AttributeError: # no symlink in os
1190 1190 pass
1191 1191
1192 1192 ld = os.open(pathname, os.O_CREAT | os.O_WRONLY | os.O_EXCL)
1193 1193 os.write(ld, info)
1194 1194 os.close(ld)
1195 1195
1196 1196 def readlock(pathname):
1197 1197 try:
1198 1198 return os.readlink(pathname)
1199 1199 except OSError as why:
1200 1200 if why.errno not in (errno.EINVAL, errno.ENOSYS):
1201 1201 raise
1202 1202 except AttributeError: # no symlink in os
1203 1203 pass
1204 1204 fp = posixfile(pathname)
1205 1205 r = fp.read()
1206 1206 fp.close()
1207 1207 return r
1208 1208
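# Editorial sketch (not upstream code): makelock/readlock round-trip lock
# metadata through a symlink where the platform supports one, or through a
# plain file otherwise; readlock transparently decodes both representations.
# The directory and info string below are hypothetical.
def _demolock():
    lockdir = tempfile.mkdtemp()
    lockpath = os.path.join(lockdir, 'lock')
    makelock('4711', lockpath)           # symlink target or file content
    assert readlock(lockpath) == '4711'  # reads back either encoding
    os.unlink(lockpath)
    os.rmdir(lockdir)
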
1209 1209 def fstat(fp):
1210 1210 '''stat file object that may not have fileno method.'''
1211 1211 try:
1212 1212 return os.fstat(fp.fileno())
1213 1213 except AttributeError:
1214 1214 return os.stat(fp.name)
1215 1215
1216 1216 # File system features
1217 1217
1218 1218 def fscasesensitive(path):
1219 1219 """
1220 1220 Return true if the given path is on a case-sensitive filesystem
1221 1221
1222 1222 Requires a path (like /foo/.hg) ending with a foldable final
1223 1223 directory component.
1224 1224 """
1225 1225 s1 = os.lstat(path)
1226 1226 d, b = os.path.split(path)
1227 1227 b2 = b.upper()
1228 1228 if b == b2:
1229 1229 b2 = b.lower()
1230 1230 if b == b2:
1231 1231 return True # no evidence against case sensitivity
1232 1232 p2 = os.path.join(d, b2)
1233 1233 try:
1234 1234 s2 = os.lstat(p2)
1235 1235 if s2 == s1:
1236 1236 return False
1237 1237 return True
1238 1238 except OSError:
1239 1239 return True
1240 1240
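# Editorial sketch (not upstream code): fscasesensitive probes by re-statting
# the path with its final component case-flipped; if both spellings resolve
# to the same stat data, the filesystem folds case. Names are hypothetical.
def _democasesensitive():
    d = tempfile.mkdtemp()
    p = os.path.join(d, 'Probe')
    open(p, 'w').close()
    result = fscasesensitive(p)  # True on ext4; typically False on NTFS/HFS+
    os.unlink(p)
    os.rmdir(d)
    return result
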
1241 1241 try:
1242 1242 import re2
1243 1243 _re2 = None
1244 1244 except ImportError:
1245 1245 _re2 = False
1246 1246
1247 1247 class _re(object):
1248 1248 def _checkre2(self):
1249 1249 global _re2
1250 1250 try:
1251 1251 # check if match works, see issue3964
1252 1252 _re2 = bool(re2.match(r'\[([^\[]+)\]', '[ui]'))
1253 1253 except ImportError:
1254 1254 _re2 = False
1255 1255
1256 1256 def compile(self, pat, flags=0):
1257 1257 '''Compile a regular expression, using re2 if possible
1258 1258
1259 1259 For best performance, use only re2-compatible regexp features. The
1260 1260 only flags from the re module that are re2-compatible are
1261 1261 IGNORECASE and MULTILINE.'''
1262 1262 if _re2 is None:
1263 1263 self._checkre2()
1264 1264 if _re2 and (flags & ~(remod.IGNORECASE | remod.MULTILINE)) == 0:
1265 1265 if flags & remod.IGNORECASE:
1266 1266 pat = '(?i)' + pat
1267 1267 if flags & remod.MULTILINE:
1268 1268 pat = '(?m)' + pat
1269 1269 try:
1270 1270 return re2.compile(pat)
1271 1271 except re2.error:
1272 1272 pass
1273 1273 return remod.compile(pat, flags)
1274 1274
1275 1275 @propertycache
1276 1276 def escape(self):
1277 1277 '''Return the version of escape corresponding to self.compile.
1278 1278
1279 1279 This is imperfect because whether re2 or re is used for a particular
1280 1280 function depends on the flags, etc, but it's the best we can do.
1281 1281 '''
1282 1282 global _re2
1283 1283 if _re2 is None:
1284 1284 self._checkre2()
1285 1285 if _re2:
1286 1286 return re2.escape
1287 1287 else:
1288 1288 return remod.escape
1289 1289
1290 1290 re = _re()
1291 1291
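# Editorial sketch (not upstream code): callers use util.re exactly like the
# stdlib module; re2 is chosen silently when its bindings are importable and
# the flags are compatible, and the stdlib engine serves every other case.
def _demore():
    pat = re.compile(r'[a-f0-9]{6,}', remod.IGNORECASE)  # re2 or stdlib re
    return bool(pat.search('DEADBEEF'))
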
1292 1292 _fspathcache = {}
1293 1293 def fspath(name, root):
1294 1294 '''Get name in the case stored in the filesystem
1295 1295
1296 1296 The name should be relative to root, and be normcase-ed for efficiency.
1297 1297
1298 1298 Note that this function is unnecessary on case-sensitive
1299 1299 filesystems and should not be called there (it is expensive).
1300 1300
1301 1301 The root should be normcase-ed, too.
1302 1302 '''
1303 1303 def _makefspathcacheentry(dir):
1304 1304 return dict((normcase(n), n) for n in os.listdir(dir))
1305 1305
1306 1306 seps = os.sep
1307 1307 if os.altsep:
1308 1308 seps = seps + os.altsep
1309 1309 # Protect backslashes. This gets silly very quickly.
1310 1310 seps = seps.replace('\\', '\\\\')
1311 1311 pattern = remod.compile(r'([^%s]+)|([%s]+)' % (seps, seps))
1312 1312 dir = os.path.normpath(root)
1313 1313 result = []
1314 1314 for part, sep in pattern.findall(name):
1315 1315 if sep:
1316 1316 result.append(sep)
1317 1317 continue
1318 1318
1319 1319 if dir not in _fspathcache:
1320 1320 _fspathcache[dir] = _makefspathcacheentry(dir)
1321 1321 contents = _fspathcache[dir]
1322 1322
1323 1323 found = contents.get(part)
1324 1324 if not found:
1325 1325 # retry "once per directory" per "dirstate.walk" which
1326 1326 # may take place for each patch of "hg qpush", for example
1327 1327 _fspathcache[dir] = contents = _makefspathcacheentry(dir)
1328 1328 found = contents.get(part)
1329 1329
1330 1330 result.append(found or part)
1331 1331 dir = os.path.join(dir, part)
1332 1332
1333 1333 return ''.join(result)
1334 1334
1335 1335 def checknlink(testfile):
1336 1336 '''check whether hardlink count reporting works properly'''
1337 1337
1338 1338 # testfile may be open, so we need a separate file for checking to
1339 1339 # work around issue2543 (or testfile may get lost on Samba shares)
1340 1340 f1 = testfile + ".hgtmp1"
1341 1341 if os.path.lexists(f1):
1342 1342 return False
1343 1343 try:
1344 1344 posixfile(f1, 'w').close()
1345 1345 except IOError:
1346 1346 try:
1347 1347 os.unlink(f1)
1348 1348 except OSError:
1349 1349 pass
1350 1350 return False
1351 1351
1352 1352 f2 = testfile + ".hgtmp2"
1353 1353 fd = None
1354 1354 try:
1355 1355 oslink(f1, f2)
1356 1356 # nlinks() may behave differently for files on Windows shares if
1357 1357 # the file is open.
1358 1358 fd = posixfile(f2)
1359 1359 return nlinks(f2) > 1
1360 1360 except OSError:
1361 1361 return False
1362 1362 finally:
1363 1363 if fd is not None:
1364 1364 fd.close()
1365 1365 for f in (f1, f2):
1366 1366 try:
1367 1367 os.unlink(f)
1368 1368 except OSError:
1369 1369 pass
1370 1370
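# Editorial sketch (not upstream code): checknlink answers "can st_nlink be
# trusted here?" by hardlinking two probe files next to the given path and
# verifying the link count really reads back as > 1. The path is hypothetical.
def _demochecknlink():
    probe = os.path.join(tempfile.mkdtemp(), 'probe')
    return checknlink(probe)  # False on e.g. some Samba shares
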
1371 1371 def endswithsep(path):
1372 1372 '''Check path ends with os.sep or os.altsep.'''
1373 1373 return path.endswith(os.sep) or os.altsep and path.endswith(os.altsep)
1374 1374
1375 1375 def splitpath(path):
1376 1376 '''Split path by os.sep.
1377 1377 Note that this function does not use os.altsep because it is
1378 1378 just an alternative to a simple "xxx.split(os.sep)".
1379 1379 It is recommended to use os.path.normpath() before using this
1380 1380 function if needed.'''
1381 1381 return path.split(os.sep)
1382 1382
1383 1383 def gui():
1384 1384 '''Are we running in a GUI?'''
1385 1385 if sys.platform == 'darwin':
1386 1386 if 'SSH_CONNECTION' in os.environ:
1387 1387 # handle SSH access to a box where the user is logged in
1388 1388 return False
1389 1389 elif getattr(osutil, 'isgui', None):
1390 1390 # check if a CoreGraphics session is available
1391 1391 return osutil.isgui()
1392 1392 else:
1393 1393 # pure build; use a safe default
1394 1394 return True
1395 1395 else:
1396 1396 return os.name == "nt" or os.environ.get("DISPLAY")
1397 1397
1398 1398 def mktempcopy(name, emptyok=False, createmode=None):
1399 1399 """Create a temporary file with the same contents from name
1400 1400
1401 1401 The permission bits are copied from the original file.
1402 1402
1403 1403 If the temporary file is going to be truncated immediately, you
1404 1404 can use emptyok=True as an optimization.
1405 1405
1406 1406 Returns the name of the temporary file.
1407 1407 """
1408 1408 d, fn = os.path.split(name)
1409 1409 fd, temp = tempfile.mkstemp(prefix='.%s-' % fn, dir=d)
1410 1410 os.close(fd)
1411 1411 # Temporary files are created with mode 0600, which is usually not
1412 1412 # what we want. If the original file already exists, just copy
1413 1413 # its mode. Otherwise, manually obey umask.
1414 1414 copymode(name, temp, createmode)
1415 1415 if emptyok:
1416 1416 return temp
1417 1417 try:
1418 1418 try:
1419 1419 ifp = posixfile(name, "rb")
1420 1420 except IOError as inst:
1421 1421 if inst.errno == errno.ENOENT:
1422 1422 return temp
1423 1423 if not getattr(inst, 'filename', None):
1424 1424 inst.filename = name
1425 1425 raise
1426 1426 ofp = posixfile(temp, "wb")
1427 1427 for chunk in filechunkiter(ifp):
1428 1428 ofp.write(chunk)
1429 1429 ifp.close()
1430 1430 ofp.close()
1431 1431 except: # re-raises
1432 1432 try: os.unlink(temp)
1433 1433 except OSError: pass
1434 1434 raise
1435 1435 return temp
1436 1436
1437 1437 class filestat(object):
1438 1438 """help to exactly detect change of a file
1439 1439
1440 1440 'stat' attribute is result of 'os.stat()' if specified 'path'
1441 1441 exists. Otherwise, it is None. This can avoid preparative
1442 1442 'exists()' examination on client side of this class.
1443 1443 """
1444 1444 def __init__(self, path):
1445 1445 try:
1446 1446 self.stat = os.stat(path)
1447 1447 except OSError as err:
1448 1448 if err.errno != errno.ENOENT:
1449 1449 raise
1450 1450 self.stat = None
1451 1451
1452 1452 __hash__ = object.__hash__
1453 1453
1454 1454 def __eq__(self, old):
1455 1455 try:
1456 1456 # if ambiguity between stat of new and old file is
1457 1457 # avoided, comparison of size, ctime and mtime is enough
1458 1458 # to exactly detect change of a file regardless of platform
1459 1459 return (self.stat.st_size == old.stat.st_size and
1460 1460 self.stat.st_ctime == old.stat.st_ctime and
1461 1461 self.stat.st_mtime == old.stat.st_mtime)
1462 1462 except AttributeError:
1463 1463 return False
1464 1464
1465 1465 def isambig(self, old):
1466 1466 """Examine whether new (= self) stat is ambiguous against old one
1467 1467
1468 1468 "S[N]" below means stat of a file at N-th change:
1469 1469
1470 1470 - S[n-1].ctime < S[n].ctime: can detect change of a file
1471 1471 - S[n-1].ctime == S[n].ctime
1472 1472 - S[n-1].ctime < S[n].mtime: means natural advancing (*1)
1473 1473 - S[n-1].ctime == S[n].mtime: is ambiguous (*2)
1474 1474 - S[n-1].ctime > S[n].mtime: never occurs naturally (don't care)
1475 1475 - S[n-1].ctime > S[n].ctime: never occurs naturally (don't care)
1476 1476
1477 1477 Case (*2) above means that a file was changed twice or more
1478 1478 within the same second (= S[n-1].ctime), so comparison of
1479 1479 timestamps alone is ambiguous.
1480 1480 
1481 1481 The basic idea for avoiding such ambiguity is to "advance mtime
1482 1482 by 1 sec, if the timestamp is ambiguous".
1483 1483 
1484 1484 But advancing mtime only in case (*2) doesn't work as
1485 1485 expected, because a naturally advanced S[n].mtime in case (*1)
1486 1486 might be equal to a manually advanced S[n-1 or earlier].mtime.
1487 1487 
1488 1488 Therefore, all "S[n-1].ctime == S[n].ctime" cases should be
1489 1489 treated as ambiguous regardless of mtime, to avoid overlooking
1490 1490 a change masked by a collision between such mtimes.
1491 1491
1492 1492 Advancing mtime "if isambig(oldstat)" ensures "S[n-1].mtime !=
1493 1493 S[n].mtime", even if size of a file isn't changed.
1494 1494 """
1495 1495 try:
1496 1496 return (self.stat.st_ctime == old.stat.st_ctime)
1497 1497 except AttributeError:
1498 1498 return False
1499 1499
1500 1500 def avoidambig(self, path, old):
1501 1501 """Change file stat of specified path to avoid ambiguity
1502 1502
1503 1503 'old' should be previous filestat of 'path'.
1504 1504
1505 1505 This skips avoiding ambiguity if the process doesn't have
1506 1506 appropriate privileges for 'path'.
1507 1507 """
1508 1508 advanced = (old.stat.st_mtime + 1) & 0x7fffffff
1509 1509 try:
1510 1510 os.utime(path, (advanced, advanced))
1511 1511 except OSError as inst:
1512 1512 if inst.errno == errno.EPERM:
1513 1513 # utime() on the file created by another user causes EPERM,
1514 1514 # if a process doesn't have appropriate privileges
1515 1515 return
1516 1516 raise
1517 1517
1518 1518 def __ne__(self, other):
1519 1519 return not self == other
1520 1520
1521 1521 class atomictempfile(object):
1522 1522 '''writable file object that atomically updates a file
1523 1523
1524 1524 All writes will go to a temporary copy of the original file. Call
1525 1525 close() when you are done writing, and atomictempfile will rename
1526 1526 the temporary copy to the original name, making the changes
1527 1527 visible. If the object is destroyed without being closed, all your
1528 1528 writes are discarded.
1529 1529
1530 1530 The checkambig argument of the constructor is used with filestat,
1531 1531 and is useful only if the target file is guarded by a lock
1532 1532 (e.g. repo.lock or repo.wlock).
1533 1533 '''
1534 1534 def __init__(self, name, mode='w+b', createmode=None, checkambig=False):
1535 1535 self.__name = name # permanent name
1536 1536 self._tempname = mktempcopy(name, emptyok=('w' in mode),
1537 1537 createmode=createmode)
1538 1538 self._fp = posixfile(self._tempname, mode)
1539 1539 self._checkambig = checkambig
1540 1540
1541 1541 # delegated methods
1542 1542 self.read = self._fp.read
1543 1543 self.write = self._fp.write
1544 1544 self.seek = self._fp.seek
1545 1545 self.tell = self._fp.tell
1546 1546 self.fileno = self._fp.fileno
1547 1547
1548 1548 def close(self):
1549 1549 if not self._fp.closed:
1550 1550 self._fp.close()
1551 1551 filename = localpath(self.__name)
1552 1552 oldstat = self._checkambig and filestat(filename)
1553 1553 if oldstat and oldstat.stat:
1554 1554 rename(self._tempname, filename)
1555 1555 newstat = filestat(filename)
1556 1556 if newstat.isambig(oldstat):
1557 1557 # stat of changed file is ambiguous to original one
1558 1558 advanced = (oldstat.stat.st_mtime + 1) & 0x7fffffff
1559 1559 os.utime(filename, (advanced, advanced))
1560 1560 else:
1561 1561 rename(self._tempname, filename)
1562 1562
1563 1563 def discard(self):
1564 1564 if not self._fp.closed:
1565 1565 try:
1566 1566 os.unlink(self._tempname)
1567 1567 except OSError:
1568 1568 pass
1569 1569 self._fp.close()
1570 1570
1571 1571 def __del__(self):
1572 1572 if safehasattr(self, '_fp'): # constructor actually did something
1573 1573 self.discard()
1574 1574
1575 1575 def __enter__(self):
1576 1576 return self
1577 1577
1578 1578 def __exit__(self, exctype, excvalue, traceback):
1579 1579 if exctype is not None:
1580 1580 self.discard()
1581 1581 else:
1582 1582 self.close()
1583 1583
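# Editorial sketch (not upstream code): typical atomictempfile usage. Writes
# accumulate in a temporary copy; close() (here via the context manager)
# renames it over the target in one step, so concurrent readers never see a
# half-written file. The target path is hypothetical.
def _demoatomicwrite():
    target = os.path.join(tempfile.mkdtemp(), 'config')
    with atomictempfile(target, mode='wb') as f:
        f.write('[section]\nkey = value\n')
    # on an exception the temporary copy would have been discarded instead
    assert readfile(target) == '[section]\nkey = value\n'
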
1584 1584 def makedirs(name, mode=None, notindexed=False):
1585 1585 """recursive directory creation with parent mode inheritance
1586 1586
1587 1587 Newly created directories are marked as "not to be indexed by
1588 1588 the content indexing service", if ``notindexed`` is specified
1589 1589 for "write" mode access.
1590 1590 """
1591 1591 try:
1592 1592 makedir(name, notindexed)
1593 1593 except OSError as err:
1594 1594 if err.errno == errno.EEXIST:
1595 1595 return
1596 1596 if err.errno != errno.ENOENT or not name:
1597 1597 raise
1598 1598 parent = os.path.dirname(os.path.abspath(name))
1599 1599 if parent == name:
1600 1600 raise
1601 1601 makedirs(parent, mode, notindexed)
1602 1602 try:
1603 1603 makedir(name, notindexed)
1604 1604 except OSError as err:
1605 1605 # Catch EEXIST to handle races
1606 1606 if err.errno == errno.EEXIST:
1607 1607 return
1608 1608 raise
1609 1609 if mode is not None:
1610 1610 os.chmod(name, mode)
1611 1611
1612 1612 def readfile(path):
1613 1613 with open(path, 'rb') as fp:
1614 1614 return fp.read()
1615 1615
1616 1616 def writefile(path, text):
1617 1617 with open(path, 'wb') as fp:
1618 1618 fp.write(text)
1619 1619
1620 1620 def appendfile(path, text):
1621 1621 with open(path, 'ab') as fp:
1622 1622 fp.write(text)
1623 1623
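# Editorial sketch (not upstream code): the three helpers above are thin
# wrappers over binary file I/O; a write, an append and a read round-trip
# the same bytes. The path is hypothetical.
def _demorwfile():
    path = os.path.join(tempfile.mkdtemp(), 'data')
    writefile(path, 'abc')
    appendfile(path, 'def')
    assert readfile(path) == 'abcdef'
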
1624 1624 class chunkbuffer(object):
1625 1625 """Allow arbitrary sized chunks of data to be efficiently read from an
1626 1626 iterator over chunks of arbitrary size."""
1627 1627
1628 1628 def __init__(self, in_iter):
1629 1629 """in_iter is the iterator that's iterating over the input chunks.
1630 1630 targetsize is how big a buffer to try to maintain."""
1631 1631 def splitbig(chunks):
1632 1632 for chunk in chunks:
1633 1633 if len(chunk) > 2**20:
1634 1634 pos = 0
1635 1635 while pos < len(chunk):
1636 1636 end = pos + 2 ** 18
1637 1637 yield chunk[pos:end]
1638 1638 pos = end
1639 1639 else:
1640 1640 yield chunk
1641 1641 self.iter = splitbig(in_iter)
1642 1642 self._queue = collections.deque()
1643 1643 self._chunkoffset = 0
1644 1644
1645 1645 def read(self, l=None):
1646 1646 """Read L bytes of data from the iterator of chunks of data.
1647 1647 Returns less than L bytes if the iterator runs dry.
1648 1648
1649 1649 If size parameter is omitted, read everything"""
1650 1650 if l is None:
1651 1651 return ''.join(self.iter)
1652 1652
1653 1653 left = l
1654 1654 buf = []
1655 1655 queue = self._queue
1656 1656 while left > 0:
1657 1657 # refill the queue
1658 1658 if not queue:
1659 1659 target = 2**18
1660 1660 for chunk in self.iter:
1661 1661 queue.append(chunk)
1662 1662 target -= len(chunk)
1663 1663 if target <= 0:
1664 1664 break
1665 1665 if not queue:
1666 1666 break
1667 1667
1668 1668 # The easy way to do this would be to queue.popleft(), modify the
1669 1669 # chunk (if necessary), then queue.appendleft(). However, for cases
1670 1670 # where we read partial chunk content, this incurs 2 dequeue
1671 1671 # mutations and creates a new str for the remaining chunk in the
1672 1672 # queue. Our code below avoids this overhead.
1673 1673
1674 1674 chunk = queue[0]
1675 1675 chunkl = len(chunk)
1676 1676 offset = self._chunkoffset
1677 1677
1678 1678 # Use full chunk.
1679 1679 if offset == 0 and left >= chunkl:
1680 1680 left -= chunkl
1681 1681 queue.popleft()
1682 1682 buf.append(chunk)
1683 1683 # self._chunkoffset remains at 0.
1684 1684 continue
1685 1685
1686 1686 chunkremaining = chunkl - offset
1687 1687
1688 1688 # Use all of unconsumed part of chunk.
1689 1689 if left >= chunkremaining:
1690 1690 left -= chunkremaining
1691 1691 queue.popleft()
1692 1692 # offset == 0 is enabled by block above, so this won't merely
1693 1693 # copy via ``chunk[0:]``.
1694 1694 buf.append(chunk[offset:])
1695 1695 self._chunkoffset = 0
1696 1696
1697 1697 # Partial chunk needed.
1698 1698 else:
1699 1699 buf.append(chunk[offset:offset + left])
1700 1700 self._chunkoffset += left
1701 1701 left -= chunkremaining
1702 1702
1703 1703 return ''.join(buf)
1704 1704
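# Editorial sketch (not upstream code): chunkbuffer re-slices an iterator of
# arbitrarily sized chunks into reads of exactly the requested size, only
# falling short once the underlying iterator runs dry.
def _demochunkbuffer():
    buf = chunkbuffer(iter(['abc', 'defg', 'h']))
    assert buf.read(2) == 'ab'    # partial consumption of the first chunk
    assert buf.read(4) == 'cdef'  # read spanning a chunk boundary
    assert buf.read(10) == 'gh'   # iterator exhausted: short read
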
1705 1705 def filechunkiter(f, size=131072, limit=None):
1706 1706 """Create a generator that produces the data in the file size
1707 1707 (default 131072) bytes at a time, up to optional limit (default is
1708 1708 to read all data). Chunks may be less than size bytes if the
1709 1709 chunk is the last chunk in the file, or the file is a socket or
1710 1710 some other type of file that sometimes reads less data than is
1711 1711 requested."""
1712 1712 assert size >= 0
1713 1713 assert limit is None or limit >= 0
1714 1714 while True:
1715 1715 if limit is None:
1716 1716 nbytes = size
1717 1717 else:
1718 1718 nbytes = min(limit, size)
1719 1719 s = nbytes and f.read(nbytes)
1720 1720 if not s:
1721 1721 break
1722 1722 if limit:
1723 1723 limit -= len(s)
1724 1724 yield s
1725 1725
1726 1726 def makedate(timestamp=None):
1727 1727 '''Return a unix timestamp (or the current time) as a (unixtime,
1728 1728 offset) tuple based off the local timezone.'''
1729 1729 if timestamp is None:
1730 1730 timestamp = time.time()
1731 1731 if timestamp < 0:
1732 1732 hint = _("check your clock")
1733 1733 raise Abort(_("negative timestamp: %d") % timestamp, hint=hint)
1734 1734 delta = (datetime.datetime.utcfromtimestamp(timestamp) -
1735 1735 datetime.datetime.fromtimestamp(timestamp))
1736 1736 tz = delta.days * 86400 + delta.seconds
1737 1737 return timestamp, tz
1738 1738
1739 1739 def datestr(date=None, format='%a %b %d %H:%M:%S %Y %1%2'):
1740 1740 """represent a (unixtime, offset) tuple as a localized time.
1741 1741 unixtime is seconds since the epoch, and offset is the time zone's
1742 1742 number of seconds away from UTC.
1743 1743
1744 1744 >>> datestr((0, 0))
1745 1745 'Thu Jan 01 00:00:00 1970 +0000'
1746 1746 >>> datestr((42, 0))
1747 1747 'Thu Jan 01 00:00:42 1970 +0000'
1748 1748 >>> datestr((-42, 0))
1749 1749 'Wed Dec 31 23:59:18 1969 +0000'
1750 1750 >>> datestr((0x7fffffff, 0))
1751 1751 'Tue Jan 19 03:14:07 2038 +0000'
1752 1752 >>> datestr((-0x80000000, 0))
1753 1753 'Fri Dec 13 20:45:52 1901 +0000'
1754 1754 """
1755 1755 t, tz = date or makedate()
1756 1756 if "%1" in format or "%2" in format or "%z" in format:
1757 1757 sign = (tz > 0) and "-" or "+"
1758 1758 minutes = abs(tz) // 60
1759 1759 q, r = divmod(minutes, 60)
1760 1760 format = format.replace("%z", "%1%2")
1761 1761 format = format.replace("%1", "%c%02d" % (sign, q))
1762 1762 format = format.replace("%2", "%02d" % r)
1763 1763 d = t - tz
1764 1764 if d > 0x7fffffff:
1765 1765 d = 0x7fffffff
1766 1766 elif d < -0x80000000:
1767 1767 d = -0x80000000
1768 1768 # Never use time.gmtime() and datetime.datetime.fromtimestamp()
1769 1769 # because they use the gmtime() system call which is buggy on Windows
1770 1770 # for negative values.
1771 1771 t = datetime.datetime(1970, 1, 1) + datetime.timedelta(seconds=d)
1772 1772 s = t.strftime(format)
1773 1773 return s
1774 1774
1775 1775 def shortdate(date=None):
1776 1776 """turn (timestamp, tzoff) tuple into iso 8631 date."""
1777 1777 return datestr(date, format='%Y-%m-%d')
1778 1778
1779 1779 def parsetimezone(s):
1780 1780 """find a trailing timezone, if any, in string, and return a
1781 1781 (offset, remainder) pair"""
1782 1782
1783 1783 if s.endswith("GMT") or s.endswith("UTC"):
1784 1784 return 0, s[:-3].rstrip()
1785 1785
1786 1786 # Unix-style timezones [+-]hhmm
1787 1787 if len(s) >= 5 and s[-5] in "+-" and s[-4:].isdigit():
1788 1788 sign = (s[-5] == "+") and 1 or -1
1789 1789 hours = int(s[-4:-2])
1790 1790 minutes = int(s[-2:])
1791 1791 return -sign * (hours * 60 + minutes) * 60, s[:-5].rstrip()
1792 1792
1793 1793 # ISO8601 trailing Z
1794 1794 if s.endswith("Z") and s[-2:-1].isdigit():
1795 1795 return 0, s[:-1]
1796 1796
1797 1797 # ISO8601-style [+-]hh:mm
1798 1798 if (len(s) >= 6 and s[-6] in "+-" and s[-3] == ":" and
1799 1799 s[-5:-3].isdigit() and s[-2:].isdigit()):
1800 1800 sign = (s[-6] == "+") and 1 or -1
1801 1801 hours = int(s[-5:-3])
1802 1802 minutes = int(s[-2:])
1803 1803 return -sign * (hours * 60 + minutes) * 60, s[:-6]
1804 1804
1805 1805 return None, s
1806 1806
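# Editorial sketch (not upstream code): parsetimezone peels a recognized
# timezone suffix off a date string; the returned offset follows the
# module's "unixtime = localunixtime + offset" convention, so zones east
# of UTC come back negative.
def _demoparsetimezone():
    assert parsetimezone('2016-01-01 12:00 +0200') == (-7200,
                                                       '2016-01-01 12:00')
    assert parsetimezone('2016-01-01 12:00 UTC') == (0, '2016-01-01 12:00')
    assert parsetimezone('no timezone here') == (None, 'no timezone here')
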
1807 1807 def strdate(string, format, defaults=[]):
1808 1808 """parse a localized time string and return a (unixtime, offset) tuple.
1809 1809 if the string cannot be parsed, ValueError is raised."""
1810 1810 # NOTE: unixtime = localunixtime + offset
1811 1811 offset, date = parsetimezone(string)
1812 1812
1813 1813 # add missing elements from defaults
1814 1814 usenow = False # default to using biased defaults
1815 1815 for part in ("S", "M", "HI", "d", "mb", "yY"): # decreasing specificity
1816 1816 found = [True for p in part if ("%"+p) in format]
1817 1817 if not found:
1818 1818 date += "@" + defaults[part][usenow]
1819 1819 format += "@%" + part[0]
1820 1820 else:
1821 1821 # We've found a specific time element; less specific time
1822 1822 # elements are relative to today
1823 1823 usenow = True
1824 1824
1825 1825 timetuple = time.strptime(date, format)
1826 1826 localunixtime = int(calendar.timegm(timetuple))
1827 1827 if offset is None:
1828 1828 # local timezone
1829 1829 unixtime = int(time.mktime(timetuple))
1830 1830 offset = unixtime - localunixtime
1831 1831 else:
1832 1832 unixtime = localunixtime + offset
1833 1833 return unixtime, offset
1834 1834
1835 1835 def parsedate(date, formats=None, bias=None):
1836 1836 """parse a localized date/time and return a (unixtime, offset) tuple.
1837 1837
1838 1838 The date may be a "unixtime offset" string or in one of the specified
1839 1839 formats. If the date already is a (unixtime, offset) tuple, it is returned.
1840 1840
1841 1841 >>> parsedate(' today ') == parsedate(\
1842 1842 datetime.date.today().strftime('%b %d'))
1843 1843 True
1844 1844 >>> parsedate( 'yesterday ') == parsedate((datetime.date.today() -\
1845 1845 datetime.timedelta(days=1)\
1846 1846 ).strftime('%b %d'))
1847 1847 True
1848 1848 >>> now, tz = makedate()
1849 1849 >>> strnow, strtz = parsedate('now')
1850 1850 >>> (strnow - now) < 1
1851 1851 True
1852 1852 >>> tz == strtz
1853 1853 True
1854 1854 """
1855 1855 if bias is None:
1856 1856 bias = {}
1857 1857 if not date:
1858 1858 return 0, 0
1859 1859 if isinstance(date, tuple) and len(date) == 2:
1860 1860 return date
1861 1861 if not formats:
1862 1862 formats = defaultdateformats
1863 1863 date = date.strip()
1864 1864
1865 1865 if date == 'now' or date == _('now'):
1866 1866 return makedate()
1867 1867 if date == 'today' or date == _('today'):
1868 1868 date = datetime.date.today().strftime('%b %d')
1869 1869 elif date == 'yesterday' or date == _('yesterday'):
1870 1870 date = (datetime.date.today() -
1871 1871 datetime.timedelta(days=1)).strftime('%b %d')
1872 1872
1873 1873 try:
1874 1874 when, offset = map(int, date.split(' '))
1875 1875 except ValueError:
1876 1876 # fill out defaults
1877 1877 now = makedate()
1878 1878 defaults = {}
1879 1879 for part in ("d", "mb", "yY", "HI", "M", "S"):
1880 1880 # this piece is for rounding the specific end of unknowns
1881 1881 b = bias.get(part)
1882 1882 if b is None:
1883 1883 if part[0] in "HMS":
1884 1884 b = "00"
1885 1885 else:
1886 1886 b = "0"
1887 1887
1888 1888 # this piece is for matching the generic end to today's date
1889 1889 n = datestr(now, "%" + part[0])
1890 1890
1891 1891 defaults[part] = (b, n)
1892 1892
1893 1893 for format in formats:
1894 1894 try:
1895 1895 when, offset = strdate(date, format, defaults)
1896 1896 except (ValueError, OverflowError):
1897 1897 pass
1898 1898 else:
1899 1899 break
1900 1900 else:
1901 1901 raise Abort(_('invalid date: %r') % date)
1902 1902 # validate explicit (probably user-specified) date and
1903 1903 # time zone offset. values must fit in signed 32 bits for
1904 1904 # current 32-bit linux runtimes. timezones go from UTC-12
1905 1905 # to UTC+14
1906 1906 if when < -0x80000000 or when > 0x7fffffff:
1907 1907 raise Abort(_('date exceeds 32 bits: %d') % when)
1908 1908 if offset < -50400 or offset > 43200:
1909 1909 raise Abort(_('impossible time zone offset: %d') % offset)
1910 1910 return when, offset
1911 1911
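# Editorial sketch (not upstream code): parsedate accepts a raw
# "unixtime offset" pair or any of the configured human-readable formats
# and always normalizes to the (unixtime, offset) tuple used above.
def _demoparsedate():
    assert parsedate('1000000000 0') == (1000000000, 0)
    when, offset = parsedate('2006-02-06 13:00 +0200')
    assert offset == -7200  # east of UTC, per the module's convention
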
1912 1912 def matchdate(date):
1913 1913 """Return a function that matches a given date match specifier
1914 1914
1915 1915 Formats include:
1916 1916
1917 1917 '{date}' match a given date to the accuracy provided
1918 1918
1919 1919 '<{date}' on or before a given date
1920 1920
1921 1921 '>{date}' on or after a given date
1922 1922
1923 1923 >>> p1 = parsedate("10:29:59")
1924 1924 >>> p2 = parsedate("10:30:00")
1925 1925 >>> p3 = parsedate("10:30:59")
1926 1926 >>> p4 = parsedate("10:31:00")
1927 1927 >>> p5 = parsedate("Sep 15 10:30:00 1999")
1928 1928 >>> f = matchdate("10:30")
1929 1929 >>> f(p1[0])
1930 1930 False
1931 1931 >>> f(p2[0])
1932 1932 True
1933 1933 >>> f(p3[0])
1934 1934 True
1935 1935 >>> f(p4[0])
1936 1936 False
1937 1937 >>> f(p5[0])
1938 1938 False
1939 1939 """
1940 1940
1941 1941 def lower(date):
1942 1942 d = {'mb': "1", 'd': "1"}
1943 1943 return parsedate(date, extendeddateformats, d)[0]
1944 1944
1945 1945 def upper(date):
1946 1946 d = {'mb': "12", 'HI': "23", 'M': "59", 'S': "59"}
1947 1947 for days in ("31", "30", "29"):
1948 1948 try:
1949 1949 d["d"] = days
1950 1950 return parsedate(date, extendeddateformats, d)[0]
1951 1951 except Abort:
1952 1952 pass
1953 1953 d["d"] = "28"
1954 1954 return parsedate(date, extendeddateformats, d)[0]
1955 1955
1956 1956 date = date.strip()
1957 1957
1958 1958 if not date:
1959 1959 raise Abort(_("dates cannot consist entirely of whitespace"))
1960 1960 elif date[0] == "<":
1961 1961 if not date[1:]:
1962 1962 raise Abort(_("invalid day spec, use '<DATE'"))
1963 1963 when = upper(date[1:])
1964 1964 return lambda x: x <= when
1965 1965 elif date[0] == ">":
1966 1966 if not date[1:]:
1967 1967 raise Abort(_("invalid day spec, use '>DATE'"))
1968 1968 when = lower(date[1:])
1969 1969 return lambda x: x >= when
1970 1970 elif date[0] == "-":
1971 1971 try:
1972 1972 days = int(date[1:])
1973 1973 except ValueError:
1974 1974 raise Abort(_("invalid day spec: %s") % date[1:])
1975 1975 if days < 0:
1976 1976 raise Abort(_("%s must be nonnegative (see 'hg help dates')")
1977 1977 % date[1:])
1978 1978 when = makedate()[0] - days * 3600 * 24
1979 1979 return lambda x: x >= when
1980 1980 elif " to " in date:
1981 1981 a, b = date.split(" to ")
1982 1982 start, stop = lower(a), upper(b)
1983 1983 return lambda x: x >= start and x <= stop
1984 1984 else:
1985 1985 start, stop = lower(date), upper(date)
1986 1986 return lambda x: x >= start and x <= stop
1987 1987
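# Editorial sketch (not upstream code): matchdate compiles a specifier into
# a predicate over unix timestamps; range forms widen each fuzzy endpoint
# to the start or end of its period before comparing.
def _demomatchdate():
    inrange = matchdate('2015-01-01 to 2015-12-31')
    assert inrange(parsedate('2015-06-15')[0])
    assert not inrange(parsedate('2016-06-15')[0])
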
1988 1988 def stringmatcher(pattern):
1989 1989 """
1990 1990 accepts a string, possibly starting with 're:' or 'literal:' prefix.
1991 1991 returns the matcher name, pattern, and matcher function.
1992 1992 missing or unknown prefixes are treated as literal matches.
1993 1993
1994 1994 helper for tests:
1995 1995 >>> def test(pattern, *tests):
1996 1996 ... kind, pattern, matcher = stringmatcher(pattern)
1997 1997 ... return (kind, pattern, [bool(matcher(t)) for t in tests])
1998 1998
1999 1999 exact matching (no prefix):
2000 2000 >>> test('abcdefg', 'abc', 'def', 'abcdefg')
2001 2001 ('literal', 'abcdefg', [False, False, True])
2002 2002
2003 2003 regex matching ('re:' prefix)
2004 2004 >>> test('re:a.+b', 'nomatch', 'fooadef', 'fooadefbar')
2005 2005 ('re', 'a.+b', [False, False, True])
2006 2006
2007 2007 force exact matches ('literal:' prefix)
2008 2008 >>> test('literal:re:foobar', 'foobar', 're:foobar')
2009 2009 ('literal', 're:foobar', [False, True])
2010 2010
2011 2011 unknown prefixes are ignored and treated as literals
2012 2012 >>> test('foo:bar', 'foo', 'bar', 'foo:bar')
2013 2013 ('literal', 'foo:bar', [False, False, True])
2014 2014 """
2015 2015 if pattern.startswith('re:'):
2016 2016 pattern = pattern[3:]
2017 2017 try:
2018 2018 regex = remod.compile(pattern)
2019 2019 except remod.error as e:
2020 2020 raise error.ParseError(_('invalid regular expression: %s')
2021 2021 % e)
2022 2022 return 're', pattern, regex.search
2023 2023 elif pattern.startswith('literal:'):
2024 2024 pattern = pattern[8:]
2025 2025 return 'literal', pattern, pattern.__eq__
2026 2026
2027 2027 def shortuser(user):
2028 2028 """Return a short representation of a user name or email address."""
2029 2029 f = user.find('@')
2030 2030 if f >= 0:
2031 2031 user = user[:f]
2032 2032 f = user.find('<')
2033 2033 if f >= 0:
2034 2034 user = user[f + 1:]
2035 2035 f = user.find(' ')
2036 2036 if f >= 0:
2037 2037 user = user[:f]
2038 2038 f = user.find('.')
2039 2039 if f >= 0:
2040 2040 user = user[:f]
2041 2041 return user
2042 2042
2043 2043 def emailuser(user):
2044 2044 """Return the user portion of an email address."""
2045 2045 f = user.find('@')
2046 2046 if f >= 0:
2047 2047 user = user[:f]
2048 2048 f = user.find('<')
2049 2049 if f >= 0:
2050 2050 user = user[f + 1:]
2051 2051 return user
2052 2052
2053 2053 def email(author):
2054 2054 '''get email of author.'''
2055 2055 r = author.find('>')
2056 2056 if r == -1:
2057 2057 r = None
2058 2058 return author[author.find('<') + 1:r]
2059 2059
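# Editorial sketch (not upstream code): the three helpers above extract
# progressively smaller pieces of a conventional "Name <local@host>"
# author string.
def _demouserhelpers():
    author = 'Jane Doe <jane.doe@example.com>'
    assert email(author) == 'jane.doe@example.com'
    assert emailuser(author) == 'jane.doe'
    assert shortuser(author) == 'jane'
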
2060 2060 def ellipsis(text, maxlength=400):
2061 2061 """Trim string to at most maxlength (default: 400) columns in display."""
2062 2062 return encoding.trim(text, maxlength, ellipsis='...')
2063 2063
2064 2064 def unitcountfn(*unittable):
2065 2065 '''return a function that renders a readable count of some quantity'''
2066 2066
2067 2067 def go(count):
2068 2068 for multiplier, divisor, format in unittable:
2069 2069 if count >= divisor * multiplier:
2070 2070 return format % (count / float(divisor))
2071 2071 return unittable[-1][2] % count
2072 2072
2073 2073 return go
2074 2074
2075 2075 bytecount = unitcountfn(
2076 2076 (100, 1 << 30, _('%.0f GB')),
2077 2077 (10, 1 << 30, _('%.1f GB')),
2078 2078 (1, 1 << 30, _('%.2f GB')),
2079 2079 (100, 1 << 20, _('%.0f MB')),
2080 2080 (10, 1 << 20, _('%.1f MB')),
2081 2081 (1, 1 << 20, _('%.2f MB')),
2082 2082 (100, 1 << 10, _('%.0f KB')),
2083 2083 (10, 1 << 10, _('%.1f KB')),
2084 2084 (1, 1 << 10, _('%.2f KB')),
2085 2085 (1, 1, _('%.0f bytes')),
2086 2086 )
2087 2087
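# Editorial sketch (not upstream code): unitcountfn tables are scanned top
# to bottom and the first row whose threshold the value clears wins, so
# displayed precision shrinks as magnitudes grow.
def _demobytecount():
    assert bytecount(1) == '1 bytes'
    assert bytecount(2252) == '2.20 KB'
    assert bytecount(100 * (1 << 20)) == '100 MB'
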
2088 2088 def uirepr(s):
2089 2089 # Avoid double backslash in Windows path repr()
2090 2090 return repr(s).replace('\\\\', '\\')
2091 2091
2092 2092 # delay import of textwrap
2093 2093 def MBTextWrapper(**kwargs):
2094 2094 class tw(textwrap.TextWrapper):
2095 2095 """
2096 2096 Extend TextWrapper for width-awareness.
2097 2097
2098 2098 Neither the number of 'bytes' in any encoding nor the number of
2099 2099 'characters' is appropriate for computing terminal columns of a string.
2100 2100 
2101 2101 The original TextWrapper implementation uses the built-in 'len()'
2102 2102 directly, so overriding is needed to use the width of each character.
2103 2103 
2104 2104 In addition, characters of 'ambiguous' width are treated as wide in
2105 2105 East Asian locales, but as narrow elsewhere.
2106 2106 
2107 2107 This requires a user decision to determine the width of such characters.
2108 2108 """
2109 2109 def _cutdown(self, ucstr, space_left):
2110 2110 l = 0
2111 2111 colwidth = encoding.ucolwidth
2112 2112 for i in xrange(len(ucstr)):
2113 2113 l += colwidth(ucstr[i])
2114 2114 if space_left < l:
2115 2115 return (ucstr[:i], ucstr[i:])
2116 2116 return ucstr, ''
2117 2117
2118 2118 # overriding of base class
2119 2119 def _handle_long_word(self, reversed_chunks, cur_line, cur_len, width):
2120 2120 space_left = max(width - cur_len, 1)
2121 2121
2122 2122 if self.break_long_words:
2123 2123 cut, res = self._cutdown(reversed_chunks[-1], space_left)
2124 2124 cur_line.append(cut)
2125 2125 reversed_chunks[-1] = res
2126 2126 elif not cur_line:
2127 2127 cur_line.append(reversed_chunks.pop())
2128 2128
2129 2129 # this overriding code is imported from TextWrapper of Python 2.6
2130 2130 # to calculate columns of string by 'encoding.ucolwidth()'
2131 2131 def _wrap_chunks(self, chunks):
2132 2132 colwidth = encoding.ucolwidth
2133 2133
2134 2134 lines = []
2135 2135 if self.width <= 0:
2136 2136 raise ValueError("invalid width %r (must be > 0)" % self.width)
2137 2137
2138 2138 # Arrange in reverse order so items can be efficiently popped
2139 2139 # from a stack of chunks.
2140 2140 chunks.reverse()
2141 2141
2142 2142 while chunks:
2143 2143
2144 2144 # Start the list of chunks that will make up the current line.
2145 2145 # cur_len is just the length of all the chunks in cur_line.
2146 2146 cur_line = []
2147 2147 cur_len = 0
2148 2148
2149 2149 # Figure out which static string will prefix this line.
2150 2150 if lines:
2151 2151 indent = self.subsequent_indent
2152 2152 else:
2153 2153 indent = self.initial_indent
2154 2154
2155 2155 # Maximum width for this line.
2156 2156 width = self.width - len(indent)
2157 2157
2158 2158 # First chunk on line is whitespace -- drop it, unless this
2159 2159 # is the very beginning of the text (i.e. no lines started yet).
2160 2160 if self.drop_whitespace and chunks[-1].strip() == '' and lines:
2161 2161 del chunks[-1]
2162 2162
2163 2163 while chunks:
2164 2164 l = colwidth(chunks[-1])
2165 2165
2166 2166 # Can at least squeeze this chunk onto the current line.
2167 2167 if cur_len + l <= width:
2168 2168 cur_line.append(chunks.pop())
2169 2169 cur_len += l
2170 2170
2171 2171 # Nope, this line is full.
2172 2172 else:
2173 2173 break
2174 2174
2175 2175 # The current line is full, and the next chunk is too big to
2176 2176 # fit on *any* line (not just this one).
2177 2177 if chunks and colwidth(chunks[-1]) > width:
2178 2178 self._handle_long_word(chunks, cur_line, cur_len, width)
2179 2179
2180 2180 # If the last chunk on this line is all whitespace, drop it.
2181 2181 if (self.drop_whitespace and
2182 2182 cur_line and cur_line[-1].strip() == ''):
2183 2183 del cur_line[-1]
2184 2184
2185 2185 # Convert current line back to a string and store it in list
2186 2186 # of all lines (return value).
2187 2187 if cur_line:
2188 2188 lines.append(indent + ''.join(cur_line))
2189 2189
2190 2190 return lines
2191 2191
2192 2192 global MBTextWrapper
2193 2193 MBTextWrapper = tw
2194 2194 return tw(**kwargs)
2195 2195
2196 2196 def wrap(line, width, initindent='', hangindent=''):
2197 2197 maxindent = max(len(hangindent), len(initindent))
2198 2198 if width <= maxindent:
2199 2199 # adjust for weird terminal size
2200 2200 width = max(78, maxindent + 1)
2201 2201 line = line.decode(encoding.encoding, encoding.encodingmode)
2202 2202 initindent = initindent.decode(encoding.encoding, encoding.encodingmode)
2203 2203 hangindent = hangindent.decode(encoding.encoding, encoding.encodingmode)
2204 2204 wrapper = MBTextWrapper(width=width,
2205 2205 initial_indent=initindent,
2206 2206 subsequent_indent=hangindent)
2207 2207 return wrapper.fill(line).encode(encoding.encoding)
2208 2208
2209 2209 if (pyplatform.python_implementation() == 'CPython' and
2210 2210 sys.version_info < (3, 0)):
2211 2211 # There is an issue in CPython that some IO methods do not handle EINTR
2212 2212 # correctly. The following table shows what CPython version (and functions)
2213 2213 # are affected (buggy: has the EINTR bug, okay: otherwise):
2214 2214 #
2215 2215 # | < 2.7.4 | 2.7.4 to 2.7.12 | >= 3.0
2216 2216 # --------------------------------------------------
2217 2217 # fp.__iter__ | buggy | buggy | okay
2218 2218 # fp.read* | buggy | okay [1] | okay
2219 2219 #
2220 2220 # [1]: fixed by changeset 67dc99a989cd in the cpython hg repo.
2221 2221 #
2222 2222 # Here we workaround the EINTR issue for fileobj.__iter__. Other methods
2223 2223 # like "read*" are ignored for now, as Python < 2.7.4 is a minority.
2224 2224 #
2225 2225 # Although we can workaround the EINTR issue for fp.__iter__, it is slower:
2226 2226 # "for x in fp" is 4x faster than "for x in iter(fp.readline, '')" in
2227 2227 # CPython 2, because CPython 2 maintains an internal readahead buffer for
2228 2228 # fp.__iter__ but not other fp.read* methods.
2229 2229 #
2230 2230 # On modern systems like Linux, the "read" syscall cannot be interrupted
2231 2231 # when reading "fast" files like on-disk files. So the EINTR issue only
2232 2232 # affects things like pipes, sockets, ttys etc. We treat "normal" (S_ISREG)
2233 2233 # files approximately as "fast" files and use the fast (unsafe) code path,
2234 2234 # to minimize the performance impact.
2235 2235 if sys.version_info >= (2, 7, 4):
2236 2236 # fp.readline deals with EINTR correctly, use it as a workaround.
2237 2237 def _safeiterfile(fp):
2238 2238 return iter(fp.readline, '')
2239 2239 else:
2240 2240 # fp.read* are broken too, manually deal with EINTR in a stupid way.
2241 2241 # note: this may block longer than necessary because of bufsize.
2242 2242 def _safeiterfile(fp, bufsize=4096):
2243 2243 fd = fp.fileno()
2244 2244 line = ''
2245 2245 while True:
2246 2246 try:
2247 2247 buf = os.read(fd, bufsize)
2248 2248 except OSError as ex:
2249 2249 # os.read only raises EINTR before any data is read
2250 2250 if ex.errno == errno.EINTR:
2251 2251 continue
2252 2252 else:
2253 2253 raise
2254 2254 line += buf
2255 2255 if '\n' in buf:
2256 2256 splitted = line.splitlines(True)
2257 2257 line = ''
2258 2258 for l in splitted:
2259 2259 if l[-1] == '\n':
2260 2260 yield l
2261 2261 else:
2262 2262 line = l
2263 2263 if not buf:
2264 2264 break
2265 2265 if line:
2266 2266 yield line
2267 2267
2268 2268 def iterfile(fp):
2269 2269 fastpath = True
2270 2270 if type(fp) is file:
2271 2271 fastpath = stat.S_ISREG(os.fstat(fp.fileno()).st_mode)
2272 2272 if fastpath:
2273 2273 return fp
2274 2274 else:
2275 2275 return _safeiterfile(fp)
2276 2276 else:
2277 2277 # PyPy and CPython 3 do not have the EINTR issue, so no workaround is needed.
2278 2278 def iterfile(fp):
2279 2279 return fp
2280 2280
2281 2281 def iterlines(iterator):
2282 2282 for chunk in iterator:
2283 2283 for line in chunk.splitlines():
2284 2284 yield line
2285 2285
2286 2286 def expandpath(path):
2287 2287 return os.path.expanduser(os.path.expandvars(path))
2288 2288
2289 2289 def hgcmd():
2290 2290 """Return the command used to execute current hg
2291 2291
2292 2292 This is different from hgexecutable() because on Windows we want
2293 2293 to avoid things opening new shell windows like batch files, so we
2294 2294 get either the python call or current executable.
2295 2295 """
2296 2296 if mainfrozen():
2297 2297 if getattr(sys, 'frozen', None) == 'macosx_app':
2298 2298 # Env variable set by py2app
2299 2299 return [os.environ['EXECUTABLEPATH']]
2300 2300 else:
2301 2301 return [sys.executable]
2302 2302 return gethgcmd()
2303 2303
2304 2304 def rundetached(args, condfn):
2305 2305 """Execute the argument list in a detached process.
2306 2306
2307 2307 condfn is a callable which is called repeatedly and should return
2308 2308 True once the child process is known to have started successfully.
2309 2309 At this point, the child process PID is returned. If the child
2310 2310 process fails to start or finishes before condfn() evaluates to
2311 2311 True, return -1.
2312 2312 """
2313 2313 # Windows case is easier because the child process is either
2314 2314 # successfully starting and validating the condition or exiting
2315 2315 # on failure. We just poll on its PID. On Unix, if the child
2316 2316 # process fails to start, it will be left in a zombie state until
2317 2317 the parent waits on it, which we cannot do since we expect a
2318 2318 long-running process on success. Instead we listen for SIGCHLD telling
2319 2319 # us our child process terminated.
2320 2320 terminated = set()
2321 2321 def handler(signum, frame):
2322 2322 terminated.add(os.wait())
2323 2323 prevhandler = None
2324 2324 SIGCHLD = getattr(signal, 'SIGCHLD', None)
2325 2325 if SIGCHLD is not None:
2326 2326 prevhandler = signal.signal(SIGCHLD, handler)
2327 2327 try:
2328 2328 pid = spawndetached(args)
2329 2329 while not condfn():
2330 2330 if ((pid in terminated or not testpid(pid))
2331 2331 and not condfn()):
2332 2332 return -1
2333 2333 time.sleep(0.1)
2334 2334 return pid
2335 2335 finally:
2336 2336 if prevhandler is not None:
2337 2337 signal.signal(signal.SIGCHLD, prevhandler)
2338 2338
2339 2339 def interpolate(prefix, mapping, s, fn=None, escape_prefix=False):
2340 2340 """Return the result of interpolating items in the mapping into string s.
2341 2341
2342 2342 prefix is a single character string, or a two character string with
2343 2343 a backslash as the first character if the prefix needs to be escaped in
2344 2344 a regular expression.
2345 2345
2346 2346 fn is an optional function that will be applied to the replacement text
2347 2347 just before replacement.
2348 2348
2349 2349 escape_prefix is an optional flag that allows a doubled prefix to
2350 2350 serve as its own escape.
2351 2351 """
2352 2352 fn = fn or (lambda s: s)
2353 2353 patterns = '|'.join(mapping.keys())
2354 2354 if escape_prefix:
2355 2355 patterns += '|' + prefix
2356 2356 if len(prefix) > 1:
2357 2357 prefix_char = prefix[1:]
2358 2358 else:
2359 2359 prefix_char = prefix
2360 2360 mapping[prefix_char] = prefix_char
2361 2361 r = remod.compile(r'%s(%s)' % (prefix, patterns))
2362 2362 return r.sub(lambda x: fn(mapping[x.group()[1:]]), s)
2363 2363
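# Editorial sketch (not upstream code): interpolate performs a single-pass,
# prefix-driven substitution; with escape_prefix=True a doubled prefix
# stands for a literal prefix character.
def _demointerpolate():
    s = interpolate(r'\$', {'user': 'alice'}, 'hi $user, $$5 please',
                    escape_prefix=True)
    assert s == 'hi alice, $5 please'
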
2364 2364 def getport(port):
2365 2365 """Return the port for a given network service.
2366 2366
2367 2367 If port is an integer, it's returned as is. If it's a string, it's
2368 2368 looked up using socket.getservbyname(). If there's no matching
2369 2369 service, error.Abort is raised.
2370 2370 """
2371 2371 try:
2372 2372 return int(port)
2373 2373 except ValueError:
2374 2374 pass
2375 2375
2376 2376 try:
2377 2377 return socket.getservbyname(port)
2378 2378 except socket.error:
2379 2379 raise Abort(_("no port number associated with service '%s'") % port)
2380 2380
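# Editorial sketch (not upstream code): getport accepts a numeric port (as
# int or string) or a service name; the last line assumes 'http' resolves
# to 80 in the local services database.
def _demogetport():
    assert getport(8080) == 8080
    assert getport('8080') == 8080
    assert getport('http') == 80
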
2381 2381 _booleans = {'1': True, 'yes': True, 'true': True, 'on': True, 'always': True,
2382 2382 '0': False, 'no': False, 'false': False, 'off': False,
2383 2383 'never': False}
2384 2384
2385 2385 def parsebool(s):
2386 2386 """Parse s into a boolean.
2387 2387
2388 2388 If s is not a valid boolean, returns None.
2389 2389 """
2390 2390 return _booleans.get(s.lower(), None)
2391 2391
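# Editorial sketch (not upstream code): parsebool maps the config-file
# boolean spellings to real booleans and signals unparsable input with
# None rather than False, so callers can tell "no" from "not a boolean".
def _demoparsebool():
    assert parsebool('Yes') is True
    assert parsebool('off') is False
    assert parsebool('maybe') is None
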
2392 2392 _hextochr = dict((a + b, chr(int(a + b, 16)))
2393 2393 for a in string.hexdigits for b in string.hexdigits)
2394 2394
2395 2395 class url(object):
2396 2396 r"""Reliable URL parser.
2397 2397
2398 2398 This parses URLs and provides attributes for the following
2399 2399 components:
2400 2400
2401 2401 <scheme>://<user>:<passwd>@<host>:<port>/<path>?<query>#<fragment>
2402 2402
2403 2403 Missing components are set to None. The only exception is
2404 2404 fragment, which is set to '' if present but empty.
2405 2405
2406 2406 If parsefragment is False, fragment is included in query. If
2407 2407 parsequery is False, query is included in path. If both are
2408 2408 False, both fragment and query are included in path.
2409 2409
2410 2410 See http://www.ietf.org/rfc/rfc2396.txt for more information.
2411 2411
2412 2412 Note that for backward compatibility reasons, bundle URLs do not
2413 2413 take host names. That means 'bundle://../' has a path of '../'.
2414 2414
2415 2415 Examples:
2416 2416
2417 2417 >>> url('http://www.ietf.org/rfc/rfc2396.txt')
2418 2418 <url scheme: 'http', host: 'www.ietf.org', path: 'rfc/rfc2396.txt'>
2419 2419 >>> url('ssh://[::1]:2200//home/joe/repo')
2420 2420 <url scheme: 'ssh', host: '[::1]', port: '2200', path: '/home/joe/repo'>
2421 2421 >>> url('file:///home/joe/repo')
2422 2422 <url scheme: 'file', path: '/home/joe/repo'>
2423 2423 >>> url('file:///c:/temp/foo/')
2424 2424 <url scheme: 'file', path: 'c:/temp/foo/'>
2425 2425 >>> url('bundle:foo')
2426 2426 <url scheme: 'bundle', path: 'foo'>
2427 2427 >>> url('bundle://../foo')
2428 2428 <url scheme: 'bundle', path: '../foo'>
2429 2429 >>> url(r'c:\foo\bar')
2430 2430 <url path: 'c:\\foo\\bar'>
2431 2431 >>> url(r'\\blah\blah\blah')
2432 2432 <url path: '\\\\blah\\blah\\blah'>
2433 2433 >>> url(r'\\blah\blah\blah#baz')
2434 2434 <url path: '\\\\blah\\blah\\blah', fragment: 'baz'>
2435 2435 >>> url(r'file:///C:\users\me')
2436 2436 <url scheme: 'file', path: 'C:\\users\\me'>
2437 2437
2438 2438 Authentication credentials:
2439 2439
2440 2440 >>> url('ssh://joe:xyz@x/repo')
2441 2441 <url scheme: 'ssh', user: 'joe', passwd: 'xyz', host: 'x', path: 'repo'>
2442 2442 >>> url('ssh://joe@x/repo')
2443 2443 <url scheme: 'ssh', user: 'joe', host: 'x', path: 'repo'>
2444 2444
2445 2445 Query strings and fragments:
2446 2446
2447 2447 >>> url('http://host/a?b#c')
2448 2448 <url scheme: 'http', host: 'host', path: 'a', query: 'b', fragment: 'c'>
2449 2449 >>> url('http://host/a?b#c', parsequery=False, parsefragment=False)
2450 2450 <url scheme: 'http', host: 'host', path: 'a?b#c'>
2451 2451
2452 2452 Empty path:
2453 2453
2454 2454 >>> url('')
2455 2455 <url path: ''>
2456 2456 >>> url('#a')
2457 2457 <url path: '', fragment: 'a'>
2458 2458 >>> url('http://host/')
2459 2459 <url scheme: 'http', host: 'host', path: ''>
2460 2460 >>> url('http://host/#a')
2461 2461 <url scheme: 'http', host: 'host', path: '', fragment: 'a'>
2462 2462
2463 2463 Only scheme:
2464 2464
2465 2465 >>> url('http:')
2466 2466 <url scheme: 'http'>
2467 2467 """
2468 2468
2469 2469 _safechars = "!~*'()+"
2470 2470 _safepchars = "/!~*'()+:\\"
2471 2471 _matchscheme = remod.compile('^[a-zA-Z0-9+.\\-]+:').match
2472 2472
2473 2473 def __init__(self, path, parsequery=True, parsefragment=True):
2474 2474 # We slowly chomp away at path until we have only the path left
2475 2475 self.scheme = self.user = self.passwd = self.host = None
2476 2476 self.port = self.path = self.query = self.fragment = None
2477 2477 self._localpath = True
2478 2478 self._hostport = ''
2479 2479 self._origpath = path
2480 2480
2481 2481 if parsefragment and '#' in path:
2482 2482 path, self.fragment = path.split('#', 1)
2483 2483
2484 2484 # special case for Windows drive letters and UNC paths
2485 2485 if hasdriveletter(path) or path.startswith('\\\\'):
2486 2486 self.path = path
2487 2487 return
2488 2488
2489 2489 # For compatibility reasons, we can't handle bundle paths as
2490 2490 # normal URLs
2491 2491 if path.startswith('bundle:'):
2492 2492 self.scheme = 'bundle'
2493 2493 path = path[7:]
2494 2494 if path.startswith('//'):
2495 2495 path = path[2:]
2496 2496 self.path = path
2497 2497 return
2498 2498
2499 2499 if self._matchscheme(path):
2500 2500 parts = path.split(':', 1)
2501 2501 if parts[0]:
2502 2502 self.scheme, path = parts
2503 2503 self._localpath = False
2504 2504
2505 2505 if not path:
2506 2506 path = None
2507 2507 if self._localpath:
2508 2508 self.path = ''
2509 2509 return
2510 2510 else:
2511 2511 if self._localpath:
2512 2512 self.path = path
2513 2513 return
2514 2514
2515 2515 if parsequery and '?' in path:
2516 2516 path, self.query = path.split('?', 1)
2517 2517 if not path:
2518 2518 path = None
2519 2519 if not self.query:
2520 2520 self.query = None
2521 2521
2522 2522 # // is required to specify a host/authority
2523 2523 if path and path.startswith('//'):
2524 2524 parts = path[2:].split('/', 1)
2525 2525 if len(parts) > 1:
2526 2526 self.host, path = parts
2527 2527 else:
2528 2528 self.host = parts[0]
2529 2529 path = None
2530 2530 if not self.host:
2531 2531 self.host = None
2532 2532 # path of file:///d is /d
2533 2533 # path of file:///d:/ is d:/, not /d:/
2534 2534 if path and not hasdriveletter(path):
2535 2535 path = '/' + path
2536 2536
2537 2537 if self.host and '@' in self.host:
2538 2538 self.user, self.host = self.host.rsplit('@', 1)
2539 2539 if ':' in self.user:
2540 2540 self.user, self.passwd = self.user.split(':', 1)
2541 2541 if not self.host:
2542 2542 self.host = None
2543 2543
2544 2544 # Don't split on colons in IPv6 addresses without ports
2545 2545 if (self.host and ':' in self.host and
2546 2546 not (self.host.startswith('[') and self.host.endswith(']'))):
2547 2547 self._hostport = self.host
2548 2548 self.host, self.port = self.host.rsplit(':', 1)
2549 2549 if not self.host:
2550 2550 self.host = None
2551 2551
2552 2552 if (self.host and self.scheme == 'file' and
2553 2553 self.host not in ('localhost', '127.0.0.1', '[::1]')):
2554 2554 raise Abort(_('file:// URLs can only refer to localhost'))
2555 2555
2556 2556 self.path = path
2557 2557
2558 2558 # leave the query string escaped
2559 2559 for a in ('user', 'passwd', 'host', 'port',
2560 2560 'path', 'fragment'):
2561 2561 v = getattr(self, a)
2562 2562 if v is not None:
2563 2563 setattr(self, a, pycompat.urlunquote(v))
2564 2564
2565 2565 def __repr__(self):
2566 2566 attrs = []
2567 2567 for a in ('scheme', 'user', 'passwd', 'host', 'port', 'path',
2568 2568 'query', 'fragment'):
2569 2569 v = getattr(self, a)
2570 2570 if v is not None:
2571 2571 attrs.append('%s: %r' % (a, v))
2572 2572 return '<url %s>' % ', '.join(attrs)
2573 2573
2574 2574 def __str__(self):
2575 2575 r"""Join the URL's components back into a URL string.
2576 2576
2577 2577 Examples:
2578 2578
2579 2579 >>> str(url('http://user:pw@host:80/c:/bob?fo:oo#ba:ar'))
2580 2580 'http://user:pw@host:80/c:/bob?fo:oo#ba:ar'
2581 2581 >>> str(url('http://user:pw@host:80/?foo=bar&baz=42'))
2582 2582 'http://user:pw@host:80/?foo=bar&baz=42'
2583 2583 >>> str(url('http://user:pw@host:80/?foo=bar%3dbaz'))
2584 2584 'http://user:pw@host:80/?foo=bar%3dbaz'
2585 2585 >>> str(url('ssh://user:pw@[::1]:2200//home/joe#'))
2586 2586 'ssh://user:pw@[::1]:2200//home/joe#'
2587 2587 >>> str(url('http://localhost:80//'))
2588 2588 'http://localhost:80//'
2589 2589 >>> str(url('http://localhost:80/'))
2590 2590 'http://localhost:80/'
2591 2591 >>> str(url('http://localhost:80'))
2592 2592 'http://localhost:80/'
2593 2593 >>> str(url('bundle:foo'))
2594 2594 'bundle:foo'
2595 2595 >>> str(url('bundle://../foo'))
2596 2596 'bundle:../foo'
2597 2597 >>> str(url('path'))
2598 2598 'path'
2599 2599 >>> str(url('file:///tmp/foo/bar'))
2600 2600 'file:///tmp/foo/bar'
2601 2601 >>> str(url('file:///c:/tmp/foo/bar'))
2602 2602 'file:///c:/tmp/foo/bar'
2603 2603 >>> print url(r'bundle:foo\bar')
2604 2604 bundle:foo\bar
2605 2605 >>> print url(r'file:///D:\data\hg')
2606 2606 file:///D:\data\hg
2607 2607 """
2608 2608 if self._localpath:
2609 2609 s = self.path
2610 2610 if self.scheme == 'bundle':
2611 2611 s = 'bundle:' + s
2612 2612 if self.fragment:
2613 2613 s += '#' + self.fragment
2614 2614 return s
2615 2615
2616 2616 s = self.scheme + ':'
2617 2617 if self.user or self.passwd or self.host:
2618 2618 s += '//'
2619 2619 elif self.scheme and (not self.path or self.path.startswith('/')
2620 2620 or hasdriveletter(self.path)):
2621 2621 s += '//'
2622 2622 if hasdriveletter(self.path):
2623 2623 s += '/'
2624 2624 if self.user:
2625 2625 s += urlreq.quote(self.user, safe=self._safechars)
2626 2626 if self.passwd:
2627 2627 s += ':' + urlreq.quote(self.passwd, safe=self._safechars)
2628 2628 if self.user or self.passwd:
2629 2629 s += '@'
2630 2630 if self.host:
2631 2631 if not (self.host.startswith('[') and self.host.endswith(']')):
2632 2632 s += urlreq.quote(self.host)
2633 2633 else:
2634 2634 s += self.host
2635 2635 if self.port:
2636 2636 s += ':' + urlreq.quote(self.port)
2637 2637 if self.host:
2638 2638 s += '/'
2639 2639 if self.path:
2640 2640 # TODO: similar to the query string, we should not unescape the
2641 2641 # path when we store it, the path might contain '%2f' = '/',
2642 2642 # which we should *not* escape.
2643 2643 s += urlreq.quote(self.path, safe=self._safepchars)
2644 2644 if self.query:
2645 2645 # we store the query in escaped form.
2646 2646 s += '?' + self.query
2647 2647 if self.fragment is not None:
2648 2648 s += '#' + urlreq.quote(self.fragment, safe=self._safepchars)
2649 2649 return s
2650 2650
2651 2651 def authinfo(self):
2652 2652 user, passwd = self.user, self.passwd
2653 2653 try:
2654 2654 self.user, self.passwd = None, None
2655 2655 s = str(self)
2656 2656 finally:
2657 2657 self.user, self.passwd = user, passwd
2658 2658 if not self.user:
2659 2659 return (s, None)
2660 2660 # authinfo[1] is passed to urllib2 password manager, and its
2661 2661 # URIs must not contain credentials. The host is passed in the
2662 2662 # URIs list because Python < 2.4.3 uses only that to search for
2663 2663 # a password.
2664 2664 return (s, (None, (s, self.host),
2665 2665 self.user, self.passwd or ''))
2666 2666
2667 2667 def isabs(self):
2668 2668 if self.scheme and self.scheme != 'file':
2669 2669 return True # remote URL
2670 2670 if hasdriveletter(self.path):
2671 2671 return True # absolute for our purposes - can't be joined()
2672 2672 if self.path.startswith(r'\\'):
2673 2673 return True # Windows UNC path
2674 2674 if self.path.startswith('/'):
2675 2675 return True # POSIX-style
2676 2676 return False
2677 2677
2678 2678 def localpath(self):
2679 2679 if self.scheme == 'file' or self.scheme == 'bundle':
2680 2680 path = self.path or '/'
2681 2681 # For Windows, we need to promote hosts containing drive
2682 2682 # letters to paths with drive letters.
2683 2683 if hasdriveletter(self._hostport):
2684 2684 path = self._hostport + '/' + self.path
2685 2685 elif (self.host is not None and self.path
2686 2686 and not hasdriveletter(path)):
2687 2687 path = '/' + path
2688 2688 return path
2689 2689 return self._origpath
2690 2690
2691 2691 def islocal(self):
2692 2692 '''whether localpath will return something that posixfile can open'''
2693 2693 return (not self.scheme or self.scheme == 'file'
2694 2694 or self.scheme == 'bundle')
2695 2695
2696 2696 def hasscheme(path):
2697 2697 return bool(url(path).scheme)
2698 2698
2699 2699 def hasdriveletter(path):
2700 2700 return path and path[1:2] == ':' and path[0:1].isalpha()
2701 2701
2702 2702 def urllocalpath(path):
2703 2703 return url(path, parsequery=False, parsefragment=False).localpath()
2704 2704
2705 2705 def hidepassword(u):
2706 2706 '''hide user credential in a url string'''
2707 2707 u = url(u)
2708 2708 if u.passwd:
2709 2709 u.passwd = '***'
2710 2710 return str(u)
2711 2711
2712 2712 def removeauth(u):
2713 2713 '''remove all authentication information from a url string'''
2714 2714 u = url(u)
2715 2715 u.user = u.passwd = None
2716 2716 return str(u)
2717 2717
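# Editorial sketch (not upstream code): both helpers round-trip through the
# url class; hidepassword masks only the password while removeauth drops
# user and password entirely. The URL is hypothetical.
def _demoscruburl():
    u = 'http://joe:secret@example.com/repo'
    assert hidepassword(u) == 'http://joe:***@example.com/repo'
    assert removeauth(u) == 'http://example.com/repo'
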
2718 2718 def isatty(fp):
2719 2719 try:
2720 2720 return fp.isatty()
2721 2721 except AttributeError:
2722 2722 return False
2723 2723
2724 2724 timecount = unitcountfn(
2725 2725 (1, 1e3, _('%.0f s')),
2726 2726 (100, 1, _('%.1f s')),
2727 2727 (10, 1, _('%.2f s')),
2728 2728 (1, 1, _('%.3f s')),
2729 2729 (100, 0.001, _('%.1f ms')),
2730 2730 (10, 0.001, _('%.2f ms')),
2731 2731 (1, 0.001, _('%.3f ms')),
2732 2732 (100, 0.000001, _('%.1f us')),
2733 2733 (10, 0.000001, _('%.2f us')),
2734 2734 (1, 0.000001, _('%.3f us')),
2735 2735 (100, 0.000000001, _('%.1f ns')),
2736 2736 (10, 0.000000001, _('%.2f ns')),
2737 2737 (1, 0.000000001, _('%.3f ns')),
2738 2738 )
2739 2739
2740 2740 _timenesting = [0]
2741 2741
2742 2742 def timed(func):
2743 2743 '''Report the execution time of a function call to stderr.
2744 2744
2745 2745 During development, use as a decorator when you need to measure
2746 2746 the cost of a function, e.g. as follows:
2747 2747
2748 2748 @util.timed
2749 2749 def foo(a, b, c):
2750 2750 pass
2751 2751 '''
2752 2752
2753 2753 def wrapper(*args, **kwargs):
2754 2754 start = time.time()
2755 2755 indent = 2
2756 2756 _timenesting[0] += indent
2757 2757 try:
2758 2758 return func(*args, **kwargs)
2759 2759 finally:
2760 2760 elapsed = time.time() - start
2761 2761 _timenesting[0] -= indent
2762 sys.stderr.write('%s%s: %s\n' %
2763 (' ' * _timenesting[0], func.__name__,
2764 timecount(elapsed)))
2762 stderr.write('%s%s: %s\n' %
2763 (' ' * _timenesting[0], func.__name__,
2764 timecount(elapsed)))
2765 2765 return wrapper
2766 2766
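A quick sketch of how the timed decorator above is meant to be used while profiling during development; the function name and body are hypothetical::

    from mercurial import util

    @util.timed
    def slowpart(repo):
        return len(repo.changelog)

    # each call prints e.g. "slowpart: 12.3 ms" to stderr, indented two
    # spaces per nesting level (tracked via _timenesting)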
2767 2767 _sizeunits = (('m', 2**20), ('k', 2**10), ('g', 2**30),
2768 2768 ('kb', 2**10), ('mb', 2**20), ('gb', 2**30), ('b', 1))
2769 2769
2770 2770 def sizetoint(s):
2771 2771 '''Convert a space specifier to a byte count.
2772 2772
2773 2773 >>> sizetoint('30')
2774 2774 30
2775 2775 >>> sizetoint('2.2kb')
2776 2776 2252
2777 2777 >>> sizetoint('6M')
2778 2778 6291456
2779 2779 '''
2780 2780 t = s.strip().lower()
2781 2781 try:
2782 2782 for k, u in _sizeunits:
2783 2783 if t.endswith(k):
2784 2784 return int(float(t[:-len(k)]) * u)
2785 2785 return int(t)
2786 2786 except ValueError:
2787 2787 raise error.ParseError(_("couldn't parse size: %s") % s)
2788 2788
2789 2789 class hooks(object):
2790 2790 '''A collection of hook functions that can be used to extend a
2791 2791 function's behavior. Hooks are called in lexicographic order,
2792 2792 based on the names of their sources.'''
2793 2793
2794 2794 def __init__(self):
2795 2795 self._hooks = []
2796 2796
2797 2797 def add(self, source, hook):
2798 2798 self._hooks.append((source, hook))
2799 2799
2800 2800 def __call__(self, *args):
2801 2801 self._hooks.sort(key=lambda x: x[0])
2802 2802 results = []
2803 2803 for source, hook in self._hooks:
2804 2804 results.append(hook(*args))
2805 2805 return results
2806 2806
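A minimal sketch of the hooks multiplexer above; the source names exist only to fix the call order::

    h = hooks()
    h.add('zz-double', lambda x: x * 2)
    h.add('aa-incr', lambda x: x + 1)
    h(10)  # -> [11, 20]: hooks run in lexicographic order of their source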
2807 2807 def getstackframes(skip=0, line=' %-*s in %s\n', fileline='%s:%s'):
2808 2808 '''Yields lines for a nicely formatted stacktrace.
2809 2809 Skips the last 'skip' entries.

2810 2810 Each file+linenumber is formatted according to fileline.
2811 2811 Each line is formatted according to line.
2812 2812 If line is None, it yields:
2813 2813 length of longest filepath+line number,
2814 2814 filepath+linenumber,
2815 2815 function
2816 2816
2817 2817 Not to be used in production code but very convenient while developing.
2818 2818 '''
2819 2819 entries = [(fileline % (fn, ln), func)
2820 2820 for fn, ln, func, _text in traceback.extract_stack()[:-skip - 1]]
2821 2821 if entries:
2822 2822 fnmax = max(len(entry[0]) for entry in entries)
2823 2823 for fnln, func in entries:
2824 2824 if line is None:
2825 2825 yield (fnmax, fnln, func)
2826 2826 else:
2827 2827 yield line % (fnmax, fnln, func)
2828 2828
2829 def debugstacktrace(msg='stacktrace', skip=0, f=sys.stderr, otherf=sys.stdout):
2829 def debugstacktrace(msg='stacktrace', skip=0, f=stderr, otherf=stdout):
2830 2830 '''Writes a message to f (stderr) with a nicely formatted stacktrace.
2831 2831 Skips the last 'skip' entries. By default it will flush stdout first.
2832 2832 It can be used everywhere and intentionally does not require a ui object.
2833 2833 Not to be used in production code but very convenient while developing.
2834 2834 '''
2835 2835 if otherf:
2836 2836 otherf.flush()
2837 2837 f.write('%s at:\n' % msg)
2838 2838 for line in getstackframes(skip + 1):
2839 2839 f.write(line)
2840 2840 f.flush()
2841 2841
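For example, a one-off trace dropped into a suspect code path while developing (the message is arbitrary)::

    from mercurial import util
    util.debugstacktrace('entering merge')
    # flushes stdout, then writes "entering merge at:" plus the stack to stderr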
2842 2842 class dirs(object):
2843 2843 '''a multiset of directory names from a dirstate or manifest'''
2844 2844
2845 2845 def __init__(self, map, skip=None):
2846 2846 self._dirs = {}
2847 2847 addpath = self.addpath
2848 2848 if safehasattr(map, 'iteritems') and skip is not None:
2849 2849 for f, s in map.iteritems():
2850 2850 if s[0] != skip:
2851 2851 addpath(f)
2852 2852 else:
2853 2853 for f in map:
2854 2854 addpath(f)
2855 2855
2856 2856 def addpath(self, path):
2857 2857 dirs = self._dirs
2858 2858 for base in finddirs(path):
2859 2859 if base in dirs:
2860 2860 dirs[base] += 1
2861 2861 return
2862 2862 dirs[base] = 1
2863 2863
2864 2864 def delpath(self, path):
2865 2865 dirs = self._dirs
2866 2866 for base in finddirs(path):
2867 2867 if dirs[base] > 1:
2868 2868 dirs[base] -= 1
2869 2869 return
2870 2870 del dirs[base]
2871 2871
2872 2872 def __iter__(self):
2873 2873 return self._dirs.iterkeys()
2874 2874
2875 2875 def __contains__(self, d):
2876 2876 return d in self._dirs
2877 2877
2878 2878 if safehasattr(parsers, 'dirs'):
2879 2879 dirs = parsers.dirs
2880 2880
2881 2881 def finddirs(path):
2882 2882 pos = path.rfind('/')
2883 2883 while pos != -1:
2884 2884 yield path[:pos]
2885 2885 pos = path.rfind('/', 0, pos)
2886 2886
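A sketch of the multiset semantics: only ancestor directories are counted, never the paths themselves::

    d = dirs(['a/b/c', 'a/d'])
    'a' in d        # True ('a' has refcount 2)
    'a/b' in d      # True
    'a/b/c' in d    # False: leaf entries are not tracked
    d.delpath('a/d')
    'a' in d        # still True until 'a/b/c' is also removed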
2887 2887 class ctxmanager(object):
2888 2888 '''A context manager for use in 'with' blocks to allow multiple
2889 2889 contexts to be entered at once. This is both safer and more
2890 2890 flexible than contextlib.nested.
2891 2891
2892 2892 Once Mercurial supports Python 2.7+, this will become mostly
2893 2893 unnecessary.
2894 2894 '''
2895 2895
2896 2896 def __init__(self, *args):
2897 2897 '''Accepts a list of no-argument functions that return context
2898 2898 managers. These will be invoked at __call__ time.'''
2899 2899 self._pending = args
2900 2900 self._atexit = []
2901 2901
2902 2902 def __enter__(self):
2903 2903 return self
2904 2904
2905 2905 def enter(self):
2906 2906 '''Create and enter context managers in the order in which they were
2907 2907 passed to the constructor.'''
2908 2908 values = []
2909 2909 for func in self._pending:
2910 2910 obj = func()
2911 2911 values.append(obj.__enter__())
2912 2912 self._atexit.append(obj.__exit__)
2913 2913 del self._pending
2914 2914 return values
2915 2915
2916 2916 def atexit(self, func, *args, **kwargs):
2917 2917 '''Add a function to call when this context manager exits. The
2918 2918 ordering of multiple atexit calls is unspecified, save that
2919 2919 they will happen before any __exit__ functions.'''
2920 2920 def wrapper(exc_type, exc_val, exc_tb):
2921 2921 func(*args, **kwargs)
2922 2922 self._atexit.append(wrapper)
2923 2923 return func
2924 2924
2925 2925 def __exit__(self, exc_type, exc_val, exc_tb):
2926 2926 '''Context managers are exited in the reverse order from which
2927 2927 they were created.'''
2928 2928 received = exc_type is not None
2929 2929 suppressed = False
2930 2930 pending = None
2931 2931 self._atexit.reverse()
2932 2932 for exitfunc in self._atexit:
2933 2933 try:
2934 2934 if exitfunc(exc_type, exc_val, exc_tb):
2935 2935 suppressed = True
2936 2936 exc_type = None
2937 2937 exc_val = None
2938 2938 exc_tb = None
2939 2939 except BaseException:
2940 2940 pending = sys.exc_info()
2941 2941 exc_type, exc_val, exc_tb = pending = sys.exc_info()
2942 2942 del self._atexit
2943 2943 if pending:
2944 2944 raise exc_val
2945 2945 return received and suppressed
2946 2946
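A usage sketch, assuming two hypothetical file paths; note the constructor takes thunks, so nothing is opened until enter() runs::

    def readboth(path1, path2):
        # both files are closed on exit, in reverse order of entry
        with ctxmanager(lambda: open(path1), lambda: open(path2)) as c:
            f1, f2 = c.enter()
            return f1.read() + f2.read()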
2947 2947 # compression code
2948 2948
2949 2949 class compressormanager(object):
2950 2950 """Holds registrations of various compression engines.
2951 2951
2952 2952 This class essentially abstracts the differences between compression
2953 2953 engines to allow new compression formats to be added easily, possibly from
2954 2954 extensions.
2955 2955
2956 2956 Compressors are registered against the global instance by calling its
2957 2957 ``register()`` method.
2958 2958 """
2959 2959 def __init__(self):
2960 2960 self._engines = {}
2961 2961 # Bundle spec human name to engine name.
2962 2962 self._bundlenames = {}
2963 2963 # Internal bundle identifier to engine name.
2964 2964 self._bundletypes = {}
2965 2965
2966 2966 def __getitem__(self, key):
2967 2967 return self._engines[key]
2968 2968
2969 2969 def __contains__(self, key):
2970 2970 return key in self._engines
2971 2971
2972 2972 def __iter__(self):
2973 2973 return iter(self._engines.keys())
2974 2974
2975 2975 def register(self, engine):
2976 2976 """Register a compression engine with the manager.
2977 2977
2978 2978 The argument must be a ``compressionengine`` instance.
2979 2979 """
2980 2980 if not isinstance(engine, compressionengine):
2981 2981 raise ValueError(_('argument must be a compressionengine'))
2982 2982
2983 2983 name = engine.name()
2984 2984
2985 2985 if name in self._engines:
2986 2986 raise error.Abort(_('compression engine %s already registered') %
2987 2987 name)
2988 2988
2989 2989 bundleinfo = engine.bundletype()
2990 2990 if bundleinfo:
2991 2991 bundlename, bundletype = bundleinfo
2992 2992
2993 2993 if bundlename in self._bundlenames:
2994 2994 raise error.Abort(_('bundle name %s already registered') %
2995 2995 bundlename)
2996 2996 if bundletype in self._bundletypes:
2997 2997 raise error.Abort(_('bundle type %s already registered by %s') %
2998 2998 (bundletype, self._bundletypes[bundletype]))
2999 2999
3000 3000 # No external facing name declared.
3001 3001 if bundlename:
3002 3002 self._bundlenames[bundlename] = name
3003 3003
3004 3004 self._bundletypes[bundletype] = name
3005 3005
3006 3006 self._engines[name] = engine
3007 3007
3008 3008 @property
3009 3009 def supportedbundlenames(self):
3010 3010 return set(self._bundlenames.keys())
3011 3011
3012 3012 @property
3013 3013 def supportedbundletypes(self):
3014 3014 return set(self._bundletypes.keys())
3015 3015
3016 3016 def forbundlename(self, bundlename):
3017 3017 """Obtain a compression engine registered to a bundle name.
3018 3018
3019 3019 Will raise KeyError if the bundle type isn't registered.
3020 3020
3021 3021 Will abort if the engine is known but not available.
3022 3022 """
3023 3023 engine = self._engines[self._bundlenames[bundlename]]
3024 3024 if not engine.available():
3025 3025 raise error.Abort(_('compression engine %s could not be loaded') %
3026 3026 engine.name())
3027 3027 return engine
3028 3028
3029 3029 def forbundletype(self, bundletype):
3030 3030 """Obtain a compression engine registered to a bundle type.
3031 3031
3032 3032 Will raise KeyError if the bundle type isn't registered.
3033 3033
3034 3034 Will abort if the engine is known but not available.
3035 3035 """
3036 3036 engine = self._engines[self._bundletypes[bundletype]]
3037 3037 if not engine.available():
3038 3038 raise error.Abort(_('compression engine %s could not be loaded') %
3039 3039 engine.name())
3040 3040 return engine
3041 3041
3042 3042 compengines = compressormanager()
3043 3043
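Once engines are registered (as done further below in this file), callers look one up by bundle name or type through this manager; a small sketch::

    eng = compengines.forbundletype('GZ')  # -> the zlib engine
    compressed = ''.join(eng.compressstream(iter(['some ', 'data'])))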
3044 3044 class compressionengine(object):
3045 3045 """Base class for compression engines.
3046 3046
3047 3047 Compression engines must implement the interface defined by this class.
3048 3048 """
3049 3049 def name(self):
3050 3050 """Returns the name of the compression engine.
3051 3051
3052 3052 This is the key the engine is registered under.
3053 3053
3054 3054 This method must be implemented.
3055 3055 """
3056 3056 raise NotImplementedError()
3057 3057
3058 3058 def available(self):
3059 3059 """Whether the compression engine is available.
3060 3060
3061 3061 The intent of this method is to allow optional compression engines
3062 3062 that may not be available in all installations (such as engines relying
3063 3063 on C extensions that may not be present).
3064 3064 """
3065 3065 return True
3066 3066
3067 3067 def bundletype(self):
3068 3068 """Describes bundle identifiers for this engine.
3069 3069
3070 3070 If this compression engine isn't supported for bundles, returns None.
3071 3071
3072 3072 If this engine can be used for bundles, returns a 2-tuple of strings of
3073 3073 the user-facing "bundle spec" compression name and an internal
3074 3074 identifier used to denote the compression format within bundles. To
3075 3075 exclude the name from external usage, set the first element to ``None``.
3076 3076
3077 3077 If bundle compression is supported, the class must also implement
3078 3078 ``compressstream`` and ``decompressorreader``.
3079 3079 """
3080 3080 return None
3081 3081
3082 3082 def compressstream(self, it, opts=None):
3083 3083 """Compress an iterator of chunks.
3084 3084
3085 3085 The method receives an iterator (ideally a generator) of chunks of
3086 3086 bytes to be compressed. It returns an iterator (ideally a generator)
3087 3087 of chunks of bytes representing the compressed output.
3088 3088
3089 3089 Optionally accepts an argument defining how to perform compression.
3090 3090 Each engine treats this argument differently.
3091 3091 """
3092 3092 raise NotImplementedError()
3093 3093
3094 3094 def decompressorreader(self, fh):
3095 3095 """Perform decompression on a file object.
3096 3096
3097 3097 Argument is an object with a ``read(size)`` method that returns
3098 3098 compressed data. Return value is an object with a ``read(size)`` that
3099 3099 returns uncompressed data.
3100 3100 """
3101 3101 raise NotImplementedError()
3102 3102
3103 3103 class _zlibengine(compressionengine):
3104 3104 def name(self):
3105 3105 return 'zlib'
3106 3106
3107 3107 def bundletype(self):
3108 3108 return 'gzip', 'GZ'
3109 3109
3110 3110 def compressstream(self, it, opts=None):
3111 3111 opts = opts or {}
3112 3112
3113 3113 z = zlib.compressobj(opts.get('level', -1))
3114 3114 for chunk in it:
3115 3115 data = z.compress(chunk)
3116 3116 # Not all calls to compress emit data. It is cheaper to inspect
3117 3117 # here than to feed empty chunks through generator.
3118 3118 if data:
3119 3119 yield data
3120 3120
3121 3121 yield z.flush()
3122 3122
3123 3123 def decompressorreader(self, fh):
3124 3124 def gen():
3125 3125 d = zlib.decompressobj()
3126 3126 for chunk in filechunkiter(fh):
3127 3127 yield d.decompress(chunk)
3128 3128
3129 3129 return chunkbuffer(gen())
3130 3130
3131 3131 compengines.register(_zlibengine())
3132 3132
3133 3133 class _bz2engine(compressionengine):
3134 3134 def name(self):
3135 3135 return 'bz2'
3136 3136
3137 3137 def bundletype(self):
3138 3138 return 'bzip2', 'BZ'
3139 3139
3140 3140 def compressstream(self, it, opts=None):
3141 3141 opts = opts or {}
3142 3142 z = bz2.BZ2Compressor(opts.get('level', 9))
3143 3143 for chunk in it:
3144 3144 data = z.compress(chunk)
3145 3145 if data:
3146 3146 yield data
3147 3147
3148 3148 yield z.flush()
3149 3149
3150 3150 def decompressorreader(self, fh):
3151 3151 def gen():
3152 3152 d = bz2.BZ2Decompressor()
3153 3153 for chunk in filechunkiter(fh):
3154 3154 yield d.decompress(chunk)
3155 3155
3156 3156 return chunkbuffer(gen())
3157 3157
3158 3158 compengines.register(_bz2engine())
3159 3159
3160 3160 class _truncatedbz2engine(compressionengine):
3161 3161 def name(self):
3162 3162 return 'bz2truncated'
3163 3163
3164 3164 def bundletype(self):
3165 3165 return None, '_truncatedBZ'
3166 3166
3167 3167 # We don't implement compressstream because it is hackily handled elsewhere.
3168 3168
3169 3169 def decompressorreader(self, fh):
3170 3170 def gen():
3171 3171 # The input stream doesn't have the 'BZ' header. So add it back.
3172 3172 d = bz2.BZ2Decompressor()
3173 3173 d.decompress('BZ')
3174 3174 for chunk in filechunkiter(fh):
3175 3175 yield d.decompress(chunk)
3176 3176
3177 3177 return chunkbuffer(gen())
3178 3178
3179 3179 compengines.register(_truncatedbz2engine())
3180 3180
3181 3181 class _noopengine(compressionengine):
3182 3182 def name(self):
3183 3183 return 'none'
3184 3184
3185 3185 def bundletype(self):
3186 3186 return 'none', 'UN'
3187 3187
3188 3188 def compressstream(self, it, opts=None):
3189 3189 return it
3190 3190
3191 3191 def decompressorreader(self, fh):
3192 3192 return fh
3193 3193
3194 3194 compengines.register(_noopengine())
3195 3195
3196 3196 class _zstdengine(compressionengine):
3197 3197 def name(self):
3198 3198 return 'zstd'
3199 3199
3200 3200 @propertycache
3201 3201 def _module(self):
3202 3202 # Not all installs have the zstd module available. So defer importing
3203 3203 # until first access.
3204 3204 try:
3205 3205 from . import zstd
3206 3206 # Force delayed import.
3207 3207 zstd.__version__
3208 3208 return zstd
3209 3209 except ImportError:
3210 3210 return None
3211 3211
3212 3212 def available(self):
3213 3213 return bool(self._module)
3214 3214
3215 3215 def bundletype(self):
3216 3216 return 'zstd', 'ZS'
3217 3217
3218 3218 def compressstream(self, it, opts=None):
3219 3219 opts = opts or {}
3220 3220 # zstd level 3 is almost always significantly faster than zlib
3221 3221 # while providing no worse compression. It strikes a good balance
3222 3222 # between speed and compression.
3223 3223 level = opts.get('level', 3)
3224 3224
3225 3225 zstd = self._module
3226 3226 z = zstd.ZstdCompressor(level=level).compressobj()
3227 3227 for chunk in it:
3228 3228 data = z.compress(chunk)
3229 3229 if data:
3230 3230 yield data
3231 3231
3232 3232 yield z.flush()
3233 3233
3234 3234 def decompressorreader(self, fh):
3235 3235 zstd = self._module
3236 3236 dctx = zstd.ZstdDecompressor()
3237 3237 return chunkbuffer(dctx.read_from(fh))
3238 3238
3239 3239 compengines.register(_zstdengine())
3240 3240
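A round-trip sketch through one of the engines registered above, using an in-memory file object::

    import cStringIO
    eng = compengines['zlib']
    blob = ''.join(eng.compressstream(iter(['hello '] * 100)))
    fh = cStringIO.StringIO(blob)
    assert eng.decompressorreader(fh).read() == 'hello ' * 100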
3241 3241 # convenient shortcut
3242 3242 dst = debugstacktrace
@@ -1,959 +1,958 b''
1 1 # wireproto.py - generic wire protocol support functions
2 2 #
3 3 # Copyright 2005-2010 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 from __future__ import absolute_import
9 9
10 10 import hashlib
11 11 import itertools
12 12 import os
13 import sys
14 13 import tempfile
15 14
16 15 from .i18n import _
17 16 from .node import (
18 17 bin,
19 18 hex,
20 19 )
21 20
22 21 from . import (
23 22 bundle2,
24 23 changegroup as changegroupmod,
25 24 encoding,
26 25 error,
27 26 exchange,
28 27 peer,
29 28 pushkey as pushkeymod,
30 29 streamclone,
31 30 util,
32 31 )
33 32
34 33 urlerr = util.urlerr
35 34 urlreq = util.urlreq
36 35
37 36 bundle2required = _(
38 37 'incompatible Mercurial client; bundle2 required\n'
39 38 '(see https://www.mercurial-scm.org/wiki/IncompatibleClient)\n')
40 39
41 40 class abstractserverproto(object):
42 41 """abstract class that summarizes the protocol API
43 42
44 43 Used as reference and documentation.
45 44 """
46 45
47 46 def getargs(self, args):
48 47 """return the value for arguments in <args>
49 48
50 49 returns a list of values (same order as <args>)"""
51 50 raise NotImplementedError()
52 51
53 52 def getfile(self, fp):
54 53 """write the whole content of a file into a file like object
55 54
56 55 The file is in the form::
57 56
58 57 (<chunk-size>\n<chunk>)+0\n
59 58
60 59 the chunk size is the ASCII representation of the int.
61 60 """
62 61 raise NotImplementedError()
63 62
64 63 def redirect(self):
65 64 """may setup interception for stdout and stderr
66 65
67 66 See also the `restore` method."""
68 67 raise NotImplementedError()
69 68
70 69 # If the `redirect` function does install interception, the `restore`
71 70 # function MUST be defined. If interception is not used, this function
72 71 # MUST NOT be defined.
73 72 #
74 73 # left commented here on purpose
75 74 #
76 75 #def restore(self):
77 76 # """reinstall previous stdout and stderr and return intercepted stdout
78 77 # """
79 78 # raise NotImplementedError()
80 79
81 80 class remotebatch(peer.batcher):
82 81 '''batches the queued calls; uses as few roundtrips as possible'''
83 82 def __init__(self, remote):
84 83 '''remote must support _submitbatch(encbatch) and
85 84 _submitone(op, encargs)'''
86 85 peer.batcher.__init__(self)
87 86 self.remote = remote
88 87 def submit(self):
89 88 req, rsp = [], []
90 89 for name, args, opts, resref in self.calls:
91 90 mtd = getattr(self.remote, name)
92 91 batchablefn = getattr(mtd, 'batchable', None)
93 92 if batchablefn is not None:
94 93 batchable = batchablefn(mtd.im_self, *args, **opts)
95 94 encargsorres, encresref = next(batchable)
96 95 if encresref:
97 96 req.append((name, encargsorres,))
98 97 rsp.append((batchable, encresref, resref,))
99 98 else:
100 99 resref.set(encargsorres)
101 100 else:
102 101 if req:
103 102 self._submitreq(req, rsp)
104 103 req, rsp = [], []
105 104 resref.set(mtd(*args, **opts))
106 105 if req:
107 106 self._submitreq(req, rsp)
108 107 def _submitreq(self, req, rsp):
109 108 encresults = self.remote._submitbatch(req)
110 109 for encres, r in zip(encresults, rsp):
111 110 batchable, encresref, resref = r
112 111 encresref.set(encres)
113 112 resref.set(next(batchable))
114 113
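A sketch of the client-side pattern this enables ('remote' is a hypothetical wirepeer); batched methods return futures whose values are filled in by submit()::

    b = remote.batch()
    fheads = b.heads()
    fknown = b.known(nodes)
    b.submit()  # a single 'batch' roundtrip serves both calls
    heads, known = fheads.value, fknown.value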
115 114 class remoteiterbatcher(peer.iterbatcher):
116 115 def __init__(self, remote):
117 116 super(remoteiterbatcher, self).__init__()
118 117 self._remote = remote
119 118
120 119 def __getattr__(self, name):
121 120 if not getattr(self._remote, name, False):
122 121 raise AttributeError(
123 122 'Attempted to iterbatch non-batchable call to %r' % name)
124 123 return super(remoteiterbatcher, self).__getattr__(name)
125 124
126 125 def submit(self):
127 126 """Break the batch request into many patch calls and pipeline them.
128 127
129 128 This is mostly valuable over http where request sizes can be
130 129 limited, but can be used in other places as well.
131 130 """
132 131 req, rsp = [], []
133 132 for name, args, opts, resref in self.calls:
134 133 mtd = getattr(self._remote, name)
135 134 batchable = mtd.batchable(mtd.im_self, *args, **opts)
136 135 encargsorres, encresref = next(batchable)
137 136 assert encresref
138 137 req.append((name, encargsorres))
139 138 rsp.append((batchable, encresref))
140 139 if req:
141 140 self._resultiter = self._remote._submitbatch(req)
142 141 self._rsp = rsp
143 142
144 143 def results(self):
145 144 for (batchable, encresref), encres in itertools.izip(
146 145 self._rsp, self._resultiter):
147 146 encresref.set(encres)
148 147 yield next(batchable)
149 148
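The iterbatcher variant yields results in call order instead of handing out futures; a sketch against the same hypothetical peer::

    b = remote.iterbatch()
    b.heads()
    b.branchmap()
    b.submit()
    heads, branchmap = b.results()  # results() yields in call order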
150 149 # Forward a couple of names from peer to make wireproto interactions
151 150 # slightly more sensible.
152 151 batchable = peer.batchable
153 152 future = peer.future
154 153
155 154 # list of nodes encoding / decoding
156 155
157 156 def decodelist(l, sep=' '):
158 157 if l:
159 158 return map(bin, l.split(sep))
160 159 return []
161 160
162 161 def encodelist(l, sep=' '):
163 162 try:
164 163 return sep.join(map(hex, l))
165 164 except TypeError:
166 165 raise
167 166
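These helpers round-trip between lists of binary nodes and the space-separated hex wire form::

    from mercurial.node import nullid
    encodelist([nullid, nullid])       # two 40-char hex strings, space-joined
    decodelist('00' * 20) == [nullid]  # True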
168 167 # batched call argument encoding
169 168
170 169 def escapearg(plain):
171 170 return (plain
172 171 .replace(':', ':c')
173 172 .replace(',', ':o')
174 173 .replace(';', ':s')
175 174 .replace('=', ':e'))
176 175
177 176 def unescapearg(escaped):
178 177 return (escaped
179 178 .replace(':e', '=')
180 179 .replace(':s', ';')
181 180 .replace(':o', ',')
182 181 .replace(':c', ':'))
183 182
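The escaping is a simple, reversible substitution; ':' must be escaped first so the replacements introduced for the other characters stay unambiguous::

    escapearg('key=a;b')      # -> 'key:ea:sb'
    unescapearg('key:ea:sb')  # -> 'key=a;b'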
184 183 def encodebatchcmds(req):
185 184 """Return a ``cmds`` argument value for the ``batch`` command."""
186 185 cmds = []
187 186 for op, argsdict in req:
188 187 # Old servers didn't properly unescape argument names. So prevent
189 188 # the sending of argument names that may not be decoded properly by
190 189 # servers.
191 190 assert all(escapearg(k) == k for k in argsdict)
192 191
193 192 args = ','.join('%s=%s' % (escapearg(k), escapearg(v))
194 193 for k, v in argsdict.iteritems())
195 194 cmds.append('%s %s' % (op, args))
196 195
197 196 return ';'.join(cmds)
198 197
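So a two-command batch serializes to a single string, e.g.::

    encodebatchcmds([('heads', {}), ('lookup', {'key': 'tip'})])
    # -> 'heads ;lookup key=tip'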
199 198 # mapping of options accepted by getbundle and their types
200 199 #
201 200 # Meant to be extended by extensions. It is the extensions' responsibility to ensure
202 201 # such options are properly processed in exchange.getbundle.
203 202 #
204 203 # supported types are:
205 204 #
206 205 # :nodes: list of binary nodes
207 206 # :csv: list of comma-separated values
208 207 # :scsv: list of comma-separated values return as set
209 208 # :plain: string with no transformation needed.
210 209 gboptsmap = {'heads': 'nodes',
211 210 'common': 'nodes',
212 211 'obsmarkers': 'boolean',
213 212 'bundlecaps': 'scsv',
214 213 'listkeys': 'csv',
215 214 'cg': 'boolean',
216 215 'cbattempted': 'boolean'}
217 216
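An extension adding its own getbundle option would extend this map (the option name below is hypothetical) and then handle the option in exchange.getbundle::

    from mercurial import wireproto
    wireproto.gboptsmap['myextradata'] = 'csv'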
218 217 # client side
219 218
220 219 class wirepeer(peer.peerrepository):
221 220 """Client-side interface for communicating with a peer repository.
222 221
223 222 Methods commonly call wire protocol commands of the same name.
224 223
225 224 See also httppeer.py and sshpeer.py for protocol-specific
226 225 implementations of this interface.
227 226 """
228 227 def batch(self):
229 228 if self.capable('batch'):
230 229 return remotebatch(self)
231 230 else:
232 231 return peer.localbatch(self)
233 232 def _submitbatch(self, req):
234 233 """run batch request <req> on the server
235 234
236 235 Returns an iterator of the raw responses from the server.
237 236 """
238 237 rsp = self._callstream("batch", cmds=encodebatchcmds(req))
239 238 chunk = rsp.read(1024)
240 239 work = [chunk]
241 240 while chunk:
242 241 while ';' not in chunk and chunk:
243 242 chunk = rsp.read(1024)
244 243 work.append(chunk)
245 244 merged = ''.join(work)
246 245 while ';' in merged:
247 246 one, merged = merged.split(';', 1)
248 247 yield unescapearg(one)
249 248 chunk = rsp.read(1024)
250 249 work = [merged, chunk]
251 250 yield unescapearg(''.join(work))
252 251
253 252 def _submitone(self, op, args):
254 253 return self._call(op, **args)
255 254
256 255 def iterbatch(self):
257 256 return remoteiterbatcher(self)
258 257
259 258 @batchable
260 259 def lookup(self, key):
261 260 self.requirecap('lookup', _('look up remote revision'))
262 261 f = future()
263 262 yield {'key': encoding.fromlocal(key)}, f
264 263 d = f.value
265 264 success, data = d[:-1].split(" ", 1)
266 265 if int(success):
267 266 yield bin(data)
268 267 self._abort(error.RepoError(data))
269 268
270 269 @batchable
271 270 def heads(self):
272 271 f = future()
273 272 yield {}, f
274 273 d = f.value
275 274 try:
276 275 yield decodelist(d[:-1])
277 276 except ValueError:
278 277 self._abort(error.ResponseError(_("unexpected response:"), d))
279 278
280 279 @batchable
281 280 def known(self, nodes):
282 281 f = future()
283 282 yield {'nodes': encodelist(nodes)}, f
284 283 d = f.value
285 284 try:
286 285 yield [bool(int(b)) for b in d]
287 286 except ValueError:
288 287 self._abort(error.ResponseError(_("unexpected response:"), d))
289 288
290 289 @batchable
291 290 def branchmap(self):
292 291 f = future()
293 292 yield {}, f
294 293 d = f.value
295 294 try:
296 295 branchmap = {}
297 296 for branchpart in d.splitlines():
298 297 branchname, branchheads = branchpart.split(' ', 1)
299 298 branchname = encoding.tolocal(urlreq.unquote(branchname))
300 299 branchheads = decodelist(branchheads)
301 300 branchmap[branchname] = branchheads
302 301 yield branchmap
303 302 except TypeError:
304 303 self._abort(error.ResponseError(_("unexpected response:"), d))
305 304
306 305 def branches(self, nodes):
307 306 n = encodelist(nodes)
308 307 d = self._call("branches", nodes=n)
309 308 try:
310 309 br = [tuple(decodelist(b)) for b in d.splitlines()]
311 310 return br
312 311 except ValueError:
313 312 self._abort(error.ResponseError(_("unexpected response:"), d))
314 313
315 314 def between(self, pairs):
316 315 batch = 8 # avoid giant requests
317 316 r = []
318 317 for i in xrange(0, len(pairs), batch):
319 318 n = " ".join([encodelist(p, '-') for p in pairs[i:i + batch]])
320 319 d = self._call("between", pairs=n)
321 320 try:
322 321 r.extend(l and decodelist(l) or [] for l in d.splitlines())
323 322 except ValueError:
324 323 self._abort(error.ResponseError(_("unexpected response:"), d))
325 324 return r
326 325
327 326 @batchable
328 327 def pushkey(self, namespace, key, old, new):
329 328 if not self.capable('pushkey'):
330 329 yield False, None
331 330 f = future()
332 331 self.ui.debug('preparing pushkey for "%s:%s"\n' % (namespace, key))
333 332 yield {'namespace': encoding.fromlocal(namespace),
334 333 'key': encoding.fromlocal(key),
335 334 'old': encoding.fromlocal(old),
336 335 'new': encoding.fromlocal(new)}, f
337 336 d = f.value
338 337 d, output = d.split('\n', 1)
339 338 try:
340 339 d = bool(int(d))
341 340 except ValueError:
342 341 raise error.ResponseError(
343 342 _('push failed (unexpected response):'), d)
344 343 for l in output.splitlines(True):
345 344 self.ui.status(_('remote: '), l)
346 345 yield d
347 346
348 347 @batchable
349 348 def listkeys(self, namespace):
350 349 if not self.capable('pushkey'):
351 350 yield {}, None
352 351 f = future()
353 352 self.ui.debug('preparing listkeys for "%s"\n' % namespace)
354 353 yield {'namespace': encoding.fromlocal(namespace)}, f
355 354 d = f.value
356 355 self.ui.debug('received listkey for "%s": %i bytes\n'
357 356 % (namespace, len(d)))
358 357 yield pushkeymod.decodekeys(d)
359 358
360 359 def stream_out(self):
361 360 return self._callstream('stream_out')
362 361
363 362 def changegroup(self, nodes, kind):
364 363 n = encodelist(nodes)
365 364 f = self._callcompressable("changegroup", roots=n)
366 365 return changegroupmod.cg1unpacker(f, 'UN')
367 366
368 367 def changegroupsubset(self, bases, heads, kind):
369 368 self.requirecap('changegroupsubset', _('look up remote changes'))
370 369 bases = encodelist(bases)
371 370 heads = encodelist(heads)
372 371 f = self._callcompressable("changegroupsubset",
373 372 bases=bases, heads=heads)
374 373 return changegroupmod.cg1unpacker(f, 'UN')
375 374
376 375 def getbundle(self, source, **kwargs):
377 376 self.requirecap('getbundle', _('look up remote changes'))
378 377 opts = {}
379 378 bundlecaps = kwargs.get('bundlecaps')
380 379 if bundlecaps is not None:
381 380 kwargs['bundlecaps'] = sorted(bundlecaps)
382 381 else:
383 382 bundlecaps = () # kwargs could have it to None
384 383 for key, value in kwargs.iteritems():
385 384 if value is None:
386 385 continue
387 386 keytype = gboptsmap.get(key)
388 387 if keytype is None:
389 388 assert False, 'unexpected'
390 389 elif keytype == 'nodes':
391 390 value = encodelist(value)
392 391 elif keytype in ('csv', 'scsv'):
393 392 value = ','.join(value)
394 393 elif keytype == 'boolean':
395 394 value = '%i' % bool(value)
396 395 elif keytype != 'plain':
397 396 raise KeyError('unknown getbundle option type %s'
398 397 % keytype)
399 398 opts[key] = value
400 399 f = self._callcompressable("getbundle", **opts)
401 400 if any((cap.startswith('HG2') for cap in bundlecaps)):
402 401 return bundle2.getunbundler(self.ui, f)
403 402 else:
404 403 return changegroupmod.cg1unpacker(f, 'UN')
405 404
406 405 def unbundle(self, cg, heads, url):
407 406 '''Send cg (a readable file-like object representing the
408 407 changegroup to push, typically a chunkbuffer object) to the
409 408 remote server as a bundle.
410 409
411 410 When pushing a bundle10 stream, return an integer indicating the
412 411 result of the push (see localrepository.addchangegroup()).
413 412
414 413 When pushing a bundle20 stream, return a bundle20 stream.
415 414
416 415 `url` is the url the client thinks it's pushing to, which is
417 416 visible to hooks.
418 417 '''
419 418
420 419 if heads != ['force'] and self.capable('unbundlehash'):
421 420 heads = encodelist(['hashed',
422 421 hashlib.sha1(''.join(sorted(heads))).digest()])
423 422 else:
424 423 heads = encodelist(heads)
425 424
426 425 if util.safehasattr(cg, 'deltaheader'):
427 426 # this a bundle10, do the old style call sequence
428 427 ret, output = self._callpush("unbundle", cg, heads=heads)
429 428 if ret == "":
430 429 raise error.ResponseError(
431 430 _('push failed:'), output)
432 431 try:
433 432 ret = int(ret)
434 433 except ValueError:
435 434 raise error.ResponseError(
436 435 _('push failed (unexpected response):'), ret)
437 436
438 437 for l in output.splitlines(True):
439 438 self.ui.status(_('remote: '), l)
440 439 else:
441 440 # bundle2 push. Send a stream, fetch a stream.
442 441 stream = self._calltwowaystream('unbundle', cg, heads=heads)
443 442 ret = bundle2.getunbundler(self.ui, stream)
444 443 return ret
445 444
446 445 def debugwireargs(self, one, two, three=None, four=None, five=None):
447 446 # don't pass optional arguments left at their default value
448 447 opts = {}
449 448 if three is not None:
450 449 opts['three'] = three
451 450 if four is not None:
452 451 opts['four'] = four
453 452 return self._call('debugwireargs', one=one, two=two, **opts)
454 453
455 454 def _call(self, cmd, **args):
456 455 """execute <cmd> on the server
457 456
458 457 The command is expected to return a simple string.
459 458
460 459 returns the server reply as a string."""
461 460 raise NotImplementedError()
462 461
463 462 def _callstream(self, cmd, **args):
464 463 """execute <cmd> on the server
465 464
466 465 The command is expected to return a stream. Note that if the
467 466 command doesn't return a stream, _callstream behaves
468 467 differently for ssh and http peers.
469 468
470 469 returns the server reply as a file like object.
471 470 """
472 471 raise NotImplementedError()
473 472
474 473 def _callcompressable(self, cmd, **args):
475 474 """execute <cmd> on the server
476 475
477 476 The command is expected to return a stream.
478 477
479 478 The stream may have been compressed in some implementations. This
480 479 function takes care of the decompression. This is the only difference
481 480 with _callstream.
482 481
483 482 returns the server reply as a file like object.
484 483 """
485 484 raise NotImplementedError()
486 485
487 486 def _callpush(self, cmd, fp, **args):
488 487 """execute a <cmd> on server
489 488
490 489 The command is expected to be related to a push. Push has a special
491 490 return method.
492 491
493 492 returns the server reply as a (ret, output) tuple. ret is either
494 493 empty (error) or a stringified int.
495 494 """
496 495 raise NotImplementedError()
497 496
498 497 def _calltwowaystream(self, cmd, fp, **args):
499 498 """execute <cmd> on server
500 499
501 500 The command will send a stream to the server and get a stream in reply.
502 501 """
503 502 raise NotImplementedError()
504 503
505 504 def _abort(self, exception):
506 505 """clearly abort the wire protocol connection and raise the exception
507 506 """
508 507 raise NotImplementedError()
509 508
510 509 # server side
511 510
512 511 # wire protocol command can either return a string or one of these classes.
513 512 class streamres(object):
514 513 """wireproto reply: binary stream
515 514
516 515 The call was successful and the result is a stream.
517 516
518 517 Accepts either a generator or an object with a ``read(size)`` method.
519 518
520 519 ``v1compressible`` indicates whether this data can be compressed to
521 520 "version 1" clients (technically: HTTP peers using
522 521 application/mercurial-0.1 media type). This flag should NOT be used on
523 522 new commands because new clients should support a more modern compression
524 523 mechanism.
525 524 """
526 525 def __init__(self, gen=None, reader=None, v1compressible=False):
527 526 self.gen = gen
528 527 self.reader = reader
529 528 self.v1compressible = v1compressible
530 529
531 530 class pushres(object):
532 531 """wireproto reply: success with simple integer return
533 532
534 533 The call was successful and returned an integer contained in `self.res`.
535 534 """
536 535 def __init__(self, res):
537 536 self.res = res
538 537
539 538 class pusherr(object):
540 539 """wireproto reply: failure
541 540
542 541 The call failed. The `self.res` attribute contains the error message.
543 542 """
544 543 def __init__(self, res):
545 544 self.res = res
546 545
547 546 class ooberror(object):
548 547 """wireproto reply: failure of a batch of operation
549 548
550 549 Something failed during a batch call. The error message is stored in
551 550 `self.message`.
552 551 """
553 552 def __init__(self, message):
554 553 self.message = message
555 554
556 555 def getdispatchrepo(repo, proto, command):
557 556 """Obtain the repo used for processing wire protocol commands.
558 557
559 558 The intent of this function is to serve as a monkeypatch point for
560 559 extensions that need commands to operate on different repo views under
561 560 specialized circumstances.
562 561 """
563 562 return repo.filtered('served')
564 563
565 564 def dispatch(repo, proto, command):
566 565 repo = getdispatchrepo(repo, proto, command)
567 566 func, spec = commands[command]
568 567 args = proto.getargs(spec)
569 568 return func(repo, proto, *args)
570 569
571 570 def options(cmd, keys, others):
572 571 opts = {}
573 572 for k in keys:
574 573 if k in others:
575 574 opts[k] = others[k]
576 575 del others[k]
577 576 if others:
578 sys.stderr.write("warning: %s ignored unexpected arguments %s\n"
579 % (cmd, ",".join(others)))
577 util.stderr.write("warning: %s ignored unexpected arguments %s\n"
578 % (cmd, ",".join(others)))
580 579 return opts
581 580
582 581 def bundle1allowed(repo, action):
583 582 """Whether a bundle1 operation is allowed from the server.
584 583
585 584 Priority is:
586 585
587 586 1. server.bundle1gd.<action> (if generaldelta active)
588 587 2. server.bundle1.<action>
589 588 3. server.bundle1gd (if generaldelta active)
590 589 4. server.bundle1
591 590 """
592 591 ui = repo.ui
593 592 gd = 'generaldelta' in repo.requirements
594 593
595 594 if gd:
596 595 v = ui.configbool('server', 'bundle1gd.%s' % action, None)
597 596 if v is not None:
598 597 return v
599 598
600 599 v = ui.configbool('server', 'bundle1.%s' % action, None)
601 600 if v is not None:
602 601 return v
603 602
604 603 if gd:
605 604 v = ui.configbool('server', 'bundle1gd', None)
606 605 if v is not None:
607 606 return v
608 607
609 608 return ui.configbool('server', 'bundle1', True)
610 609
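For example, an hgrc that refuses bundle1 pushes to generaldelta repos while still allowing everything else relies on priority 1 above::

    [server]
    bundle1gd.push = False
    bundle1 = True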
611 610 # list of commands
612 611 commands = {}
613 612
614 613 def wireprotocommand(name, args=''):
615 614 """decorator for wire protocol command"""
616 615 def register(func):
617 616 commands[name] = (func, args)
618 617 return func
619 618 return register
620 619
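A sketch of how an extension registers a new command with this decorator; the 'echo' command and its argument are hypothetical::

    @wireprotocommand('echo', 'text')
    def echo(repo, proto, text):
        return text + '\n'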
621 620 @wireprotocommand('batch', 'cmds *')
622 621 def batch(repo, proto, cmds, others):
623 622 repo = repo.filtered("served")
624 623 res = []
625 624 for pair in cmds.split(';'):
626 625 op, args = pair.split(' ', 1)
627 626 vals = {}
628 627 for a in args.split(','):
629 628 if a:
630 629 n, v = a.split('=')
631 630 vals[unescapearg(n)] = unescapearg(v)
632 631 func, spec = commands[op]
633 632 if spec:
634 633 keys = spec.split()
635 634 data = {}
636 635 for k in keys:
637 636 if k == '*':
638 637 star = {}
639 638 for key in vals.keys():
640 639 if key not in keys:
641 640 star[key] = vals[key]
642 641 data['*'] = star
643 642 else:
644 643 data[k] = vals[k]
645 644 result = func(repo, proto, *[data[k] for k in keys])
646 645 else:
647 646 result = func(repo, proto)
648 647 if isinstance(result, ooberror):
649 648 return result
650 649 res.append(escapearg(result))
651 650 return ';'.join(res)
652 651
653 652 @wireprotocommand('between', 'pairs')
654 653 def between(repo, proto, pairs):
655 654 pairs = [decodelist(p, '-') for p in pairs.split(" ")]
656 655 r = []
657 656 for b in repo.between(pairs):
658 657 r.append(encodelist(b) + "\n")
659 658 return "".join(r)
660 659
661 660 @wireprotocommand('branchmap')
662 661 def branchmap(repo, proto):
663 662 branchmap = repo.branchmap()
664 663 heads = []
665 664 for branch, nodes in branchmap.iteritems():
666 665 branchname = urlreq.quote(encoding.fromlocal(branch))
667 666 branchnodes = encodelist(nodes)
668 667 heads.append('%s %s' % (branchname, branchnodes))
669 668 return '\n'.join(heads)
670 669
671 670 @wireprotocommand('branches', 'nodes')
672 671 def branches(repo, proto, nodes):
673 672 nodes = decodelist(nodes)
674 673 r = []
675 674 for b in repo.branches(nodes):
676 675 r.append(encodelist(b) + "\n")
677 676 return "".join(r)
678 677
679 678 @wireprotocommand('clonebundles', '')
680 679 def clonebundles(repo, proto):
681 680 """Server command for returning info for available bundles to seed clones.
682 681
683 682 Clients will parse this response and determine what bundle to fetch.
684 683
685 684 Extensions may wrap this command to filter or dynamically emit data
686 685 depending on the request. e.g. you could advertise URLs for the closest
687 686 data center given the client's IP address.
688 687 """
689 688 return repo.opener.tryread('clonebundles.manifest')
690 689
691 690 wireprotocaps = ['lookup', 'changegroupsubset', 'branchmap', 'pushkey',
692 691 'known', 'getbundle', 'unbundlehash', 'batch']
693 692
694 693 def _capabilities(repo, proto):
695 694 """return a list of capabilities for a repo
696 695
697 696 This function exists to allow extensions to easily wrap capabilities
698 697 computation
699 698
700 699 - returns a list: easy to alter
701 700 - changes done here will be propagated to both the `capabilities` and `hello`
702 701 commands without any other action needed.
703 702 """
704 703 # copy to prevent modification of the global list
705 704 caps = list(wireprotocaps)
706 705 if streamclone.allowservergeneration(repo.ui):
707 706 if repo.ui.configbool('server', 'preferuncompressed', False):
708 707 caps.append('stream-preferred')
709 708 requiredformats = repo.requirements & repo.supportedformats
710 709 # if our local revlogs are just revlogv1, add 'stream' cap
711 710 if not requiredformats - set(('revlogv1',)):
712 711 caps.append('stream')
713 712 # otherwise, add 'streamreqs' detailing our local revlog format
714 713 else:
715 714 caps.append('streamreqs=%s' % ','.join(sorted(requiredformats)))
716 715 if repo.ui.configbool('experimental', 'bundle2-advertise', True):
717 716 capsblob = bundle2.encodecaps(bundle2.getrepocaps(repo))
718 717 caps.append('bundle2=' + urlreq.quote(capsblob))
719 718 caps.append('unbundle=%s' % ','.join(bundle2.bundlepriority))
720 719 caps.append(
721 720 'httpheader=%d' % repo.ui.configint('server', 'maxhttpheaderlen', 1024))
722 721 if repo.ui.configbool('experimental', 'httppostargs', False):
723 722 caps.append('httppostargs')
724 723 return caps
725 724
726 725 # If you are writing an extension and consider wrapping this function, wrap
727 726 # `_capabilities` instead.
728 727 @wireprotocommand('capabilities')
729 728 def capabilities(repo, proto):
730 729 return ' '.join(_capabilities(repo, proto))
731 730
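Following the advice above, an extension extends the capability list by wrapping _capabilities rather than the command; a sketch with a hypothetical token::

    from mercurial import extensions, wireproto

    def mycaps(orig, repo, proto):
        caps = orig(repo, proto)
        caps.append('mycap')
        return caps
    extensions.wrapfunction(wireproto, '_capabilities', mycaps)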
732 731 @wireprotocommand('changegroup', 'roots')
733 732 def changegroup(repo, proto, roots):
734 733 nodes = decodelist(roots)
735 734 cg = changegroupmod.changegroup(repo, nodes, 'serve')
736 735 return streamres(reader=cg, v1compressible=True)
737 736
738 737 @wireprotocommand('changegroupsubset', 'bases heads')
739 738 def changegroupsubset(repo, proto, bases, heads):
740 739 bases = decodelist(bases)
741 740 heads = decodelist(heads)
742 741 cg = changegroupmod.changegroupsubset(repo, bases, heads, 'serve')
743 742 return streamres(reader=cg, v1compressible=True)
744 743
745 744 @wireprotocommand('debugwireargs', 'one two *')
746 745 def debugwireargs(repo, proto, one, two, others):
747 746 # only accept optional args from the known set
748 747 opts = options('debugwireargs', ['three', 'four'], others)
749 748 return repo.debugwireargs(one, two, **opts)
750 749
751 750 @wireprotocommand('getbundle', '*')
752 751 def getbundle(repo, proto, others):
753 752 opts = options('getbundle', gboptsmap.keys(), others)
754 753 for k, v in opts.iteritems():
755 754 keytype = gboptsmap[k]
756 755 if keytype == 'nodes':
757 756 opts[k] = decodelist(v)
758 757 elif keytype == 'csv':
759 758 opts[k] = list(v.split(','))
760 759 elif keytype == 'scsv':
761 760 opts[k] = set(v.split(','))
762 761 elif keytype == 'boolean':
763 762 # Client should serialize False as '0', which is a non-empty string
764 763 # so it evaluates as a True bool.
765 764 if v == '0':
766 765 opts[k] = False
767 766 else:
768 767 opts[k] = bool(v)
769 768 elif keytype != 'plain':
770 769 raise KeyError('unknown getbundle option type %s'
771 770 % keytype)
772 771
773 772 if not bundle1allowed(repo, 'pull'):
774 773 if not exchange.bundle2requested(opts.get('bundlecaps')):
775 774 return ooberror(bundle2required)
776 775
777 776 chunks = exchange.getbundlechunks(repo, 'serve', **opts)
778 777 return streamres(gen=chunks, v1compressible=True)
779 778
780 779 @wireprotocommand('heads')
781 780 def heads(repo, proto):
782 781 h = repo.heads()
783 782 return encodelist(h) + "\n"
784 783
785 784 @wireprotocommand('hello')
786 785 def hello(repo, proto):
787 786 '''the hello command returns a set of lines describing various
788 787 interesting things about the server, in an RFC822-like format.
789 788 Currently the only one defined is "capabilities", which
790 789 consists of a line in the form:
791 790
792 791 capabilities: space separated list of tokens
793 792 '''
794 793 return "capabilities: %s\n" % (capabilities(repo, proto))
795 794
796 795 @wireprotocommand('listkeys', 'namespace')
797 796 def listkeys(repo, proto, namespace):
798 797 d = repo.listkeys(encoding.tolocal(namespace)).items()
799 798 return pushkeymod.encodekeys(d)
800 799
801 800 @wireprotocommand('lookup', 'key')
802 801 def lookup(repo, proto, key):
803 802 try:
804 803 k = encoding.tolocal(key)
805 804 c = repo[k]
806 805 r = c.hex()
807 806 success = 1
808 807 except Exception as inst:
809 808 r = str(inst)
810 809 success = 0
811 810 return "%s %s\n" % (success, r)
812 811
813 812 @wireprotocommand('known', 'nodes *')
814 813 def known(repo, proto, nodes, others):
815 814 return ''.join(b and "1" or "0" for b in repo.known(decodelist(nodes)))
816 815
817 816 @wireprotocommand('pushkey', 'namespace key old new')
818 817 def pushkey(repo, proto, namespace, key, old, new):
819 818 # compatibility with pre-1.8 clients which were accidentally
820 819 # sending raw binary nodes rather than utf-8-encoded hex
821 820 if len(new) == 20 and new.encode('string-escape') != new:
822 821 # looks like it could be a binary node
823 822 try:
824 823 new.decode('utf-8')
825 824 new = encoding.tolocal(new) # but cleanly decodes as UTF-8
826 825 except UnicodeDecodeError:
827 826 pass # binary, leave unmodified
828 827 else:
829 828 new = encoding.tolocal(new) # normal path
830 829
831 830 if util.safehasattr(proto, 'restore'):
832 831
833 832 proto.redirect()
834 833
835 834 try:
836 835 r = repo.pushkey(encoding.tolocal(namespace), encoding.tolocal(key),
837 836 encoding.tolocal(old), new) or False
838 837 except error.Abort:
839 838 r = False
840 839
841 840 output = proto.restore()
842 841
843 842 return '%s\n%s' % (int(r), output)
844 843
845 844 r = repo.pushkey(encoding.tolocal(namespace), encoding.tolocal(key),
846 845 encoding.tolocal(old), new)
847 846 return '%s\n' % int(r)
848 847
849 848 @wireprotocommand('stream_out')
850 849 def stream(repo, proto):
851 850 '''If the server supports streaming clone, it advertises the "stream"
852 851 capability with a value representing the version and flags of the repo
853 852 it is serving. Client checks to see if it understands the format.
854 853 '''
855 854 if not streamclone.allowservergeneration(repo.ui):
856 855 return '1\n'
857 856
858 857 def getstream(it):
859 858 yield '0\n'
860 859 for chunk in it:
861 860 yield chunk
862 861
863 862 try:
864 863 # LockError may be raised before the first result is yielded. Don't
865 864 # emit output until we're sure we got the lock successfully.
866 865 it = streamclone.generatev1wireproto(repo)
867 866 return streamres(gen=getstream(it))
868 867 except error.LockError:
869 868 return '2\n'
870 869
871 870 @wireprotocommand('unbundle', 'heads')
872 871 def unbundle(repo, proto, heads):
873 872 their_heads = decodelist(heads)
874 873
875 874 try:
876 875 proto.redirect()
877 876
878 877 exchange.check_heads(repo, their_heads, 'preparing changes')
879 878
880 879 # write bundle data to temporary file because it can be big
881 880 fd, tempname = tempfile.mkstemp(prefix='hg-unbundle-')
882 881 fp = os.fdopen(fd, 'wb+')
883 882 r = 0
884 883 try:
885 884 proto.getfile(fp)
886 885 fp.seek(0)
887 886 gen = exchange.readbundle(repo.ui, fp, None)
888 887 if (isinstance(gen, changegroupmod.cg1unpacker)
889 888 and not bundle1allowed(repo, 'push')):
890 889 return ooberror(bundle2required)
891 890
892 891 r = exchange.unbundle(repo, gen, their_heads, 'serve',
893 892 proto._client())
894 893 if util.safehasattr(r, 'addpart'):
895 894 # The return looks streamable, we are in the bundle2 case and
896 895 # should return a stream.
897 896 return streamres(gen=r.getchunks())
898 897 return pushres(r)
899 898
900 899 finally:
901 900 fp.close()
902 901 os.unlink(tempname)
903 902
904 903 except (error.BundleValueError, error.Abort, error.PushRaced) as exc:
905 904 # handle non-bundle2 case first
906 905 if not getattr(exc, 'duringunbundle2', False):
907 906 try:
908 907 raise
909 908 except error.Abort:
910 # The old code we moved used sys.stderr directly.
909 # The old code we moved used util.stderr directly.
911 910 # We did not change it to minimise code change.
912 911 # This needs to be moved to something proper.
913 912 # Feel free to do it.
914 sys.stderr.write("abort: %s\n" % exc)
913 util.stderr.write("abort: %s\n" % exc)
915 914 return pushres(0)
916 915 except error.PushRaced:
917 916 return pusherr(str(exc))
918 917
919 918 bundler = bundle2.bundle20(repo.ui)
920 919 for out in getattr(exc, '_bundle2salvagedoutput', ()):
921 920 bundler.addpart(out)
922 921 try:
923 922 try:
924 923 raise
925 924 except error.PushkeyFailed as exc:
926 925 # check client caps
927 926 remotecaps = getattr(exc, '_replycaps', None)
928 927 if (remotecaps is not None
929 928 and 'pushkey' not in remotecaps.get('error', ())):
930 929 # not supported on remote side, fall back to Abort handler.
931 930 raise
932 931 part = bundler.newpart('error:pushkey')
933 932 part.addparam('in-reply-to', exc.partid)
934 933 if exc.namespace is not None:
935 934 part.addparam('namespace', exc.namespace, mandatory=False)
936 935 if exc.key is not None:
937 936 part.addparam('key', exc.key, mandatory=False)
938 937 if exc.new is not None:
939 938 part.addparam('new', exc.new, mandatory=False)
940 939 if exc.old is not None:
941 940 part.addparam('old', exc.old, mandatory=False)
942 941 if exc.ret is not None:
943 942 part.addparam('ret', exc.ret, mandatory=False)
944 943 except error.BundleValueError as exc:
945 944 errpart = bundler.newpart('error:unsupportedcontent')
946 945 if exc.parttype is not None:
947 946 errpart.addparam('parttype', exc.parttype)
948 947 if exc.params:
949 948 errpart.addparam('params', '\0'.join(exc.params))
950 949 except error.Abort as exc:
951 950 manargs = [('message', str(exc))]
952 951 advargs = []
953 952 if exc.hint is not None:
954 953 advargs.append(('hint', exc.hint))
955 954 bundler.addpart(bundle2.bundlepart('error:abort',
956 955 manargs, advargs))
957 956 except error.PushRaced as exc:
958 957 bundler.newpart('error:pushraced', [('message', str(exc))])
959 958 return streamres(gen=bundler.getchunks())