Reduce the number of tests on AppVeyor.

AppVeyor is way slower than Travis, in part because we test on more
architectures, in particular both 32-bit and 64-bit, and the accumulated
delay sometimes leads to 30 minutes between Travis success and the AppVeyor
response. 32-bit OSes are starting to be rare (or are not our target, like
tablets): less than 1/5 market share in some surveys, yet they account for
more than 2/3 of our testing time. So slash 3 out of 4 of the 32-bit test
jobs and test only Python 3.6 on 32-bit. (I know that's paradoxical, as
mostly old systems are 32-bit... but do we expect people with an old system
and an old Python to use a new IPython?)

For example:

    Windows Arch            Share
    Windows 10 64 bit      36.97%
    Windows 7 64 bit       32.99%
    Windows 8.1 64 bit     12.93%
    Windows 8 64 bit        1.64%
    Windows Vista 64 bit    0.13%
    Windows 7 32 bit        6.97%
    Windows XP 32 bit       2.00%
    Windows 10 32 bit       1.31%
    Windows 8.1 32 bit      0.34%
    Windows Vista 32 bit    0.24%
    Windows 8 32 bit        0.15%

In total, about 83% are 64-bit.

Source: http://www.digitaltrends.com/computing/steam-users-windows-10-market-share/
and http://store.steampowered.com/hwsurvey?platform=pc

tokenutil.py
"""Token-related utilities"""
# Copyright (c) IPython Development Team.
# Distributed under the terms of the Modified BSD License.
from collections import namedtuple
from io import StringIO
from keyword import iskeyword
from . import tokenize2
from .py3compat import cast_unicode_py2
Token = namedtuple('Token', ['token', 'text', 'start', 'end', 'line'])
def generate_tokens(readline):
    """wrap generate_tokens to catch EOF errors"""
    try:
        for token in tokenize2.generate_tokens(readline):
            yield token
    except tokenize2.TokenError:
        # catch EOF error
        return
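
# A minimal usage sketch (the input string below is a made-up example):
# because generate_tokens() swallows TokenError, tokenizing incomplete
# input such as an unterminated call just stops at EOF instead of raising:
#
#     for tok in generate_tokens(StringIO("foo(").readline):
#         print(tok)
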
def line_at_cursor(cell, cursor_pos=0):
    """Return the line in a cell at a given cursor position

    Used for calling line-based APIs that don't support multi-line input, yet.

    Parameters
    ----------
    cell : str
        multiline block of text
    cursor_pos : integer
        the cursor position

    Returns
    -------
    (line, offset): (text, integer)
        The line with the current cursor, and the character offset of the start of the line.
    """
    offset = 0
    lines = cell.splitlines(True)
    for line in lines:
        next_offset = offset + len(line)
        if next_offset >= cursor_pos:
            break
        offset = next_offset
    else:
        line = ""
    return (line, offset)
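
# For example (with a made-up cell): line_at_cursor("a = 1\nb = 2\n", 8)
# returns ("b = 2\n", 6) -- the full second line, since cursor_pos 8 falls
# inside it, and 6, the character offset at which that line starts.
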
def token_at_cursor(cell, cursor_pos=0):
    """Get the token at a given cursor

    Used for introspection.

    Function calls are prioritized, so the token for the callable will be returned
    if the cursor is anywhere inside the call.

    Parameters
    ----------
    cell : unicode
        A block of Python code
    cursor_pos : int
        The location of the cursor in the block where the token should be found
    """
    cell = cast_unicode_py2(cell)
    names = []
    tokens = []
    call_names = []

    offsets = {1: 0}  # lines start at 1
    for tup in generate_tokens(StringIO(cell).readline):
        tok = Token(*tup)
        # token, text, start, end, line = tup
        start_line, start_col = tok.start
        end_line, end_col = tok.end

        if end_line + 1 not in offsets:
            # keep track of character offsets for each line
            lines = tok.line.splitlines(True)
            for lineno, line in zip(range(start_line + 1, end_line + 2), lines):
                if lineno not in offsets:
                    offsets[lineno] = offsets[lineno - 1] + len(line)

        offset = offsets[start_line]
        # allow '|foo' to find 'foo' at the beginning of a line
        boundary = cursor_pos + 1 if start_col == 0 else cursor_pos
        if offset + start_col >= boundary:
            # current token starts after the cursor,
            # don't consume it
            break

        if tok.token == tokenize2.NAME and not iskeyword(tok.text):
            if names and tokens and tokens[-1].token == tokenize2.OP and tokens[-1].text == '.':
                # extend a dotted name (e.g. 'a.b' -> 'a.b.c')
                names[-1] = "%s.%s" % (names[-1], tok.text)
            else:
                names.append(tok.text)
        elif tok.token == tokenize2.OP:
            if tok.text == '=' and names:
                # don't inspect the lhs of an assignment
                names.pop(-1)
            if tok.text == '(' and names:
                # if we are inside a function call, inspect the function
                call_names.append(names[-1])
            elif tok.text == ')' and call_names:
                call_names.pop(-1)

        tokens.append(tok)

        if offsets[end_line] + end_col > cursor_pos:
            # we found the cursor, stop reading
            break

    if call_names:
        return call_names[-1]
    elif names:
        return names[-1]
    else:
        return ''
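
# A quick illustration (with a made-up cell): for token_at_cursor("func(a, b)", 6)
# the cursor sits among the arguments, but the enclosing call name takes
# priority, so the returned token is 'func' rather than 'a'.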