# Copyright (C) 2016-2023 RhodeCode GmbH
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License, version 3
# (only), as published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
# This program is dual-licensed. If you wish to learn more about the
# RhodeCode Enterprise Edition, including its added features, Support services,
# and proprietary license terms, please see https://rhodecode.com/licenses/

import pytest
from pygments.lexers import get_lexer_by_name

from rhodecode.tests import no_newline_id_generator
from rhodecode.lib.codeblocks import (
    tokenize_string, split_token_stream, rollup_tokenstream,
    render_tokenstream)
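

# The expected values below use the short CSS class names that Pygments
# assigns to token types (e.g. 'kn' for Keyword.Namespace, 'nn' for
# Name.Namespace, 'w' for Whitespace); tokenize_string() yields
# (class, text) pairs in source order.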
class TestTokenizeString(object):

    python_code = '''
    import this

    var = 6
    print("this")

    '''

    def test_tokenize_as_python(self):
        lexer = get_lexer_by_name('python')
        tokens = list(tokenize_string(self.python_code, lexer))
        expected_tokens = [
            ('w', '\n'),
            ('', '    '),
            ('kn', 'import'),
            ('', ' '),
            ('nn', 'this'),
            ('w', '\n'),
            ('w', '\n'),
            ('', '    '),
            ('n', 'var'),
            ('', ' '),
            ('o', '='),
            ('', ' '),
            ('mi', '6'),
            ('w', '\n'),
            ('', '    '),
            ('nb', 'print'),
            ('p', '('),
            ('s2', '"'),
            ('s2', 'this'),
            ('s2', '"'),
            ('p', ')'),
            ('w', '\n'),
            ('w', '\n'),
            ('', '    ')
        ]

        assert tokens == expected_tokens

    def test_tokenize_as_text(self):
        lexer = get_lexer_by_name('text')
        tokens = list(tokenize_string(self.python_code, lexer))
        assert tokens == [
            ('',
             '\n    import this\n\n    var = 6\n    print("this")\n\n    ')
        ]
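

# split_token_stream() breaks a (class, text) token stream into per-line
# token lists, splitting on newlines; a trailing '\n' produces a final line
# holding an empty token, as the cases below document.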
class TestSplitTokenStream(object):

    def test_split_token_stream(self):
        tokens = [('type1', 'some\ntext'), ('type2', 'more\n')]
        content = [x + y for x, y in tokens]
        lines = list(split_token_stream(tokens, content))

        assert lines == [
            [('type1', 'some')],
            [('type1', 'text'), ('type2', 'more')],
            [('type2', '')],
        ]

    def test_split_token_stream_single(self):
        tokens = [('type1', '\n')]
        content = [x + y for x, y in tokens]
        lines = list(split_token_stream(tokens, content))
        assert lines == [
            [('type1', '')],
            [('type1', '')],
        ]

    def test_split_token_stream_single_repeat(self):
        tokens = [('type1', '\n\n\n')]
        content = [x + y for x, y in tokens]
        lines = list(split_token_stream(tokens, content))
        assert lines == [
            [('type1', '')],
            [('type1', '')],
            [('type1', '')],
            [('type1', '')],
        ]

    def test_split_token_stream_multiple_repeat(self):
        tokens = [('type1', '\n\n'), ('type2', '\n\n')]
        content = [x + y for x, y in tokens]

        lines = list(split_token_stream(tokens, content))
        assert lines == [
            [('type1', '')],
            [('type1', '')],
            [('type1', ''), ('type2', '')],
            [('type2', '')],
            [('type2', '')],
        ]

    def test_no_tokens_by_content(self):
        # with an empty token stream the raw content is passed through
        # unchanged as a single untyped line
        tokens = []
        content = '\ufeff'
        lines = list(split_token_stream(tokens, content))
        assert lines == [
            [('', content)],
        ]

    def test_no_tokens_by_valid_content(self):
        from pygments.lexers.css import CssLexer
        # Pygments drops a leading BOM while lexing, so no '\ufeff'
        # shows up in the resulting tokens
        content = '\ufeff table.dataTable'
        tokens = tokenize_string(content, CssLexer())
        lines = list(split_token_stream(tokens, content))
        assert lines == [
            [('w', ' '),
             ('nt', 'table'),
             ('p', '.'),
             ('nc', 'dataTable')],
        ]
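

# rollup_tokenstream() folds consecutive tokens that share a class into one
# (class, [(op, text), ...]) entry, concatenating adjacent texts that also
# share the same op, as the parametrized cases below document.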
class TestRollupTokens(object):

    @pytest.mark.parametrize('tokenstream,output', [
        ([],
            []),
        ([('A', 'hell'), ('A', 'o')], [
            ('A', [
                ('', 'hello')]),
        ]),
        ([('A', 'hell'), ('B', 'o')], [
            ('A', [
                ('', 'hell')]),
            ('B', [
                ('', 'o')]),
        ]),
        ([('A', 'hel'), ('A', 'lo'), ('B', ' '), ('A', 'there')], [
            ('A', [
                ('', 'hello')]),
            ('B', [
                ('', ' ')]),
            ('A', [
                ('', 'there')]),
        ]),
    ])
    def test_rollup_tokenstream_without_ops(self, tokenstream, output):
        assert list(rollup_tokenstream(tokenstream)) == output

    @pytest.mark.parametrize('tokenstream,output', [
        ([],
            []),
        ([('A', '', 'hell'), ('A', '', 'o')], [
            ('A', [
                ('', 'hello')]),
        ]),
        ([('A', '', 'hell'), ('B', '', 'o')], [
            ('A', [
                ('', 'hell')]),
            ('B', [
                ('', 'o')]),
        ]),
        ([('A', '', 'h'), ('B', '', 'e'), ('C', '', 'y')], [
            ('A', [
                ('', 'h')]),
            ('B', [
                ('', 'e')]),
            ('C', [
                ('', 'y')]),
        ]),
        ([('A', '', 'h'), ('A', '', 'e'), ('C', '', 'y')], [
            ('A', [
                ('', 'he')]),
            ('C', [
                ('', 'y')]),
        ]),
        ([('A', 'ins', 'h'), ('A', 'ins', 'e')], [
            ('A', [
                ('ins', 'he')
            ]),
        ]),
        ([('A', 'ins', 'h'), ('A', 'del', 'e')], [
            ('A', [
                ('ins', 'h'),
                ('del', 'e')
            ]),
        ]),
        ([('A', 'ins', 'h'), ('B', 'del', 'e'), ('B', 'del', 'y')], [
            ('A', [
                ('ins', 'h'),
            ]),
            ('B', [
                ('del', 'ey'),
            ]),
        ]),
        ([('A', 'ins', 'h'), ('A', 'del', 'e'), ('B', 'del', 'y')], [
            ('A', [
                ('ins', 'h'),
                ('del', 'e'),
            ]),
            ('B', [
                ('del', 'y'),
            ]),
        ]),
        ([('A', '', 'some'), ('A', 'ins', 'new'), ('A', '', 'name')], [
            ('A', [
                ('', 'some'),
                ('ins', 'new'),
                ('', 'name'),
            ]),
        ]),
    ])
    def test_rollup_tokenstream_with_ops(self, tokenstream, output):
        assert list(rollup_tokenstream(tokenstream)) == output
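

# render_tokenstream() renders tokens as HTML: one <span> per class group
# (the class becomes the CSS class attribute), with diff ops wrapped in
# <ins>/<del> tags inside the span.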
class TestRenderTokenStream(object):

    @pytest.mark.parametrize('tokenstream,output', [
        (
            [],
            '',
        ),
        (
            [('', '', '')],
            '<span></span>',
        ),
        (
            [('', '', 'text')],
            '<span>text</span>',
        ),
        (
            [('A', '', '')],
            '<span class="A"></span>',
        ),
        (
            [('A', '', 'hello')],
            '<span class="A">hello</span>',
        ),
        (
            [('A', '', 'hel'), ('A', '', 'lo')],
            '<span class="A">hello</span>',
        ),
        (
            [('A', '', 'two\n'), ('A', '', 'lines')],
            '<span class="A">two\nlines</span>',
        ),
        (
            [('A', '', '\nthree\n'), ('A', '', 'lines')],
            '<span class="A">\nthree\nlines</span>',
        ),
        (
            [('', '', '\n'), ('A', '', 'line')],
            '<span>\n</span><span class="A">line</span>',
        ),
        (
            [('', 'ins', '\n'), ('A', '', 'line')],
            '<span><ins>\n</ins></span><span class="A">line</span>',
        ),
        (
            [('A', '', 'hel'), ('A', 'ins', 'lo')],
            '<span class="A">hel<ins>lo</ins></span>',
        ),
        (
            [('A', '', 'hel'), ('A', 'ins', 'l'), ('A', 'ins', 'o')],
            '<span class="A">hel<ins>lo</ins></span>',
        ),
        (
            [('A', '', 'hel'), ('A', 'ins', 'l'), ('A', 'del', 'o')],
            '<span class="A">hel<ins>l</ins><del>o</del></span>',
        ),
        (
            [('A', '', 'hel'), ('B', '', 'lo')],
            '<span class="A">hel</span><span class="B">lo</span>',
        ),
        (
            [('A', '', 'hel'), ('B', 'ins', 'lo')],
            '<span class="A">hel</span><span class="B"><ins>lo</ins></span>',
        ),
    ], ids=no_newline_id_generator)
    def test_render_tokenstream_with_ops(self, tokenstream, output):
        html = render_tokenstream(tokenstream)
        assert html == output

    @pytest.mark.parametrize('tokenstream,output', [
        (
            [('A', 'hel'), ('A', 'lo')],
            '<span class="A">hello</span>',
        ),
        (
            [('A', 'hel'), ('A', 'l'), ('A', 'o')],
            '<span class="A">hello</span>',
        ),
        (
            [('A', 'hel'), ('B', 'lo')],
            '<span class="A">hel</span><span class="B">lo</span>',
        ),
    ])
    def test_render_tokenstream_without_ops(self, tokenstream, output):
        html = render_tokenstream(tokenstream)
        assert html == output