summaryrefslogtreecommitdiffstats
path: root/tests
diff options
context:
space:
mode:
authorJonathan Slenders <jonathan@slenders.be>2014-10-11 16:19:04 +0200
committerJonathan Slenders <jonathan@slenders.be>2014-12-07 20:40:05 +0100
commit89d428a9888ef3f4fcd1cf7815d7b971bf3ff630 (patch)
tree999187b8ce7b19f9acf1ae3229709b0c489e6c3d /tests
parent43f5ffd62c62b36e8ef41e80107811ee9b892d13 (diff)
- prompt_toolkit.contrib.regular_languages added.
- prompt_toolkit.contrib.pdb added (Python debugger.)
- This adds path based completion for strings in pt(i)python.
- Some examples and tests added.
- Support for multiline toolbars
Diffstat (limited to 'tests')
-rw-r--r--tests/layout_tests/__init__.py55
-rw-r--r--tests/old_tests.py151
-rw-r--r--tests/regular_languages_tests/__init__.py100
-rwxr-xr-xtests/run_tests.py14
4 files changed, 164 insertions, 156 deletions
diff --git a/tests/layout_tests/__init__.py b/tests/layout_tests/__init__.py
new file mode 100644
index 00000000..a0996449
--- /dev/null
+++ b/tests/layout_tests/__init__.py
@@ -0,0 +1,55 @@
+from __future__ import unicode_literals
+
+from prompt_toolkit.layout.utils import fit_tokens_in_size
+from pygments.token import Token
+
+import unittest
+
+
+class FitTokensInSizeTest(unittest.TestCase):
+ def setUp(self):
+ self.tokens = [(Token, 'Hello world'), (Token, '\n'), (Token, 'line2')]
+
+ def test_1(self):
+ result = fit_tokens_in_size(self.tokens, width=5, height=3, default_token=Token)
+
+ self.assertEqual(result, [
+ [(Token, u'H'), (Token, u'e'), (Token, u'l'), (Token, u'l'), (Token, u'o')],
+ [(Token, u'l'), (Token, u'i'), (Token, u'n'), (Token, u'e'), (Token, u'2')],
+ [(Token, u' ')],
+ ])
+
+ def test_2(self):
+ result = fit_tokens_in_size(self.tokens, width=3, height=3, default_token=Token)
+
+ self.assertEqual(result, [
+ [(Token, u'H'), (Token, u'e'), (Token, u'l')],
+ [(Token, u'l'), (Token, u'i'), (Token, u'n')],
+ [(Token, u' ')],
+ ])
+
+ def test_3(self):
+ result = fit_tokens_in_size(self.tokens, width=3, height=2, default_token=Token)
+
+ self.assertEqual(result, [
+ [(Token, u'H'), (Token, u'e'), (Token, u'l')],
+ [(Token, u'l'), (Token, u'i'), (Token, u'n')],
+ ])
+
+ def test_4(self):
+ result = fit_tokens_in_size(self.tokens, width=3, height=1, default_token=Token)
+
+ self.assertEqual(result, [
+ [(Token, u'H'), (Token, u'e'), (Token, u'l')],
+ ])
+
+ def test_5(self):
+ result = fit_tokens_in_size(self.tokens, width=15, height=4, default_token=Token)
+
+ self.assertEqual(result, [
+ [(Token, u'H'), (Token, u'e'), (Token, u'l'), (Token, u'l'), (Token, u'o'), (Token, u' '),
+ (Token, u'w'), (Token, u'o'), (Token, u'r'), (Token, u'l'), (Token, u'd'), (Token, u' ')],
+ [(Token, u'l'), (Token, u'i'), (Token, u'n'), (Token, u'e'), (Token, u'2'), (Token, u' ')],
+ [(Token, u' ' * 15)],
+ [(Token, u' ' * 15)],
+ ])
diff --git a/tests/old_tests.py b/tests/old_tests.py
deleted file mode 100644
index 8a6bcf2c..00000000
--- a/tests/old_tests.py
+++ /dev/null
@@ -1,151 +0,0 @@
-#!/usr/bin/env python
-"""
-
-Old tests. To be cleaned up.
-
-"""
-from __future__ import unicode_literals
-
-import unittest
-
-from prompt_toolkit.contrib.shell.lexer import ShellLexer, TextToken
-from pygments.token import Token
-
-
-class ShellLexerTest(unittest.TestCase):
- def setUp(self):
- self.lexer = ShellLexer(stripnl=False, stripall=False, ensurenl=False)
-
- def test_simple(self):
- t = list(self.lexer.get_tokens('aaa bbb ccc'))
- self.assertEqual(t, [
- (Token.Text, 'aaa'),
- (Token.WhiteSpace, ' '),
- (Token.Text, 'bbb'),
- (Token.WhiteSpace, ' '),
- (Token.Text, 'ccc') ])
-
- def test_complex(self):
- t = list(self.lexer.get_tokens('''a'a 'a " b "bb ccc\\'''))
- # The tokenizer separates text and whitespace, but keeps all the characters.
- self.assertEqual(t, [
- (Token.Text, "a'a 'a"),
- (Token.WhiteSpace, ' '),
- (Token.Text, '" b "bb'),
- (Token.WhiteSpace, ' '),
- (Token.Text, 'ccc\\') ])
-
-
-class TextTokenTest(unittest.TestCase):
- def test_simple(self):
- t = TextToken('hello')
- t.unescaped_text = 'hello'
-
- def test_double_quotes(self):
- t = TextToken('h"e"llo" wor"ld')
- self.assertEqual(t.unescaped_text, 'hello world')
- self.assertEqual(t.inside_double_quotes, False)
- self.assertEqual(t.inside_single_quotes, False)
- self.assertEqual(t.trailing_backslash, False)
-
- def test_single_quotes(self):
- t = TextToken("h'e'llo' wo'rld")
- self.assertEqual(t.unescaped_text, 'hello world')
- self.assertEqual(t.inside_double_quotes, False)
- self.assertEqual(t.inside_single_quotes, False)
- self.assertEqual(t.trailing_backslash, False)
-
- def test_backslashes(self):
- t = TextToken("hello\ wo\\rld")
- self.assertEqual(t.unescaped_text, 'hello world')
- self.assertEqual(t.inside_double_quotes, False)
- self.assertEqual(t.inside_single_quotes, False)
- self.assertEqual(t.trailing_backslash, False)
-
- def test_open_double_quote(self):
- t = TextToken('he"llo world')
- self.assertEqual(t.unescaped_text, 'hello world')
- self.assertEqual(t.inside_double_quotes, True)
- self.assertEqual(t.inside_single_quotes, False)
- self.assertEqual(t.trailing_backslash, False)
-
- def test_open_single_quote(self):
- t = TextToken("he'llo world")
- self.assertEqual(t.unescaped_text, 'hello world')
- self.assertEqual(t.inside_double_quotes, False)
- self.assertEqual(t.inside_single_quotes, True)
- self.assertEqual(t.trailing_backslash, False)
-
- def test_trailing_backslash(self):
- t = TextToken("hello\\ world\\")
- self.assertEqual(t.unescaped_text, 'hello world')
- self.assertEqual(t.inside_double_quotes, False)
- self.assertEqual(t.inside_single_quotes, False)
- self.assertEqual(t.trailing_backslash, True)
-
-#---
-
-from prompt_toolkit.contrib.shell.rules import TokenStream
-
-class TokenStreamTest(unittest.TestCase):
- def test_tokenstream(self):
- s = TokenStream([ 'aaa', 'bbb', 'ccc', ])
-
- # Test top
- self.assertEqual(s.first_token, 'aaa')
- self.assertEqual(s.has_more_tokens, True)
-
- # Pop
- self.assertEqual(s.pop(), 'aaa')
- self.assertEqual(s.first_token, 'bbb')
- self.assertEqual(s.has_more_tokens, True)
-
- # Test restore point
- with s.restore_point:
- self.assertEqual(s.pop(), 'bbb')
- self.assertEqual(s.first_token, 'ccc')
- self.assertEqual(s.pop(), 'ccc')
-
- self.assertEqual(s.has_more_tokens, False)
- self.assertEqual(s.first_token, None)
-
- # State should have been restored after the with block.
- self.assertEqual(s.first_token, 'bbb')
- self.assertEqual(s.has_more_tokens, True)
-
-#--
-
-from prompt_toolkit.contrib.shell.rules import Literal
-from prompt_toolkit.contrib.shell.nodes import LiteralNode
-
-class LiteralTest(unittest.TestCase):
- def setUp(self):
- self.literal = Literal('my-variable', dest='key')
-
- def test_literal_match(self):
- stream = TokenStream([ 'my-variable' ])
- result = list(self.literal.parse(stream))
-
- self.assertEqual(len(result), 1)
- self.assertIsInstance(result[0], LiteralNode)
- self.assertEqual(result[0].rule, self.literal)
- self.assertEqual(result[0]._text, 'my-variable')
- self.assertEqual(result[0].get_variables(), { 'key': 'my-variable' })
-
-# def test_literal_nomatch_suffix(self):
-# stream = TokenStream([ 'my-variable', 'suffix' ])
-# result = list(self.literal.parse(stream))
-#
-# self.assertEqual(len(result), 0)
-
- def test_literal_nomatch_invalid(self):
- stream = TokenStream([ 'invalid' ])
- result = list(self.literal.parse(stream))
-
- self.assertEqual(len(result), 0)
-
-
-#class VariableTest(unittest.TestCase):
-# def setUp(self):
-# self.variable = Variable(placeholder='my-variable', dest='destination')
-
diff --git a/tests/regular_languages_tests/__init__.py b/tests/regular_languages_tests/__init__.py
new file mode 100644
index 00000000..070c0547
--- /dev/null
+++ b/tests/regular_languages_tests/__init__.py
@@ -0,0 +1,100 @@
+from __future__ import unicode_literals
+
+from prompt_toolkit.contrib.regular_languages import compile
+from prompt_toolkit.contrib.regular_languages.compiler import Match, Variables
+from prompt_toolkit.contrib.regular_languages.completion import GrammarCompleter
+from prompt_toolkit.completion import Completer, Completion, CompleteEvent
+from prompt_toolkit.document import Document
+
+import unittest
+
+
+class GrammarTest(unittest.TestCase):
+ def test_simple_match(self):
+ g = compile('hello|world')
+
+ m = g.match('hello')
+ self.assertTrue(isinstance(m, Match))
+
+ m = g.match('world')
+ self.assertTrue(isinstance(m, Match))
+
+ m = g.match('somethingelse')
+ self.assertEqual(m, None)
+
+ def test_variable_varname(self):
+ """
+ Test `Variable` with varname.
+ """
+ g = compile('((?P<varname>hello|world)|test)')
+
+ m = g.match('hello')
+ variables = m.variables()
+ self.assertTrue(isinstance(variables, Variables))
+ self.assertEqual(variables.get('varname'), 'hello')
+ self.assertEqual(variables['varname'], 'hello')
+
+ m = g.match('world')
+ variables = m.variables()
+ self.assertTrue(isinstance(variables, Variables))
+ self.assertEqual(variables.get('varname'), 'world')
+ self.assertEqual(variables['varname'], 'world')
+
+ m = g.match('test')
+ variables = m.variables()
+ self.assertTrue(isinstance(variables, Variables))
+ self.assertEqual(variables.get('varname'), None)
+ self.assertEqual(variables['varname'], None)
+
+ def test_prefix(self):
+ """
+ Test `match_prefix`.
+ """
+ g = compile(r'(hello\ world|something\ else)')
+
+ m = g.match_prefix('hello world')
+ self.assertTrue(isinstance(m, Match))
+
+ m = g.match_prefix('he')
+ self.assertTrue(isinstance(m, Match))
+
+ m = g.match_prefix('')
+ self.assertTrue(isinstance(m, Match))
+
+ m = g.match_prefix('som')
+ self.assertTrue(isinstance(m, Match))
+
+ m = g.match_prefix('hello wor')
+ self.assertTrue(isinstance(m, Match))
+
+ m = g.match_prefix('no-match')
+ self.assertEqual(m, None)
+
+ m = g.match_prefix('ello')
+ self.assertEqual(m, None)
+
+ def test_completer(self):
+ class completer1(Completer):
+ def get_completions(self, document, complete_event):
+ yield Completion('before-%s-after' % document.text, -len(document.text))
+ yield Completion('before-%s-after-B' % document.text, -len(document.text))
+
+ class completer2(Completer):
+ def get_completions(self, document, complete_event):
+ yield Completion('before2-%s-after2' % document.text, -len(document.text))
+ yield Completion('before2-%s-after2-B' % document.text, -len(document.text))
+
+ # Create grammar. "var1" + "whitespace" + "var2"
+ g = compile(r'(?P<var1>[a-z]*) \s+ (?P<var2>[a-z]*)')
+
+ # Test 'get_completions()'
+ completer = GrammarCompleter(g, {'var1': completer1(), 'var2': completer2()})
+ completions = list(completer.get_completions(
+ Document('abc def', len('abc def')),
+ CompleteEvent()))
+
+ self.assertEqual(len(completions), 2)
+ self.assertEqual(completions[0].text, 'before2-def-after2')
+ self.assertEqual(completions[0].start_position, -3)
+ self.assertEqual(completions[1].text, 'before2-def-after2-B')
+ self.assertEqual(completions[1].start_position, -3)
diff --git a/tests/run_tests.py b/tests/run_tests.py
index 328a832d..1a1969da 100755
--- a/tests/run_tests.py
+++ b/tests/run_tests.py
@@ -1,9 +1,13 @@
#!/usr/bin/env python
-from line_tests import LineTest
-from document_tests import DocumentTest
-from inputstream_tests import InputStreamTest
-from key_binding_tests import KeyBindingTest
-from screen_tests import ScreenTest
+from __future__ import unicode_literals
+
+from line_tests import *
+from document_tests import *
+from inputstream_tests import *
+from key_binding_tests import *
+from screen_tests import *
+from regular_languages_tests import *
+from layout_tests import *
import unittest