author     Amjith Ramanujam <amjith@newrelic.com>    2014-12-24 23:56:12 -0800
committer  Amjith Ramanujam <amjith@newrelic.com>    2014-12-24 23:56:12 -0800
commit     09c5f28abecdfc19fd0667cba48a3aa52da5cc31 (patch)
tree       c16b3ca58a1a656cf918a41a936f37c359cefda0
parent     389777e89c0b006049e40a8484d94b15939b9adf (diff)
First attempt at dot completion and alias detection.
-rw-r--r--  pgcli/packages/parseutils.py     | 19
-rw-r--r--  pgcli/packages/sqlcompletion.py  | 59
-rw-r--r--  tests/test_sqlcompletion.py      | 12
3 files changed, 56 insertions, 34 deletions
diff --git a/pgcli/packages/parseutils.py b/pgcli/packages/parseutils.py
index bc9c6e47..4ae795af 100644
--- a/pgcli/packages/parseutils.py
+++ b/pgcli/packages/parseutils.py
@@ -7,7 +7,7 @@ cleanup_regex = {
         # This matches only alphanumerics and underscores.
         'alphanum_underscore': re.compile(r'(\w+)$'),
         # This matches everything except spaces, parens and comma.
-        'most_punctuations': re.compile(r'([^(),\s]+)$'),
+        'most_punctuations': re.compile(r'([^\.(),\s]+)$'),
         # This matches everything except a space.
         'all_punctuations': re.compile('([^\s]+)$'),
         }
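
The regex tweak above is the low-level enabler for dot completion: with \. added to the negated character class, a '.' now terminates the match, so the word under the cursor splits at the dot. A minimal before/after illustration using plain re (a sketch, not part of this patch):

    import re

    old_pattern = re.compile(r'([^(),\s]+)$')    # pattern before this commit
    new_pattern = re.compile(r'([^\.(),\s]+)$')  # after: '.' now ends the word

    text = 'SELECT t1.col'
    print(old_pattern.search(text).group(1))  # 't1.col' -- dot kept inside the word
    print(new_pattern.search(text).group(1))  # 'col'    -- only the part after the dot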
@@ -93,18 +93,23 @@ def extract_table_identifiers(token_stream):
     for item in token_stream:
         if isinstance(item, IdentifierList):
             for identifier in item.get_identifiers():
-                yield identifier.get_real_name()
+                real_name = identifier.get_real_name()
+                yield (real_name, identifier.get_alias() or real_name)
         elif isinstance(item, Identifier):
-            yield item.get_real_name()
+            real_name = item.get_real_name()
+            yield (real_name, item.get_alias() or real_name)
         elif isinstance(item, Function):
-            yield item.get_name()
+            yield (item.get_name(), item.get_name())
         # It's a bug to check for Keyword here, but in the example
         # above some tables names are identified as keywords...
         elif item.ttype is Keyword:
-            yield item.value
+            yield (item.value, item.value)
 
-def extract_tables(sql):
+def extract_tables(sql, include_alias=False):
     if not sql:
         return []
     stream = extract_from_part(sqlparse.parse(sql)[0])
-    return list(extract_table_identifiers(stream))
+    if include_alias:
+        return dict((alias, t) for t, alias in extract_table_identifiers(stream))
+    else:
+        return [x[0] for x in extract_table_identifiers(stream)]
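
With this change extract_tables has two output shapes. A usage sketch of the intended behaviour once the patch is applied; the exact values assume sqlparse reports 't1'/'t2' through get_alias(), which is what the new code relies on:

    from pgcli.packages.parseutils import extract_tables

    sql = 'SELECT * FROM tabl1 t1, tabl2 t2'

    # Default: unchanged shape, a list of real table names.
    print(extract_tables(sql))                      # ['tabl1', 'tabl2']

    # With include_alias=True: a dict mapping alias -> table name,
    # falling back to the table name itself when there is no alias.
    print(extract_tables(sql, include_alias=True))  # {'t1': 'tabl1', 't2': 'tabl2'}
    print(extract_tables('SELECT * FROM tabl', include_alias=True))  # {'tabl': 'tabl'}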
diff --git a/pgcli/packages/sqlcompletion.py b/pgcli/packages/sqlcompletion.py
index e7726eb9..ae630859 100644
--- a/pgcli/packages/sqlcompletion.py
+++ b/pgcli/packages/sqlcompletion.py
@@ -11,7 +11,6 @@ def suggest_type(full_text, text_before_cursor):
     A scope for a column category will be a list of tables.
     """
 
-    tables = extract_tables(full_text)
 
     word_before_cursor = last_word(text_before_cursor,
             include='all_punctuations')
@@ -22,45 +21,53 @@ def suggest_type(full_text, text_before_cursor):
     # partially typed string which renders the smart completion useless because
     # it will always return the list of keywords as completion.
     if word_before_cursor:
-        parsed = sqlparse.parse(
-                text_before_cursor[:-len(word_before_cursor)])
+        if word_before_cursor[-1] in ('.'):
+            parsed = sqlparse.parse(text_before_cursor)
+        else:
+            parsed = sqlparse.parse(
+                    text_before_cursor[:-len(word_before_cursor)])
     else:
         parsed = sqlparse.parse(text_before_cursor)
 
     # Need to check if `p` is not empty, since an empty string will result in
     # an empty tuple.
     p = parsed[0] if parsed else None
-    n = p and len(p.tokens) or 0
-    last_token = p and p.token_prev(n) or ''
-    last_token_v = last_token.value if last_token else ''
+    last_token = p and p.token_prev(len(p.tokens)) or ''
 
     def is_function_word(word):
-        return word and len(word) > 1 and word[-1] == '('
+        return word.endswith('(')
+
     if is_function_word(word_before_cursor):
-        return ('columns', tables)
+        return ('columns', extract_tables(full_text))
 
-    return (suggest_based_on_last_token(last_token_v, text_before_cursor),
-            tables)
+    return suggest_based_on_last_token(last_token, text_before_cursor, full_text)
 
+def suggest_based_on_last_token(token, text_before_cursor, full_text):
+    if isinstance(token, basestring):
+        token_v = token
+    else:
+        token_v = token.value
 
-def suggest_based_on_last_token(last_token_v, text_before_cursor):
-    if last_token_v.lower().endswith('('):
-        return 'columns'
-    if last_token_v.lower() in ('set', 'by', 'distinct'):
-        return 'columns'
-    elif last_token_v.lower() in ('select', 'where', 'having'):
-        return 'columns-and-functions'
-    elif last_token_v.lower() in ('from', 'update', 'into', 'describe'):
-        return 'tables'
-    elif last_token_v in ('d',):  # \d
-        return 'tables'
-    elif last_token_v.lower() in ('c', 'use'):  # \c
-        return 'databases'
-    elif last_token_v.endswith(','):
+    if token_v.lower().endswith('('):
+        return 'columns', extract_tables(full_text)
+    if token_v.lower() in ('set', 'by', 'distinct'):
+        return 'columns', extract_tables(full_text)
+    elif token_v.lower() in ('select', 'where', 'having'):
+        return 'columns-and-functions', extract_tables(full_text)
+    elif token_v.lower() in ('from', 'update', 'into', 'describe'):
+        return 'tables', []
+    elif token_v in ('d',):  # \d
+        return 'tables', []
+    elif token_v.lower() in ('c', 'use'):  # \c
+        return 'databases', []
+    elif token_v.endswith(','):
         prev_keyword = find_prev_keyword(text_before_cursor)
-        return suggest_based_on_last_token(prev_keyword, text_before_cursor)
+        return suggest_based_on_last_token(prev_keyword, text_before_cursor, full_text)
+    elif token_v.endswith('.'):
+        tables = extract_tables(full_text, include_alias=True)
+        return 'columns', [tables.get(token.token_first().value)]
     else:
-        return 'keywords'
+        return 'keywords', []
 
 def find_prev_keyword(sql):
     if not sql.strip():
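
With the alias dict in place, the new endswith('.') branch resolves whatever precedes the dot (an alias or a bare table name) back to its table and suggests that table's columns. A quick interactive sketch that mirrors the new tests below:

    from pgcli.packages.sqlcompletion import suggest_type

    # Bare table name before the dot.
    print(suggest_type('SELECT tabl. FROM tabl', 'SELECT tabl.'))
    # ('columns', ['tabl'])

    # Alias before the dot resolves through the alias -> table mapping.
    print(suggest_type('SELECT t1. FROM tabl1 t1, tabl2 t2', 'SELECT t1.'))
    # ('columns', ['tabl1'])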
diff --git a/tests/test_sqlcompletion.py b/tests/test_sqlcompletion.py
index c4e3efae..394cd697 100644
--- a/tests/test_sqlcompletion.py
+++ b/tests/test_sqlcompletion.py
@@ -33,7 +33,7 @@ def test_col_comma_suggests_cols():
 def test_table_comma_suggests_tables():
     suggestion = suggest_type('SELECT a, b FROM tbl1, ',
             'SELECT a, b FROM tbl1, ')
-    assert suggestion == ('tables', ['tbl1'])
+    assert suggestion == ('tables', [])
 
 def test_into_suggests_tables():
     suggestion = suggest_type('INSERT INTO ',
@@ -44,3 +44,13 @@ def test_partially_typed_col_name_suggests_col_names():
     suggestion = suggest_type('SELECT * FROM tabl WHERE col_n',
             'SELECT * FROM tabl WHERE col_n')
     assert suggestion == ('columns-and-functions', ['tabl'])
+
+def test_dot_suggests_cols_of_a_table():
+    suggestion = suggest_type('SELECT tabl. FROM tabl',
+            'SELECT tabl.')
+    assert suggestion == ('columns', ['tabl'])
+
+def test_dot_suggests_cols_of_an_alias():
+    suggestion = suggest_type('SELECT t1. FROM tabl1 t1, tabl2 t2',
+            'SELECT t1.')
+    assert suggestion == ('columns', ['tabl1'])