summaryrefslogtreecommitdiffstats
path: root/pgcli/packages/sqlcompletion.py
blob: cb5c51c03416c237fb4309250c46dfc6785029bb (plain)
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
from __future__ import print_function
import sqlparse
from parseutils import last_word, extract_tables


def suggest_type(full_text, text_before_cursor):
    """Takes the full_text that is typed so far and also the text before the
    cursor to suggest completion type and scope.

    Returns a tuple with a type of entity ('table', 'column' etc) and a scope.
    A scope for a column category will be a list of tables.
    """

    word_before_cursor = last_word(text_before_cursor,
            include='most_punctuations')

    # If we've partially typed a word then word_before_cursor won't be an empty
    # string. In that case we want to remove the partially typed string before
    # sending it to the sqlparser. Otherwise the last token will always be the
    # partially typed string which renders the smart completion useless because
    # it will always return the list of keywords as completion.
    if word_before_cursor:
        if word_before_cursor[-1] in ('(', '.'):
            # A trailing '(' or '.' is itself the completion trigger (function
            # call or qualified name), so keep it for the parser to see.
            parsed = sqlparse.parse(text_before_cursor)
        else:
            parsed = sqlparse.parse(
                    text_before_cursor[:-len(word_before_cursor)])
    else:
        parsed = sqlparse.parse(text_before_cursor)

    # Need to check that the parse result is not empty, since an empty string
    # yields an empty tuple from sqlparse.parse.
    statement = parsed[0] if parsed else None
    # Normalize a missing/None token to '' so the dispatch below never has to
    # special-case it.
    if statement:
        last_token = statement.token_prev(len(statement.tokens)) or ''
    else:
        last_token = ''

    return suggest_based_on_last_token(last_token, text_before_cursor, full_text)

def suggest_based_on_last_token(token, text_before_cursor, full_text):
    """Maps the token immediately preceding the cursor to a suggestion.

    :param token: either a sqlparse token object, a plain keyword string, or
        None/'' when nothing useful precedes the cursor.
    :param text_before_cursor: text typed up to the cursor position.
    :param full_text: the entire statement typed so far.
    :return: tuple (suggestion type, scope); scope is a list of tables for
        column suggestions, empty otherwise.
    """
    # A plain string has no `.value` attribute, a sqlparse token does.  None
    # (find_prev_keyword can return it when no keyword is found) is
    # normalized to '' so the endswith()/membership tests below are safe
    # instead of raising AttributeError.
    token_v = getattr(token, 'value', token) or ''
    lowered = token_v.lower()

    if lowered.endswith('('):
        # Inside a function call - suggest columns of the tables in scope.
        return 'columns', extract_tables(full_text)
    if lowered in ('set', 'by', 'distinct'):
        return 'columns', extract_tables(full_text)
    elif lowered in ('select', 'where', 'having'):
        return 'columns-and-functions', extract_tables(full_text)
    elif lowered in ('from', 'update', 'into', 'describe'):
        return 'tables', []
    elif token_v in ('d',):  # \d - case-sensitive psql meta-command
        return 'tables', []
    elif lowered in ('c', 'use'):  # \c
        return 'databases', []
    elif token_v.endswith(','):
        # Mid-list continuation - re-dispatch on the keyword that opened it.
        prev_keyword = find_prev_keyword(text_before_cursor)
        return suggest_based_on_last_token(prev_keyword, text_before_cursor,
                full_text)
    elif token_v.endswith('.'):
        # Qualified name such as `alias.` - resolve the alias to its table.
        tables = extract_tables(full_text, include_alias=True)
        return 'columns', [tables.get(token.token_first().value)]
    else:
        return 'keywords', []

def find_prev_keyword(sql):
    """Scans backwards through `sql` and returns the value of the nearest
    keyword (or opening parenthesis) before the end of the text.

    Returns None when the text is blank or contains neither a keyword nor
    an opening parenthesis.
    """
    if not sql.strip():
        return None

    # Walk the flattened token stream from the end towards the beginning.
    for t in reversed(list(sqlparse.parse(sql)[0].flatten())):
        if t.is_keyword or t.value == '(':
            return t.value

    # Nothing found - be explicit rather than falling off the end.
    return None