author     Miroslav Šedivý <6774676+eumiro@users.noreply.github.com>   2021-02-12 20:34:56 +0100
committer  GitHub <noreply@github.com>                                 2021-02-12 21:34:56 +0200
commit     762fb4b8da98fdf6792e6c5586060ed37224f894 (patch)
tree       dca17819ff2a516988e2f74691dcef0554637464 /pgcli
parent     87ffae295edf4fb2a9c33c552b12f09921def29f (diff)
Modernize code to Python 3.6+ (#1229)
1. `class A(object)` can be written as `class A:`
2. replace `dict([…])` and `set([…])` with `{…}`
3. use f-strings or compact `.format`
4. use `yield from` instead of `yield` in a `for` loop
5. import `mock` from `unittest`
6. expect `OSError` instead of `IOError` or `select.error`
7. use Python 3 defaults for file reading and `super()`
8. remove redundant parentheses (keep those in tuples, though)
9. shorten set intersection instead of creating lists
10. backslashes in strings do not have to be escaped if the string is prefixed with `r`

(See the short sketch after this list for these patterns side by side.)
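The hunks below apply these patterns file by file. As a standalone, illustrative sketch (hypothetical names such as `Refresher` and `casing.txt`; not code taken from this commit), the main idioms look like this before and after:

```python
# Illustrative only: hypothetical names, not code from this commit.

class Refresher:                      # 1. instead of: class Refresher(object):
    def __init__(self, names):
        super().__init__()            # 7. instead of: super(Refresher, self).__init__()
        # 2. dict/set comprehensions and literals
        self.casing = {n.lower(): n for n in names}   # instead of: dict((n.lower(), n) for n in names)
        self.kinds = {"r", "p", "f"}                  # instead of: set(["r", "p", "f"])

    def rows(self, cursor):
        yield from cursor             # 4. instead of: for row in cursor: yield row

# 3. f-strings
user, host = "postgres", "localhost"
key = f"{user}@{host}"                # instead of: "%s@%s" % (user, host)

# 6./7. OSError covers IOError; open() defaults to text-mode reading
try:
    with open("casing.txt") as f:     # instead of: open("casing.txt", "r")
        words = [line.strip() for line in f]
except OSError:                       # IOError is an alias of OSError on Python 3
    words = []

# 10. raw strings avoid doubled backslashes
prompt = r"\u@\h:\d> "                # instead of: "\\u@\\h:\\d> "
```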
Diffstat (limited to 'pgcli')
-rw-r--r--  pgcli/completion_refresher.py          4
-rw-r--r--  pgcli/magic.py                         2
-rw-r--r--  pgcli/main.py                         28
-rw-r--r--  pgcli/packages/parseutils/meta.py      2
-rw-r--r--  pgcli/packages/parseutils/tables.py    3
-rw-r--r--  pgcli/packages/prioritization.py       4
-rw-r--r--  pgcli/packages/sqlcompletion.py        2
-rw-r--r--  pgcli/pgcompleter.py                  32
-rw-r--r--  pgcli/pgexecute.py                    29
9 files changed, 46 insertions, 60 deletions
diff --git a/pgcli/completion_refresher.py b/pgcli/completion_refresher.py
index cf0879fd..3e847b09 100644
--- a/pgcli/completion_refresher.py
+++ b/pgcli/completion_refresher.py
@@ -6,7 +6,7 @@ from .pgcompleter import PGCompleter
from .pgexecute import PGExecute
-class CompletionRefresher(object):
+class CompletionRefresher:
refreshers = OrderedDict()
@@ -141,7 +141,7 @@ def refresh_casing(completer, executor):
with open(casing_file, "w") as f:
f.write(casing_prefs)
if os.path.isfile(casing_file):
- with open(casing_file, "r") as f:
+ with open(casing_file) as f:
completer.extend_casing([line.strip() for line in f])
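For context on the `open(casing_file)` change above: in Python 3, `open()` defaults to read-only text mode, so dropping the explicit `"r"` is purely cosmetic. A tiny self-contained check with a throwaway file (hypothetical contents, not pgcli's casing file format):

```python
import os
import tempfile

# Write a throwaway file, then read it back the way the hunk above does.
path = os.path.join(tempfile.mkdtemp(), "casing")
with open(path, "w") as f:
    f.write("SELECT\nTableName\n")

with open(path) as f:                 # same as open(path, "r"): read-only, text mode
    casing = [line.strip() for line in f]

assert casing == ["SELECT", "TableName"]
```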
diff --git a/pgcli/magic.py b/pgcli/magic.py
index f58f4150..c2b60aa0 100644
--- a/pgcli/magic.py
+++ b/pgcli/magic.py
@@ -43,7 +43,7 @@ def pgcli_line_magic(line):
conn._pgcli = pgcli
# For convenience, print the connection alias
- print("Connected: {}".format(conn.name))
+ print(f"Connected: {conn.name}")
try:
pgcli.run_cli()
diff --git a/pgcli/main.py b/pgcli/main.py
index b1468985..7ab973a2 100644
--- a/pgcli/main.py
+++ b/pgcli/main.py
@@ -122,7 +122,7 @@ class PgCliQuitError(Exception):
pass
-class PGCli(object):
+class PGCli:
default_prompt = "\\u@\\h:\\d> "
max_len_prompt = 30
@@ -325,11 +325,11 @@ class PGCli(object):
if pattern not in TabularOutputFormatter().supported_formats:
raise ValueError()
self.table_format = pattern
- yield (None, None, None, "Changed table format to {}".format(pattern))
+ yield (None, None, None, f"Changed table format to {pattern}")
except ValueError:
- msg = "Table format {} not recognized. Allowed formats:".format(pattern)
+ msg = f"Table format {pattern} not recognized. Allowed formats:"
for table_type in TabularOutputFormatter().supported_formats:
- msg += "\n\t{}".format(table_type)
+ msg += f"\n\t{table_type}"
msg += "\nCurrently set to: %s" % self.table_format
yield (None, None, None, msg)
@@ -386,7 +386,7 @@ class PGCli(object):
try:
with open(os.path.expanduser(pattern), encoding="utf-8") as f:
query = f.read()
- except IOError as e:
+ except OSError as e:
return [(None, None, None, str(e), "", False, True)]
if self.destructive_warning and confirm_destructive_query(query) is False:
@@ -407,7 +407,7 @@ class PGCli(object):
if not os.path.isfile(filename):
try:
open(filename, "w").close()
- except IOError as e:
+ except OSError as e:
self.output_file = None
message = str(e) + "\nFile output disabled"
return [(None, None, None, message, "", False, True)]
@@ -479,7 +479,7 @@ class PGCli(object):
service_config, file = parse_service_info(service)
if service_config is None:
click.secho(
- "service '%s' was not found in %s" % (service, file), err=True, fg="red"
+ f"service '{service}' was not found in {file}", err=True, fg="red"
)
exit(1)
self.connect(
@@ -515,7 +515,7 @@ class PGCli(object):
passwd = os.environ.get("PGPASSWORD", "")
# Find password from store
- key = "%s@%s" % (user, host)
+ key = f"{user}@{host}"
keyring_error_message = dedent(
"""\
{}
@@ -677,7 +677,7 @@ class PGCli(object):
click.echo(text, file=f)
click.echo("\n".join(output), file=f)
click.echo("", file=f) # extra newline
- except IOError as e:
+ except OSError as e:
click.secho(str(e), err=True, fg="red")
else:
if output:
@@ -753,11 +753,7 @@ class PGCli(object):
while self.watch_command:
try:
query = self.execute_command(self.watch_command)
- click.echo(
- "Waiting for {0} seconds before repeating".format(
- timing
- )
- )
+ click.echo(f"Waiting for {timing} seconds before repeating")
sleep(timing)
except KeyboardInterrupt:
self.watch_command = None
@@ -1049,7 +1045,7 @@ class PGCli(object):
str(self.pgexecute.port) if self.pgexecute.port is not None else "5432",
)
string = string.replace("\\i", str(self.pgexecute.pid) or "(none)")
- string = string.replace("\\#", "#" if (self.pgexecute.superuser) else ">")
+ string = string.replace("\\#", "#" if self.pgexecute.superuser else ">")
string = string.replace("\\n", "\n")
return string
@@ -1384,7 +1380,7 @@ def is_mutating(status):
if not status:
return False
- mutating = set(["insert", "update", "delete"])
+ mutating = {"insert", "update", "delete"}
return status.split(None, 1)[0].lower() in mutating
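Two of the main.py changes are easy to verify in isolation: `IOError` has been an alias of `OSError` since Python 3.3 (PEP 3151), so the broader `except OSError` catches exactly the same errors, and the set literal in `is_mutating` behaves like the old `set([...])` for membership tests. A standalone check (re-stating `is_mutating` from the hunk above purely as an example):

```python
# IOError was folded into OSError in Python 3.3 (PEP 3151).
assert IOError is OSError

def is_mutating(status):
    if not status:
        return False
    mutating = {"insert", "update", "delete"}      # instead of: set(["insert", "update", "delete"])
    return status.split(None, 1)[0].lower() in mutating

assert is_mutating("UPDATE users SET name = 'x'")
assert not is_mutating("SELECT 1")
```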
diff --git a/pgcli/packages/parseutils/meta.py b/pgcli/packages/parseutils/meta.py
index 108c01a3..333cab55 100644
--- a/pgcli/packages/parseutils/meta.py
+++ b/pgcli/packages/parseutils/meta.py
@@ -50,7 +50,7 @@ def parse_defaults(defaults_string):
yield current
-class FunctionMetadata(object):
+class FunctionMetadata:
def __init__(
self,
schema_name,
diff --git a/pgcli/packages/parseutils/tables.py b/pgcli/packages/parseutils/tables.py
index 0ec3e693..aaa676cc 100644
--- a/pgcli/packages/parseutils/tables.py
+++ b/pgcli/packages/parseutils/tables.py
@@ -42,8 +42,7 @@ def extract_from_part(parsed, stop_at_punctuation=True):
for item in parsed.tokens:
if tbl_prefix_seen:
if is_subselect(item):
- for x in extract_from_part(item, stop_at_punctuation):
- yield x
+ yield from extract_from_part(item, stop_at_punctuation)
elif stop_at_punctuation and item.ttype is Punctuation:
return
# An incomplete nested select won't be recognized correctly as a
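`yield from` delegates to the nested generator exactly like the loop it replaces, which matters in `extract_from_part` because it recurses into sub-selects. A minimal recursive example (a hypothetical nested-list walker, not sqlparse tokens):

```python
def flatten(items):
    """Yield the leaves of an arbitrarily nested list."""
    for item in items:
        if isinstance(item, list):
            yield from flatten(item)   # same as: for x in flatten(item): yield x
        else:
            yield item

assert list(flatten([1, [2, [3, 4]], 5])) == [1, 2, 3, 4, 5]
```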
diff --git a/pgcli/packages/prioritization.py b/pgcli/packages/prioritization.py
index e92dcbb6..f5a9cb57 100644
--- a/pgcli/packages/prioritization.py
+++ b/pgcli/packages/prioritization.py
@@ -16,10 +16,10 @@ def _compile_regex(keyword):
keywords = get_literals("keywords")
-keyword_regexs = dict((kw, _compile_regex(kw)) for kw in keywords)
+keyword_regexs = {kw: _compile_regex(kw) for kw in keywords}
-class PrevalenceCounter(object):
+class PrevalenceCounter:
def __init__(self):
self.keyword_counts = defaultdict(int)
self.name_counts = defaultdict(int)
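The dict comprehension builds the same keyword-to-regex mapping as the old `dict((kw, _compile_regex(kw)) for kw in keywords)`, just more directly. A small sketch; the `_compile_regex` body here is an assumption about pgcli's helper (whole-word, case-insensitive match), included only to make the example runnable:

```python
import re

def _compile_regex(keyword):
    # Assumed behaviour, for illustration: whole-word, case-insensitive match.
    return re.compile(r"\b" + re.escape(keyword) + r"\b", re.IGNORECASE)

keywords = ["SELECT", "GROUP BY", "ORDER BY"]
keyword_regexs = {kw: _compile_regex(kw) for kw in keywords}   # instead of: dict((kw, _compile_regex(kw)) for kw in keywords)

assert keyword_regexs["SELECT"].search("select * from t")
```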
diff --git a/pgcli/packages/sqlcompletion.py b/pgcli/packages/sqlcompletion.py
index 6ef88595..63053018 100644
--- a/pgcli/packages/sqlcompletion.py
+++ b/pgcli/packages/sqlcompletion.py
@@ -47,7 +47,7 @@ Alias = namedtuple("Alias", ["aliases"])
Path = namedtuple("Path", [])
-class SqlStatement(object):
+class SqlStatement:
def __init__(self, full_text, text_before_cursor):
self.identifier = None
self.word_before_cursor = word_before_cursor = last_word(
diff --git a/pgcli/pgcompleter.py b/pgcli/pgcompleter.py
index 9c95a01c..79be274e 100644
--- a/pgcli/pgcompleter.py
+++ b/pgcli/pgcompleter.py
@@ -83,7 +83,7 @@ class PGCompleter(Completer):
reserved_words = set(get_literals("reserved"))
def __init__(self, smart_completion=True, pgspecial=None, settings=None):
- super(PGCompleter, self).__init__()
+ super().__init__()
self.smart_completion = smart_completion
self.pgspecial = pgspecial
self.prioritizer = PrevalenceCounter()
@@ -177,7 +177,7 @@ class PGCompleter(Completer):
:return:
"""
# casing should be a dict {lowercasename:PreferredCasingName}
- self.casing = dict((word.lower(), word) for word in words)
+ self.casing = {word.lower(): word for word in words}
def extend_relations(self, data, kind):
"""extend metadata for tables or views.
@@ -279,8 +279,8 @@ class PGCompleter(Completer):
fk = ForeignKey(
parentschema, parenttable, parcol, childschema, childtable, childcol
)
- childcolmeta.foreignkeys.append((fk))
- parcolmeta.foreignkeys.append((fk))
+ childcolmeta.foreignkeys.append(fk)
+ parcolmeta.foreignkeys.append(fk)
def extend_datatypes(self, type_data):
@@ -424,7 +424,7 @@ class PGCompleter(Completer):
# the same priority as unquoted names.
lexical_priority = (
tuple(
- 0 if c in (" _") else -ord(c)
+ 0 if c in " _" else -ord(c)
for c in self.unescape_name(item.lower())
)
+ (1,)
@@ -517,9 +517,9 @@ class PGCompleter(Completer):
# require_last_table is used for 'tb11 JOIN tbl2 USING (...' which should
# suggest only columns that appear in the last table and one more
ltbl = tables[-1].ref
- other_tbl_cols = set(
+ other_tbl_cols = {
c.name for t, cs in scoped_cols.items() if t.ref != ltbl for c in cs
- )
+ }
scoped_cols = {
t: [col for col in cols if col.name in other_tbl_cols]
for t, cols in scoped_cols.items()
@@ -574,7 +574,7 @@ class PGCompleter(Completer):
tbls - TableReference iterable of tables already in query
"""
tbl = self.case(tbl)
- tbls = set(normalize_ref(t.ref) for t in tbls)
+ tbls = {normalize_ref(t.ref) for t in tbls}
if self.generate_aliases:
tbl = generate_alias(self.unescape_name(tbl))
if normalize_ref(tbl) not in tbls:
@@ -589,10 +589,10 @@ class PGCompleter(Completer):
tbls = suggestion.table_refs
cols = self.populate_scoped_cols(tbls)
# Set up some data structures for efficient access
- qualified = dict((normalize_ref(t.ref), t.schema) for t in tbls)
- ref_prio = dict((normalize_ref(t.ref), n) for n, t in enumerate(tbls))
- refs = set(normalize_ref(t.ref) for t in tbls)
- other_tbls = set((t.schema, t.name) for t in list(cols)[:-1])
+ qualified = {normalize_ref(t.ref): t.schema for t in tbls}
+ ref_prio = {normalize_ref(t.ref): n for n, t in enumerate(tbls)}
+ refs = {normalize_ref(t.ref) for t in tbls}
+ other_tbls = {(t.schema, t.name) for t in list(cols)[:-1]}
joins = []
# Iterate over FKs in existing tables to find potential joins
fks = (
@@ -667,7 +667,7 @@ class PGCompleter(Completer):
return d
# Tables that are closer to the cursor get higher prio
- ref_prio = dict((tbl.ref, num) for num, tbl in enumerate(suggestion.table_refs))
+ ref_prio = {tbl.ref: num for num, tbl in enumerate(suggestion.table_refs)}
# Map (schema, table, col) to tables
coldict = list_dict(
((t.schema, t.name, c.name), t) for t, c in cols if t.ref != lref
@@ -721,9 +721,7 @@ class PGCompleter(Completer):
# Function overloading means we way have multiple functions of the same
# name at this point, so keep unique names only
all_functions = self.populate_functions(suggestion.schema, filt)
- funcs = set(
- self._make_cand(f, alias, suggestion, arg_mode) for f in all_functions
- )
+ funcs = {self._make_cand(f, alias, suggestion, arg_mode) for f in all_functions}
matches = self.find_matches(word_before_cursor, funcs, meta="function")
@@ -953,7 +951,7 @@ class PGCompleter(Completer):
:return: {TableReference:{colname:ColumnMetaData}}
"""
- ctes = dict((normalize_ref(t.name), t.columns) for t in local_tbls)
+ ctes = {normalize_ref(t.name): t.columns for t in local_tbls}
columns = OrderedDict()
meta = self.dbmetadata
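One pgcompleter.py removal is worth spelling out: `(" _")` is not a tuple, just the string `" _"` wrapped in redundant parentheses, so `c in (" _")` was already a per-character test against a space and an underscore; dropping the parentheses changes nothing. The same file also swaps `set(...)`/`dict(...)` generator calls for comprehensions. A quick standalone check (hypothetical table data):

```python
# Parentheses around a single string do not make a tuple.
assert (" _") == " _"
assert "_" in " _" and "x" not in " _"

# Lexical priority as in the hunk above: space/underscore sort ahead of letters.
name = "user_id"
lexical_priority = tuple(0 if c in " _" else -ord(c) for c in name) + (1,)
assert lexical_priority[4] == 0        # the "_" position maps to 0

# Set comprehension instead of set(generator expression).
refs = [("public", "users"), ("public", "orders"), ("public", "users")]
other_tbls = {(schema, table) for schema, table in refs}
assert other_tbls == {("public", "users"), ("public", "orders")}
```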
diff --git a/pgcli/pgexecute.py b/pgcli/pgexecute.py
index d34bf26d..5cba7845 100644
--- a/pgcli/pgexecute.py
+++ b/pgcli/pgexecute.py
@@ -49,7 +49,7 @@ def _wait_select(conn):
conn.cancel()
# the loop will be broken by a server error
continue
- except select.error as e:
+ except OSError as e:
errno = e.args[0]
if errno != 4:
raise
@@ -127,7 +127,7 @@ def register_hstore_typecaster(conn):
pass
-class PGExecute(object):
+class PGExecute:
# The boolean argument to the current_schemas function indicates whether
# implicit schemas, e.g. pg_catalog
@@ -485,7 +485,7 @@ class PGExecute(object):
try:
cur.execute(sql, (spec,))
except psycopg2.ProgrammingError:
- raise RuntimeError("View {} does not exist.".format(spec))
+ raise RuntimeError(f"View {spec} does not exist.")
result = cur.fetchone()
view_type = "MATERIALIZED" if result[2] == "m" else ""
return template.format(*result + (view_type,))
@@ -501,7 +501,7 @@ class PGExecute(object):
result = cur.fetchone()
return result[0]
except psycopg2.ProgrammingError:
- raise RuntimeError("Function {} does not exist.".format(spec))
+ raise RuntimeError(f"Function {spec} does not exist.")
def schemata(self):
"""Returns a list of schema names in the database"""
@@ -527,21 +527,18 @@ class PGExecute(object):
sql = cur.mogrify(self.tables_query, [kinds])
_logger.debug("Tables Query. sql: %r", sql)
cur.execute(sql)
- for row in cur:
- yield row
+ yield from cur
def tables(self):
"""Yields (schema_name, table_name) tuples"""
- for row in self._relations(kinds=["r", "p", "f"]):
- yield row
+ yield from self._relations(kinds=["r", "p", "f"])
def views(self):
"""Yields (schema_name, view_name) tuples.
Includes both views and and materialized views
"""
- for row in self._relations(kinds=["v", "m"]):
- yield row
+ yield from self._relations(kinds=["v", "m"])
def _columns(self, kinds=("r", "p", "f", "v", "m")):
"""Get column metadata for tables and views
@@ -599,16 +596,13 @@ class PGExecute(object):
sql = cur.mogrify(columns_query, [kinds])
_logger.debug("Columns Query. sql: %r", sql)
cur.execute(sql)
- for row in cur:
- yield row
+ yield from cur
def table_columns(self):
- for row in self._columns(kinds=["r", "p", "f"]):
- yield row
+ yield from self._columns(kinds=["r", "p", "f"])
def view_columns(self):
- for row in self._columns(kinds=["v", "m"]):
- yield row
+ yield from self._columns(kinds=["v", "m"])
def databases(self):
with self.conn.cursor() as cur:
@@ -804,8 +798,7 @@ class PGExecute(object):
"""
_logger.debug("Datatypes Query. sql: %r", query)
cur.execute(query)
- for row in cur:
- yield row
+ yield from cur
def casing(self):
"""Yields the most common casing for names used in db functions"""