author     Kian-Meng Ang <kianmeng.ang@gmail.com>    2022-10-01 18:34:41 +0800
committer  GitHub <noreply@github.com>               2022-10-01 03:34:41 -0700
commit     d9a73cd8eb66de4e6f75a0c883d6b29f4eea3831 (patch)
tree       9f76b2de643859abb1d9fd57f307f79202fd9025
parent     930cd9081af01c8f2f4bc506cd0123f3862e11ba (diff)
Fix typos (#1431)
Found via `codespell -L datas`.
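
For reference, a minimal sketch of reproducing the scan from the repository root, assuming `codespell` is installed (`pip install codespell`); the `-L datas` flag, taken from the commit message above, tells codespell to skip the word "datas", which it would otherwise flag:

```python
# Hypothetical reproduction of the scan that surfaced these typos.
# Assumes codespell is installed and we run from the repository root;
# `-L datas` excludes "datas" from the words codespell reports.
import subprocess

result = subprocess.run(
    ['codespell', '-L', 'datas'],
    capture_output=True,
    text=True,
)
# Each finding is printed as `path:line: word ==> suggestion`.
print(result.stdout)
```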
-rw-r--r--  docs/contributors/README.md     2
-rw-r--r--  docs/installation/generate.py   2
-rw-r--r--  extras/profiling/benchmarks.py  4
3 files changed, 4 insertions, 4 deletions
diff --git a/docs/contributors/README.md b/docs/contributors/README.md
index 20b3c278..9a739cd5 100644
--- a/docs/contributors/README.md
+++ b/docs/contributors/README.md
@@ -1,3 +1,3 @@
-Here we maintain a database of contributors, from which we generate credits on release blog posts and social medias.
+Here we maintain a database of contributors, from which we generate credits on release blog posts and social media.
For the HTTPie blog see: <https://httpie.io/blog>.
diff --git a/docs/installation/generate.py b/docs/installation/generate.py
index a67389dd..0597a3a4 100644
--- a/docs/installation/generate.py
+++ b/docs/installation/generate.py
@@ -55,7 +55,7 @@ def build_docs_structure(database: Database):
tree = database[KEY_DOC_STRUCTURE]
structure = []
for platform, tools_ids in tree.items():
- assert platform.isalnum(), f'{platform=} must be alpha-numeric for generated links to work'
+ assert platform.isalnum(), f'{platform=} must be alphanumeric for generated links to work'
platform_tools = [tools[tool_id] for tool_id in tools_ids]
structure.append((platform, platform_tools))
return structure
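
As a side note, a minimal sketch of what the `isalnum()` guard above catches; the platform names here are hypothetical examples, not entries from the real docs database:

```python
# str.isalnum() is True only when every character is a letter or digit,
# so a platform name that would break a generated anchor link fails the
# assertion early, with the offending value shown via the `{platform=}`
# self-documenting f-string (Python 3.8+).
for platform in ['macOS', 'Windows', 'Arch Linux']:
    print(f'{platform=} isalnum={platform.isalnum()}')
# platform='macOS' isalnum=True
# platform='Windows' isalnum=True
# platform='Arch Linux' isalnum=False  <- the space would break the link
```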
diff --git a/extras/profiling/benchmarks.py b/extras/profiling/benchmarks.py
index c7374f6a..9d409deb 100644
--- a/extras/profiling/benchmarks.py
+++ b/extras/profiling/benchmarks.py
@@ -13,7 +13,7 @@ please run `pyperf system tune` to get even more accurate results.
Examples:
- # Run everything as usual, the default is that we do 3 warmup runs
+ # Run everything as usual, the default is that we do 3 warm-up runs
# and 5 actual runs.
$ python extras/profiling/benchmarks.py
@@ -188,7 +188,7 @@ DownloadRunner('download', '`http --download :/big_file.txt` (3GB)', '3G')
def main() -> None:
# PyPerf will bring it's own argument parser, so configure the script.
# The somewhat fast and also precise enough configuration is this. We run
- # benchmarks 3 times to warmup (e.g especially for download benchmark, this
+ # benchmarks 3 times to warm up (e.g especially for download benchmark, this
# is important). And then 5 actual runs where we record.
sys.argv.extend(
['--worker', '--loops=1', '--warmup=3', '--values=5', '--processes=2']
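
For orientation, a standalone sketch of the pyperf configuration pattern this hunk documents: the flags are extended onto `sys.argv` before the `Runner` is constructed, because pyperf's `Runner` parses the command line itself. The `sum-1k` benchmark is a hypothetical stand-in, not one of the real HTTPie runners:

```python
# Minimal pyperf setup mirroring the flag list from benchmarks.py above:
# 3 warm-up runs, then 5 recorded values, across 2 worker processes.
import sys

import pyperf

sys.argv.extend(
    ['--worker', '--loops=1', '--warmup=3', '--values=5', '--processes=2']
)

runner = pyperf.Runner()
# bench_func repeatedly calls sum(range(1_000)) and records the timings.
runner.bench_func('sum-1k', sum, range(1_000))
```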