author     Josh Soref <jsoref@users.noreply.github.com>  2017-11-11 19:03:10 -0500
committer  Rich Salz <rsalz@openssl.org>                 2017-11-11 19:03:10 -0500
commit     46f4e1bec51dc96fa275c168752aa34359d9ee51 (patch)
tree       c80b737d1fff479fd88f6c41175187ebad868299 /crypto/sha
parent     b4d0fa49d9d1a43792e58b0c8066bb23b9e53ef4 (diff)
Many spelling fixes/typos corrected.

Around 138 distinct errors found and fixed; thanks!

Reviewed-by: Kurt Roeckx <kurt@roeckx.be>
Reviewed-by: Tim Hudson <tjh@openssl.org>
Reviewed-by: Rich Salz <rsalz@openssl.org>
(Merged from https://github.com/openssl/openssl/pull/3459)
Diffstat (limited to 'crypto/sha')
-rw-r--r--  crypto/sha/asm/sha1-586.pl          |  4
-rw-r--r--  crypto/sha/asm/sha1-mb-x86_64.pl    | 10
-rwxr-xr-x  crypto/sha/asm/sha1-x86_64.pl       |  6
-rw-r--r--  crypto/sha/asm/sha256-586.pl        |  2
-rw-r--r--  crypto/sha/asm/sha256-mb-x86_64.pl  |  8
-rw-r--r--  crypto/sha/asm/sha512-586.pl        |  2
-rw-r--r--  crypto/sha/asm/sha512-armv8.pl      |  2
-rwxr-xr-x  crypto/sha/asm/sha512-parisc.pl     |  2
-rwxr-xr-x  crypto/sha/asm/sha512-ppc.pl        |  2
-rwxr-xr-x  crypto/sha/asm/sha512p8-ppc.pl      |  2
-rw-r--r--  crypto/sha/keccak1600.c             |  2
-rw-r--r--  crypto/sha/sha_locl.h               |  2
12 files changed, 22 insertions, 22 deletions
diff --git a/crypto/sha/asm/sha1-586.pl b/crypto/sha/asm/sha1-586.pl
index c6c977ab03..ca3763991c 100644
--- a/crypto/sha/asm/sha1-586.pl
+++ b/crypto/sha/asm/sha1-586.pl
@@ -35,7 +35,7 @@
# P4 +85%(!) +45%
#
# As you can see Pentium came out as looser:-( Yet I reckoned that
-# improvement on P4 outweights the loss and incorporate this
+# improvement on P4 outweighs the loss and incorporate this
# re-tuned code to 0.9.7 and later.
# ----------------------------------------------------------------
@@ -549,7 +549,7 @@ for($i=0;$i<20-4;$i+=2) {
# being implemented in SSSE3). Once 8 quadruples or 32 elements are
# collected, it switches to routine proposed by Max Locktyukhin.
#
-# Calculations inevitably require temporary reqisters, and there are
+# Calculations inevitably require temporary registers, and there are
# no %xmm registers left to spare. For this reason part of the ring
# buffer, X[2..4] to be specific, is offloaded to 3 quadriples ring
# buffer on the stack. Keep in mind that X[2] is alias X[-6], X[3] -
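
[Editor's note] The ring-buffer aliasing the comment describes (X[2] doubling as X[-6], and so on) is just modular indexing into a ring of 8 quadruples. A minimal C sketch of the idea, with illustrative names rather than anything from the generator:

    #include <stdint.h>

    /* Ring of 8 quadruples = 32 message-schedule elements. Indexing is
     * modulo 8, so X(2) and X(-6) name the same slot -- the aliasing
     * noted in the comment above. */
    typedef struct { uint32_t w[4]; } quad;  /* one 128-bit quadruple */

    static quad ring[8];

    static quad *X(int i)
    {
        return &ring[(unsigned)(i + 8) & 7]; /* wraps indices down to -8 */
    }
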
diff --git a/crypto/sha/asm/sha1-mb-x86_64.pl b/crypto/sha/asm/sha1-mb-x86_64.pl
index 56e15292a7..443b649830 100644
--- a/crypto/sha/asm/sha1-mb-x86_64.pl
+++ b/crypto/sha/asm/sha1-mb-x86_64.pl
@@ -444,7 +444,7 @@ for(;$i<80;$i++) { &BODY_20_39($i,@V); unshift(@V,pop(@V)); }
$code.=<<___;
movdqa (%rbx),@Xi[0] # pull counters
mov \$1,%ecx
- cmp 4*0(%rbx),%ecx # examinte counters
+ cmp 4*0(%rbx),%ecx # examine counters
pxor $t2,$t2
cmovge $Tbl,@ptr[0] # cancel input
cmp 4*1(%rbx),%ecx
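
[Editor's note] The counter check whose comment is fixed here is the multi-buffer dispatcher disabling exhausted lanes. A hedged C sketch of the idea; the lane count, names, and exact threshold encoding are illustrative, not the generator's:

    enum { LANES = 4 };

    static const unsigned char scratch[64];        /* dummy input block */

    /* Mirrors "mov $1,%ecx; cmp counter,%ecx; cmovge": once a lane's
     * remaining-block counter falls to or below the sentinel, its input
     * pointer is redirected to a harmless scratch table so the vector
     * rounds can proceed without reading past that lane's real data. */
    static void cancel_exhausted_lanes(const unsigned char *ptr[LANES],
                                       const int counter[LANES])
    {
        for (int i = 0; i < LANES; i++)
            if (1 >= counter[i])
                ptr[i] = scratch;                  /* cancel input */
    }
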
@@ -1508,10 +1508,10 @@ avx2_handler:
mov -48(%rax),%r15
mov %rbx,144($context) # restore context->Rbx
mov %rbp,160($context) # restore context->Rbp
- mov %r12,216($context) # restore cotnext->R12
- mov %r13,224($context) # restore cotnext->R13
- mov %r14,232($context) # restore cotnext->R14
- mov %r15,240($context) # restore cotnext->R15
+ mov %r12,216($context) # restore context->R12
+ mov %r13,224($context) # restore context->R13
+ mov %r14,232($context) # restore context->R14
+ mov %r15,240($context) # restore context->R15
lea -56-10*16(%rax),%rsi
lea 512($context),%rdi # &context.Xmm6
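
[Editor's note] These handlers are Windows SEH unwind callbacks: they copy the saved nonvolatile registers back into the CONTEXT record at fixed byte offsets (144 = Rbx, 160 = Rbp, 216 through 240 = R12..R15, per the comments above). A C sketch of that store pattern, as illustration only, not the real handler:

    #include <stdint.h>
    #include <string.h>

    /* Write saved nonvolatile registers back into a raw CONTEXT record
     * at the same byte offsets the assembly uses above. */
    static void restore_nonvolatiles(unsigned char *context,
                                     uint64_t rbx, uint64_t rbp, uint64_t r12,
                                     uint64_t r13, uint64_t r14, uint64_t r15)
    {
        memcpy(context + 144, &rbx, 8);  /* context->Rbx */
        memcpy(context + 160, &rbp, 8);  /* context->Rbp */
        memcpy(context + 216, &r12, 8);  /* context->R12 */
        memcpy(context + 224, &r13, 8);  /* context->R13 */
        memcpy(context + 232, &r14, 8);  /* context->R14 */
        memcpy(context + 240, &r15, 8);  /* context->R15 */
    }
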
diff --git a/crypto/sha/asm/sha1-x86_64.pl b/crypto/sha/asm/sha1-x86_64.pl
index 8b7bbfc261..60819f6186 100755
--- a/crypto/sha/asm/sha1-x86_64.pl
+++ b/crypto/sha/asm/sha1-x86_64.pl
@@ -1984,9 +1984,9 @@ ssse3_handler:
mov -40(%rax),%r14
mov %rbx,144($context) # restore context->Rbx
mov %rbp,160($context) # restore context->Rbp
- mov %r12,216($context) # restore cotnext->R12
- mov %r13,224($context) # restore cotnext->R13
- mov %r14,232($context) # restore cotnext->R14
+ mov %r12,216($context) # restore context->R12
+ mov %r13,224($context) # restore context->R13
+ mov %r14,232($context) # restore context->R14
.Lcommon_seh_tail:
mov 8(%rax),%rdi
diff --git a/crypto/sha/asm/sha256-586.pl b/crypto/sha/asm/sha256-586.pl
index bc25b2dab5..48830728e7 100644
--- a/crypto/sha/asm/sha256-586.pl
+++ b/crypto/sha/asm/sha256-586.pl
@@ -18,7 +18,7 @@
#
# Performance improvement over compiler generated code varies from
# 10% to 40% [see below]. Not very impressive on some µ-archs, but
-# it's 5 times smaller and optimizies amount of writes.
+# it's 5 times smaller and optimizes amount of writes.
#
# May 2012.
#
diff --git a/crypto/sha/asm/sha256-mb-x86_64.pl b/crypto/sha/asm/sha256-mb-x86_64.pl
index 24276f9e67..73978dbd81 100644
--- a/crypto/sha/asm/sha256-mb-x86_64.pl
+++ b/crypto/sha/asm/sha256-mb-x86_64.pl
@@ -1508,10 +1508,10 @@ avx2_handler:
mov -48(%rax),%r15
mov %rbx,144($context) # restore context->Rbx
mov %rbp,160($context) # restore context->Rbp
- mov %r12,216($context) # restore cotnext->R12
- mov %r13,224($context) # restore cotnext->R13
- mov %r14,232($context) # restore cotnext->R14
- mov %r15,240($context) # restore cotnext->R15
+ mov %r12,216($context) # restore context->R12
+ mov %r13,224($context) # restore context->R13
+ mov %r14,232($context) # restore context->R14
+ mov %r15,240($context) # restore context->R15
lea -56-10*16(%rax),%rsi
lea 512($context),%rdi # &context.Xmm6
diff --git a/crypto/sha/asm/sha512-586.pl b/crypto/sha/asm/sha512-586.pl
index 35006e8b9f..867ce30b97 100644
--- a/crypto/sha/asm/sha512-586.pl
+++ b/crypto/sha/asm/sha512-586.pl
@@ -42,7 +42,7 @@
# (*) whichever best applicable.
# (**) x86_64 assembler performance is presented for reference
# purposes, the results are for integer-only code.
-# (***) paddq is increadibly slow on Atom.
+# (***) paddq is incredibly slow on Atom.
#
# IALU code-path is optimized for elder Pentiums. On vanilla Pentium
# performance improvement over compiler generated code reaches ~60%,
diff --git a/crypto/sha/asm/sha512-armv8.pl b/crypto/sha/asm/sha512-armv8.pl
index c55efb3085..88ad44ee5e 100644
--- a/crypto/sha/asm/sha512-armv8.pl
+++ b/crypto/sha/asm/sha512-armv8.pl
@@ -35,7 +35,7 @@
# on Cortex-A53 (or by 4 cycles per round).
# (***) Super-impressive coefficients over gcc-generated code are
# indication of some compiler "pathology", most notably code
-# generated with -mgeneral-regs-only is significanty faster
+# generated with -mgeneral-regs-only is significantly faster
# and the gap is only 40-90%.
#
# October 2016.
diff --git a/crypto/sha/asm/sha512-parisc.pl b/crypto/sha/asm/sha512-parisc.pl
index 1469bedbd6..5a082ba623 100755
--- a/crypto/sha/asm/sha512-parisc.pl
+++ b/crypto/sha/asm/sha512-parisc.pl
@@ -773,7 +773,7 @@ foreach (split("\n",$code)) {
s/shd\s+(%r[0-9]+),(%r[0-9]+),([0-9]+)/
$3>31 ? sprintf("shd\t%$2,%$1,%d",$3-32) # rotation for >=32
: sprintf("shd\t%$1,%$2,%d",$3)/e or
- # translate made up instructons: _ror, _shr, _align, _shl
+ # translate made up instructions: _ror, _shr, _align, _shl
s/_ror(\s+)(%r[0-9]+),/
($SZ==4 ? "shd" : "shrpd")."$1$2,$2,"/e or
diff --git a/crypto/sha/asm/sha512-ppc.pl b/crypto/sha/asm/sha512-ppc.pl
index fe95b01509..71699f6637 100755
--- a/crypto/sha/asm/sha512-ppc.pl
+++ b/crypto/sha/asm/sha512-ppc.pl
@@ -26,7 +26,7 @@
#
# (*) 64-bit code in 32-bit application context, which actually is
# on TODO list. It should be noted that for safe deployment in
-# 32-bit *mutli-threaded* context asyncronous signals should be
+# 32-bit *multi-threaded* context asynchronous signals should be
# blocked upon entry to SHA512 block routine. This is because
# 32-bit signaling procedure invalidates upper halves of GPRs.
# Context switch procedure preserves them, but not signaling:-(
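
[Editor's note] The deployment caveat above amounts to bracketing the block routine with a signal mask in 32-bit multi-threaded builds. A hedged sketch of that discipline; sha512_block_data_order is used as the conventional OpenSSL name for the block routine, and its exact name and prototype here are assumptions:

    #include <signal.h>
    #include <stddef.h>
    #include <stdint.h>

    void sha512_block_data_order(uint64_t *state, const void *in, size_t num);

    /* Block asynchronous signals for the duration of the block routine
     * so a 32-bit signal handler cannot clobber the upper halves of the
     * 64-bit GPRs mid-computation, then restore the previous mask. */
    void sha512_blocks_signal_safe(uint64_t *state, const void *in, size_t num)
    {
        sigset_t all, saved;

        sigfillset(&all);
        pthread_sigmask(SIG_BLOCK, &all, &saved);
        sha512_block_data_order(state, in, num);
        pthread_sigmask(SIG_SETMASK, &saved, NULL);
    }
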
diff --git a/crypto/sha/asm/sha512p8-ppc.pl b/crypto/sha/asm/sha512p8-ppc.pl
index 5457c4aa16..93dfef20a9 100755
--- a/crypto/sha/asm/sha512p8-ppc.pl
+++ b/crypto/sha/asm/sha512p8-ppc.pl
@@ -25,7 +25,7 @@
# sha1-ppc.pl and 1.6x slower than aes-128-cbc. Another interesting
# result is degree of computational resources' utilization. POWER8 is
# "massively multi-threaded chip" and difference between single- and
-# maximum multi-process benchmark results tells that utlization is
+# maximum multi-process benchmark results tells that utilization is
# whooping 94%. For sha512-ppc.pl we get [not unimpressive] 84% and
# for sha1-ppc.pl - 73%. 100% means that multi-process result equals
# to single-process one, given that all threads end up on the same
diff --git a/crypto/sha/keccak1600.c b/crypto/sha/keccak1600.c
index d925734a17..e7223486af 100644
--- a/crypto/sha/keccak1600.c
+++ b/crypto/sha/keccak1600.c
@@ -1055,7 +1055,7 @@ static uint64_t BitDeinterleave(uint64_t Ai)
* as blocksize. It is commonly (1600 - 256*n)/8, e.g. 168, 136, 104,
* 72, but can also be (1600 - 448)/8 = 144. All this means that message
* padding and intermediate sub-block buffering, byte- or bitwise, is
- * caller's reponsibility.
+ * caller's responsibility.
*/
size_t SHA3_absorb(uint64_t A[5][5], const unsigned char *inp, size_t len,
size_t r)
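
[Editor's note] Given the comment's contract -- SHA3_absorb consumes whole r-byte blocks and leaves padding and sub-block buffering to the caller -- here is a hedged one-shot SHA3-256 sketch (r = (1600 - 2*256)/8 = 136). SHA3_squeeze's prototype is assumed from the same file, and 0x06/0x80 are the standard SHA-3 domain-separation and pad10*1 bytes:

    #include <stddef.h>
    #include <stdint.h>
    #include <string.h>

    size_t SHA3_absorb(uint64_t A[5][5], const unsigned char *inp, size_t len,
                       size_t r);
    void SHA3_squeeze(uint64_t A[5][5], unsigned char *out, size_t len,
                      size_t r);

    void sha3_256_oneshot(unsigned char md[32],
                          const unsigned char *msg, size_t len)
    {
        uint64_t A[5][5];
        unsigned char block[136];
        const size_t r = 136;               /* SHA3-256 rate in bytes */
        size_t rem;

        memset(A, 0, sizeof(A));
        rem = SHA3_absorb(A, msg, len, r);  /* absorbs all whole blocks,
                                               returns leftover byte count */

        memset(block, 0, r);
        memcpy(block, msg + (len - rem), rem);
        block[rem] = 0x06;                  /* SHA-3 domain bits + pad10*1 */
        block[r - 1] |= 0x80;
        SHA3_absorb(A, block, r, r);        /* final padded block */

        SHA3_squeeze(A, md, 32, r);
    }
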
diff --git a/crypto/sha/sha_locl.h b/crypto/sha/sha_locl.h
index 93b0f1ac01..4e5a090382 100644
--- a/crypto/sha/sha_locl.h
+++ b/crypto/sha/sha_locl.h
@@ -68,7 +68,7 @@ int HASH_INIT(SHA_CTX *c)
/*
* As pointed out by Wei Dai, F() below can be simplified to the code in
- * F_00_19. Wei attributes these optimisations to Peter Gutmann's SHS code,
+ * F_00_19. Wei attributes these optimizations to Peter Gutmann's SHS code,
* and he attributes it to Rich Schroeppel.
* #define F(x,y,z) (((x) & (y)) | ((~(x)) & (z)))
* I've just become aware of another tweak to be made, again from Wei Dai,
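
[Editor's note] The simplification the comment credits to Peter Gutmann's SHS code (and he to Rich Schroeppel) rewrites the select function with one AND and two XORs. A small self-checking sketch; F_OPT is an illustrative name for the optimized form:

    #include <assert.h>
    #include <stdint.h>

    #define F(x,y,z)     (((x) & (y)) | ((~(x)) & (z)))  /* original */
    #define F_OPT(x,y,z) ((((y) ^ (z)) & (x)) ^ (z))     /* one op fewer */

    /* Both forms compute bitwise "x ? y : z"; since they are bitwise,
     * checking every 0/1 combination per argument proves equality. */
    int main(void)
    {
        for (uint32_t x = 0; x < 2; x++)
            for (uint32_t y = 0; y < 2; y++)
                for (uint32_t z = 0; z < 2; z++)
                    assert(F(x, y, z) == F_OPT(x, y, z));
        return 0;
    }
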