author     Antoine Cœur <coeur@gmx.fr>                              2019-07-02 22:29:29 +0800
committer  Dr. Matthias St. Pierre <Matthias.St.Pierre@ncp-e.com>   2019-07-31 19:48:30 +0200
commit     a5c83db4ae3b3a94cdb88658280c36b6a74377ef (patch)
tree       e5f8704a5dc631918c9d5404126d6b1e04c538c9 /crypto
parent     ca33a43fe21ace99a7e689f40a90e3ad42e794a0 (diff)
Fix Typos
CLA: trivial

Reviewed-by: Paul Dale <paul.dale@oracle.com>
Reviewed-by: Shane Lontis <shane.lontis@oracle.com>
Reviewed-by: Matthias St. Pierre <Matthias.St.Pierre@ncp-e.com>
(Merged from https://github.com/openssl/openssl/pull/9295)
Diffstat (limited to 'crypto')
-rw-r--r--  crypto/aes/asm/aes-s390x.pl             4
-rw-r--r--  crypto/asn1/a_time.c                    2
-rw-r--r--  crypto/bio/bss_dgram.c                  2
-rw-r--r--  crypto/bn/asm/mips.pl                   2
-rw-r--r--  crypto/bn/bn_div.c                      2
-rw-r--r--  crypto/bn/bn_lcl.h                      2
-rw-r--r--  crypto/dsa/dsa_ossl.c                   2
-rwxr-xr-x  crypto/ec/asm/ecp_nistz256-x86_64.pl    2
-rwxr-xr-x  crypto/ec/asm/x25519-ppc64.pl           4
-rw-r--r--  crypto/ec/ec_lcl.h                      2
-rw-r--r--  crypto/ec/ecp_nistp521.c                2
-rw-r--r--  crypto/ec/ecx_meth.c                    2
-rw-r--r--  crypto/init.c                           2
-rw-r--r--  crypto/lhash/lhash.c                    6
-rw-r--r--  crypto/rand/drbg_lib.c                  2
-rw-r--r--  crypto/rand/rand_lib.c                  2
-rw-r--r--  crypto/rsa/rsa_gen.c                    2
-rw-r--r--  crypto/sha/asm/sha512-sparcv9.pl        2
-rw-r--r--  crypto/sm2/sm2_sign.c                   4
-rw-r--r--  crypto/store/loader_file.c              4
20 files changed, 26 insertions, 26 deletions
diff --git a/crypto/aes/asm/aes-s390x.pl b/crypto/aes/asm/aes-s390x.pl
index 0c40059066..933a447b5c 100644
--- a/crypto/aes/asm/aes-s390x.pl
+++ b/crypto/aes/asm/aes-s390x.pl
@@ -38,14 +38,14 @@
# Implement AES_set_[en|de]crypt_key. Key schedule setup is avoided
# for 128-bit keys, if hardware support is detected.
-# Januray 2009.
+# January 2009.
#
# Add support for hardware AES192/256 and reschedule instructions to
# minimize/avoid Address Generation Interlock hazard and to favour
# dual-issue z10 pipeline. This gave ~25% improvement on z10 and
# almost 50% on z9. The gain is smaller on z10, because being dual-
# issue z10 makes it impossible to eliminate the interlock condition:
-# critial path is not long enough. Yet it spends ~24 cycles per byte
+# critical path is not long enough. Yet it spends ~24 cycles per byte
# processed with 128-bit key.
#
# Unlike previous version hardware support detection takes place only
diff --git a/crypto/asn1/a_time.c b/crypto/asn1/a_time.c
index 1babb96360..25c060cf8e 100644
--- a/crypto/asn1/a_time.c
+++ b/crypto/asn1/a_time.c
@@ -67,7 +67,7 @@ static void determine_days(struct tm *tm)
}
c = y / 100;
y %= 100;
- /* Zeller's congruance */
+ /* Zeller's congruence */
tm->tm_wday = (d + (13 * m) / 5 + y + y / 4 + c / 4 + 5 * c + 6) % 7;
}
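The comment fixed in this hunk names Zeller's congruence, the closed-form day-of-week calculation used by determine_days(). For reference, a minimal self-contained sketch of the same formula under the struct tm convention (0 = Sunday); the function and variable names here are illustrative and are not taken from a_time.c:

    #include <stdio.h>

    /* Zeller's congruence for the Gregorian calendar, returning the weekday
     * as 0 = Sunday .. 6 = Saturday.  Illustrative sketch only. */
    static int zeller_wday(int year, int month, int day)
    {
        int c, y, m;

        if (month < 3) {       /* January/February count as months 13/14 of the previous year */
            month += 12;
            year--;
        }
        m = month + 1;         /* matches the "13 * m / 5" form used in the hunk above */
        c = year / 100;
        y = year % 100;
        /* the classic form yields 0 = Saturday; the "+ 6" shifts to 0 = Sunday */
        return (day + (13 * m) / 5 + y + y / 4 + c / 4 + 5 * c + 6) % 7;
    }

    int main(void)
    {
        printf("%d\n", zeller_wday(2019, 7, 2));   /* 2019-07-02 was a Tuesday: prints 2 */
        return 0;
    }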
diff --git a/crypto/bio/bss_dgram.c b/crypto/bio/bss_dgram.c
index d5fe5bb5a8..26441c9ddf 100644
--- a/crypto/bio/bss_dgram.c
+++ b/crypto/bio/bss_dgram.c
@@ -784,7 +784,7 @@ static long dgram_ctrl(BIO *b, int cmd, long num, void *ptr)
* reasons. When BIO_CTRL_DGRAM_SET_PEEK_MODE was first defined its value
* was incorrectly clashing with BIO_CTRL_DGRAM_SCTP_SET_IN_HANDSHAKE. The
* value has been updated to a non-clashing value. However to preserve
- * binary compatiblity we now respond to both the old value and the new one
+ * binary compatibility we now respond to both the old value and the new one
*/
case BIO_CTRL_DGRAM_SCTP_SET_IN_HANDSHAKE:
case BIO_CTRL_DGRAM_SET_PEEK_MODE:
diff --git a/crypto/bn/asm/mips.pl b/crypto/bn/asm/mips.pl
index 38b796e375..a205189eb6 100644
--- a/crypto/bn/asm/mips.pl
+++ b/crypto/bn/asm/mips.pl
@@ -801,7 +801,7 @@ $code.=<<___;
#if 0
/*
* The bn_div_3_words entry point is re-used for constant-time interface.
- * Implementation is retained as hystorical reference.
+ * Implementation is retained as historical reference.
*/
.align 5
.globl bn_div_3_words
diff --git a/crypto/bn/bn_div.c b/crypto/bn/bn_div.c
index 3a6fa0a1b1..44948c99b2 100644
--- a/crypto/bn/bn_div.c
+++ b/crypto/bn/bn_div.c
@@ -258,7 +258,7 @@ int BN_div(BIGNUM *dv, BIGNUM *rm, const BIGNUM *num, const BIGNUM *divisor,
*
* - availability of constant-time bn_div_3_words;
* - dividend is at least as "wide" as divisor, limb-wise, zero-padded
- * if so requied, which shouldn't be a privacy problem, because
+ * if so required, which shouldn't be a privacy problem, because
* divisor's length is considered public;
*/
int bn_div_fixed_top(BIGNUM *dv, BIGNUM *rm, const BIGNUM *num,
diff --git a/crypto/bn/bn_lcl.h b/crypto/bn/bn_lcl.h
index 8a36db2e8b..6ec39ed41e 100644
--- a/crypto/bn/bn_lcl.h
+++ b/crypto/bn/bn_lcl.h
@@ -295,7 +295,7 @@ struct bn_gencb_st {
(b) > 23 ? 3 : 1)
/*
- * BN_mod_exp_mont_conttime is based on the assumption that the L1 data cache
+ * BN_mod_exp_mont_consttime is based on the assumption that the L1 data cache
* line width of the target processor is at least the following value.
*/
# define MOD_EXP_CTIME_MIN_CACHE_LINE_WIDTH ( 64 )
diff --git a/crypto/dsa/dsa_ossl.c b/crypto/dsa/dsa_ossl.c
index 9361fbdf0c..16161dcadf 100644
--- a/crypto/dsa/dsa_ossl.c
+++ b/crypto/dsa/dsa_ossl.c
@@ -256,7 +256,7 @@ static int dsa_sign_setup(DSA *dsa, BN_CTX *ctx_in,
* one bit longer than the modulus.
*
* There are some concerns about the efficacy of doing this. More
- * specificly refer to the discussion starting with:
+ * specifically refer to the discussion starting with:
* https://github.com/openssl/openssl/pull/7486#discussion_r228323705
* The fix is to rework BN so these gymnastics aren't required.
*/
diff --git a/crypto/ec/asm/ecp_nistz256-x86_64.pl b/crypto/ec/asm/ecp_nistz256-x86_64.pl
index 87149e7f68..10ccc6414a 100755
--- a/crypto/ec/asm/ecp_nistz256-x86_64.pl
+++ b/crypto/ec/asm/ecp_nistz256-x86_64.pl
@@ -1301,7 +1301,7 @@ ecp_nistz256_ord_mul_montx:
################################# reduction
mulx 8*0+128(%r14), $t0, $t1
- adcx $t0, $acc3 # guranteed to be zero
+ adcx $t0, $acc3 # guaranteed to be zero
adox $t1, $acc4
mulx 8*1+128(%r14), $t0, $t1
diff --git a/crypto/ec/asm/x25519-ppc64.pl b/crypto/ec/asm/x25519-ppc64.pl
index 3773cb27cd..bd9bf8ab11 100755
--- a/crypto/ec/asm/x25519-ppc64.pl
+++ b/crypto/ec/asm/x25519-ppc64.pl
@@ -451,7 +451,7 @@ x25519_fe64_tobytes:
and $t0,$t0,$t1
sldi $a3,$a3,1
add $t0,$t0,$t1 # compare to modulus in the same go
- srdi $a3,$a3,1 # most signifcant bit cleared
+ srdi $a3,$a3,1 # most significant bit cleared
addc $a0,$a0,$t0
addze $a1,$a1
@@ -462,7 +462,7 @@ x25519_fe64_tobytes:
sradi $t0,$a3,63 # most significant bit -> mask
sldi $a3,$a3,1
andc $t0,$t1,$t0
- srdi $a3,$a3,1 # most signifcant bit cleared
+ srdi $a3,$a3,1 # most significant bit cleared
subi $rp,$rp,1
subfc $a0,$t0,$a0
diff --git a/crypto/ec/ec_lcl.h b/crypto/ec/ec_lcl.h
index 119255f1dc..e4189d7328 100644
--- a/crypto/ec/ec_lcl.h
+++ b/crypto/ec/ec_lcl.h
@@ -154,7 +154,7 @@ struct ec_method_st {
int (*field_div) (const EC_GROUP *, BIGNUM *r, const BIGNUM *a,
const BIGNUM *b, BN_CTX *);
/*-
- * 'field_inv' computes the multipicative inverse of a in the field,
+ * 'field_inv' computes the multiplicative inverse of a in the field,
* storing the result in r.
*
* If 'a' is zero (or equivalent), you'll get an EC_R_CANNOT_INVERT error.
diff --git a/crypto/ec/ecp_nistp521.c b/crypto/ec/ecp_nistp521.c
index e31b85c5f7..1e45f1eec5 100644
--- a/crypto/ec/ecp_nistp521.c
+++ b/crypto/ec/ecp_nistp521.c
@@ -1269,7 +1269,7 @@ static void point_add(felem x3, felem y3, felem z3,
* ffffffa51868783bf2f966b7fcc0148f709a5d03bb5c9b8899c47aebb6fb
* 71e913863f7, in that case the penultimate intermediate is -9G and
* the final digit is also -9G. Since this only happens for a single
- * scalar, the timing leak is irrelevent. (Any attacker who wanted to
+ * scalar, the timing leak is irrelevant. (Any attacker who wanted to
* check whether a secret scalar was that exact value, can already do
* so.)
*/
diff --git a/crypto/ec/ecx_meth.c b/crypto/ec/ecx_meth.c
index e4cac99e2d..c87419b5db 100644
--- a/crypto/ec/ecx_meth.c
+++ b/crypto/ec/ecx_meth.c
@@ -532,7 +532,7 @@ static int ecd_item_sign25519(EVP_MD_CTX *ctx, const ASN1_ITEM *it, void *asn,
X509_ALGOR_set0(alg1, OBJ_nid2obj(NID_ED25519), V_ASN1_UNDEF, NULL);
if (alg2)
X509_ALGOR_set0(alg2, OBJ_nid2obj(NID_ED25519), V_ASN1_UNDEF, NULL);
- /* Algorithm idetifiers set: carry on as normal */
+ /* Algorithm identifiers set: carry on as normal */
return 3;
}
diff --git a/crypto/init.c b/crypto/init.c
index 62626a707e..46839e768f 100644
--- a/crypto/init.c
+++ b/crypto/init.c
@@ -40,7 +40,7 @@ static int stopped = 0;
* destructor for threads terminating before libcrypto is initialized or
* after it's de-initialized. Access to the key doesn't have to be
* serialized for the said threads, because they didn't use libcrypto
- * and it doesn't matter if they pick "impossible" or derefernce real
+ * and it doesn't matter if they pick "impossible" or dereference real
* key value and pull NULL past initialization in the first thread that
* intends to use libcrypto.
*/
diff --git a/crypto/lhash/lhash.c b/crypto/lhash/lhash.c
index 8d9f933df3..ed66f5af5c 100644
--- a/crypto/lhash/lhash.c
+++ b/crypto/lhash/lhash.c
@@ -19,14 +19,14 @@
/*
* A hashing implementation that appears to be based on the linear hashing
- * alogrithm:
+ * algorithm:
* https://en.wikipedia.org/wiki/Linear_hashing
*
* Litwin, Witold (1980), "Linear hashing: A new tool for file and table
* addressing", Proc. 6th Conference on Very Large Databases: 212-223
- * http://hackthology.com/pdfs/Litwin-1980-Linear_Hashing.pdf
+ * https://hackthology.com/pdfs/Litwin-1980-Linear_Hashing.pdf
*
- * From the wikipedia article "Linear hashing is used in the BDB Berkeley
+ * From the Wikipedia article "Linear hashing is used in the BDB Berkeley
* database system, which in turn is used by many software systems such as
* OpenLDAP, using a C implementation derived from the CACM article and first
* published on the Usenet in 1988 by Esmond Pitt."
diff --git a/crypto/rand/drbg_lib.c b/crypto/rand/drbg_lib.c
index df1e260261..7d0c8027fd 100644
--- a/crypto/rand/drbg_lib.c
+++ b/crypto/rand/drbg_lib.c
@@ -318,7 +318,7 @@ int RAND_DRBG_instantiate(RAND_DRBG *drbg,
/*
* NIST SP800-90Ar1 section 9.1 says you can combine getting the entropy
* and nonce in 1 call by increasing the entropy with 50% and increasing
- * the minimum length to accomadate the length of the nonce.
+ * the minimum length to accommodate the length of the nonce.
* We do this in case a nonce is require and get_nonce is NULL.
*/
if (drbg->min_noncelen > 0 && drbg->get_nonce == NULL) {
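The comment in this hunk summarizes the NIST SP 800-90Ar1 section 9.1 shortcut: when no get_nonce callback is available, fold the nonce into the entropy request by asking for 50% more entropy and widening the minimum input length by the nonce length. A rough sketch of that arithmetic, assuming hypothetical field names rather than the real RAND_DRBG structure:

    #include <stddef.h>

    /* Hypothetical stand-in for the relevant DRBG fields; the real
     * structure in drbg_lib.c differs. */
    struct drbg_sketch {
        size_t strength;        /* security strength in bits */
        size_t min_entropylen;  /* minimum entropy input length in bytes */
        size_t min_noncelen;    /* minimum nonce length in bytes */
        int have_get_nonce;     /* nonzero if a get_nonce callback is set */
    };

    /* Return the entropy (in bits) to request and set *min_len to the
     * minimum input length, combining entropy and nonce in one call. */
    static size_t entropy_request(const struct drbg_sketch *d, size_t *min_len)
    {
        size_t min_entropy = d->strength;

        *min_len = d->min_entropylen;
        if (d->min_noncelen > 0 && !d->have_get_nonce) {
            min_entropy += d->strength / 2;   /* 50% more entropy ...        */
            *min_len += d->min_noncelen;      /* ... and room for the nonce  */
        }
        return min_entropy;
    }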
diff --git a/crypto/rand/rand_lib.c b/crypto/rand/rand_lib.c
index 48da2b9539..23bb2e68b1 100644
--- a/crypto/rand/rand_lib.c
+++ b/crypto/rand/rand_lib.c
@@ -367,7 +367,7 @@ void rand_cleanup_int(void)
}
/*
- * RAND_close_seed_files() ensures that any seed file decriptors are
+ * RAND_close_seed_files() ensures that any seed file descriptors are
* closed after use.
*/
void RAND_keep_random_devices_open(int keep)
diff --git a/crypto/rsa/rsa_gen.c b/crypto/rsa/rsa_gen.c
index 4997a632f2..2b81808860 100644
--- a/crypto/rsa/rsa_gen.c
+++ b/crypto/rsa/rsa_gen.c
@@ -250,7 +250,7 @@ static int rsa_builtin_keygen(RSA *rsa, int bits, int primes, BIGNUM *e_value,
*
* This strategy has the following goals:
*
- * 1. 1024-bit factors are effcient when using 3072 and 4096-bit key
+ * 1. 1024-bit factors are efficient when using 3072 and 4096-bit key
* 2. stay the same logic with normal 2-prime key
*/
bitse -= bitsr[i];
diff --git a/crypto/sha/asm/sha512-sparcv9.pl b/crypto/sha/asm/sha512-sparcv9.pl
index 4432bda65a..b4f92002f1 100644
--- a/crypto/sha/asm/sha512-sparcv9.pl
+++ b/crypto/sha/asm/sha512-sparcv9.pl
@@ -27,7 +27,7 @@
# over 2x than 32-bit code. X[16] resides on stack, but access to it
# is scheduled for L2 latency and staged through 32 least significant
# bits of %l0-%l7. The latter is done to achieve 32-/64-bit ABI
-# duality. Nevetheless it's ~40% faster than SHA256, which is pretty
+# duality. Nevertheless it's ~40% faster than SHA256, which is pretty
# good [optimal coefficient is 50%].
#
# SHA512 on UltraSPARC T1.
diff --git a/crypto/sm2/sm2_sign.c b/crypto/sm2/sm2_sign.c
index 0f9c14cb5f..de33607761 100644
--- a/crypto/sm2/sm2_sign.c
+++ b/crypto/sm2/sm2_sign.c
@@ -313,12 +313,12 @@ static int sm2_sig_verify(const EC_KEY *key, const ECDSA_SIG *sig,
/*
* B1: verify whether r' in [1,n-1], verification failed if not
- * B2: vefify whether s' in [1,n-1], verification failed if not
+ * B2: verify whether s' in [1,n-1], verification failed if not
* B3: set M'~=ZA || M'
* B4: calculate e'=Hv(M'~)
* B5: calculate t = (r' + s') modn, verification failed if t=0
* B6: calculate the point (x1', y1')=[s']G + [t]PA
- * B7: calculate R=(e'+x1') modn, verfication pass if yes, otherwise failed
+ * B7: calculate R=(e'+x1') modn, verification pass if yes, otherwise failed
*/
ECDSA_SIG_get0(sig, &r, &s);
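The B1-B7 list in this comment is the standard SM2 verification recipe. Below is a hedged sketch of the B5-B7 arithmetic only, written against generic OpenSSL EC/BN calls; it is not the body of sm2_sig_verify(), and the B1/B2 range checks plus most error reporting are omitted for brevity:

    #include <openssl/bn.h>
    #include <openssl/ec.h>

    /* t = (r + s) mod n; (x1, y1) = [s]G + [t]P; accept if (e + x1) mod n == r */
    static int sm2_verify_core(const EC_GROUP *group, const EC_POINT *pub,
                               const BIGNUM *e, const BIGNUM *r, const BIGNUM *s)
    {
        int ok = 0;
        BN_CTX *ctx = BN_CTX_new();
        const BIGNUM *n = EC_GROUP_get0_order(group);
        BIGNUM *t = BN_new(), *x1 = BN_new(), *R = BN_new();
        EC_POINT *pt = EC_POINT_new(group);

        if (ctx == NULL || t == NULL || x1 == NULL || R == NULL || pt == NULL)
            goto done;
        if (!BN_mod_add(t, r, s, n, ctx) || BN_is_zero(t))      /* B5 */
            goto done;
        if (!EC_POINT_mul(group, pt, s, pub, t, ctx))           /* B6: [s]G + [t]P */
            goto done;
        if (!EC_POINT_get_affine_coordinates(group, pt, x1, NULL, ctx))
            goto done;
        if (!BN_mod_add(R, e, x1, n, ctx))                      /* B7 */
            goto done;
        ok = (BN_cmp(R, r) == 0);
     done:
        EC_POINT_free(pt);
        BN_free(R);
        BN_free(x1);
        BN_free(t);
        BN_CTX_free(ctx);
        return ok;
    }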
diff --git a/crypto/store/loader_file.c b/crypto/store/loader_file.c
index 1ebbe9fe7a..41e25457e5 100644
--- a/crypto/store/loader_file.c
+++ b/crypto/store/loader_file.c
@@ -172,7 +172,7 @@ typedef OSSL_STORE_INFO *(*file_try_decode_fn)(const char *pem_name,
typedef int (*file_eof_fn)(void *handler_ctx);
/*
* The destroy_ctx function is used to destroy the handler_ctx that was
- * intiated by a repeatable try_decode fuction. This is only used when
+ * initiated by a repeatable try_decode function. This is only used when
* the handler is marked repeatable.
*/
typedef void (*file_destroy_ctx_fn)(void **handler_ctx);
@@ -470,7 +470,7 @@ static FILE_HANDLER PrivateKey_handler = {
};
/*
- * Public key decoder. Only supports SubjectPublicKeyInfo formated keys.
+ * Public key decoder. Only supports SubjectPublicKeyInfo formatted keys.
*/
static OSSL_STORE_INFO *try_decode_PUBKEY(const char *pem_name,
const char *pem_header,