From 6025001707fd65679d758c877200469d4e72ea88 Mon Sep 17 00:00:00 2001
From: klemens
Date: Fri, 5 Aug 2016 19:56:58 +0200
Subject: spelling fixes, just comments and readme.

Reviewed-by: Matt Caswell
Reviewed-by: Rich Salz
(Merged from https://github.com/openssl/openssl/pull/1413)
---
 crypto/rc4/asm/rc4-586.pl    | 2 +-
 crypto/rc4/asm/rc4-x86_64.pl | 2 +-
 2 files changed, 2 insertions(+), 2 deletions(-)

(limited to 'crypto/rc4')

diff --git a/crypto/rc4/asm/rc4-586.pl b/crypto/rc4/asm/rc4-586.pl
index 936177778d..7d6f97c59e 100644
--- a/crypto/rc4/asm/rc4-586.pl
+++ b/crypto/rc4/asm/rc4-586.pl
@@ -157,7 +157,7 @@ if ($alt=0) {
 	    &movd	($i>0?"mm1":"mm2",&DWP(0,$dat,$ty,4));

 	    # (*)	This is the key to Core2 and Westmere performance.
-	    #		Whithout movz out-of-order execution logic confuses
+	    #		Without movz out-of-order execution logic confuses
 	    #		itself and fails to reorder loads and stores. Problem
 	    #		appears to be fixed in Sandy Bridge...
 }
diff --git a/crypto/rc4/asm/rc4-x86_64.pl b/crypto/rc4/asm/rc4-x86_64.pl
index 5ae0c6dd49..aaed2b1e61 100755
--- a/crypto/rc4/asm/rc4-x86_64.pl
+++ b/crypto/rc4/asm/rc4-x86_64.pl
@@ -57,7 +57,7 @@
 # As was shown by Zou Nanhai loop unrolling can improve Intel EM64T
 # performance by >30% [unlike P4 32-bit case that is]. But this is
 # provided that loads are reordered even more aggressively! Both code
-# pathes, AMD64 and EM64T, reorder loads in essentially same manner
+# paths, AMD64 and EM64T, reorder loads in essentially same manner
 # as my IA-64 implementation. On Opteron this resulted in modest 5%
 # improvement [I had to test it], while final Intel P4 performance
 # achieves respectful 432MBps on 2.8GHz processor now. For reference.
--
cgit v1.2.3