Diffstat (limited to 'crypto')
 -rw-r--r--  crypto/Kconfig      63
 -rw-r--r--  crypto/Makefile      7
 -rw-r--r--  crypto/ahash.c     194
 -rw-r--r--  crypto/api.c         8
 -rw-r--r--  crypto/camellia.c   84
 -rw-r--r--  crypto/crc32c.c    128
 -rw-r--r--  crypto/cryptd.c    253
 -rw-r--r--  crypto/digest.c     83
 -rw-r--r--  crypto/hash.c      102
 -rw-r--r--  crypto/hmac.c       16
 -rw-r--r--  crypto/internal.h    1
 -rw-r--r--  crypto/prng.c      410
 -rw-r--r--  crypto/prng.h       27
 -rw-r--r--  crypto/ripemd.h     43
 -rw-r--r--  crypto/rmd128.c    325
 -rw-r--r--  crypto/rmd160.c    369
 -rw-r--r--  crypto/rmd256.c    344
 -rw-r--r--  crypto/rmd320.c    393
 -rw-r--r--  crypto/tcrypt.c    188
 -rw-r--r--  crypto/tcrypt.h    526
20 files changed, 3432 insertions(+), 132 deletions(-)
diff --git a/crypto/Kconfig b/crypto/Kconfig
index 864456c140fe..ea503572fcbe 100644
--- a/crypto/Kconfig
+++ b/crypto/Kconfig
@@ -65,6 +65,7 @@ config CRYPTO_NULL
config CRYPTO_CRYPTD
tristate "Software async crypto daemon"
select CRYPTO_BLKCIPHER
+ select CRYPTO_HASH
select CRYPTO_MANAGER
help
This is a generic software asynchronous crypto daemon that
@@ -212,7 +213,7 @@ comment "Digest"
config CRYPTO_CRC32C
tristate "CRC32c CRC algorithm"
- select CRYPTO_ALGAPI
+ select CRYPTO_HASH
select LIBCRC32C
help
Castagnoli, et al Cyclic Redundancy-Check Algorithm. Used
@@ -241,6 +242,57 @@ config CRYPTO_MICHAEL_MIC
should not be used for other purposes because of the weakness
of the algorithm.
+config CRYPTO_RMD128
+ tristate "RIPEMD-128 digest algorithm"
+ select CRYPTO_ALGAPI
+ help
+ RIPEMD-128 (ISO/IEC 10118-3:2004).
+
+ RIPEMD-128 is a 128-bit cryptographic hash function. It should only
+ be used as a secure replacement for RIPEMD. For other use cases,
+ RIPEMD-160 should be used.
+
+ Developed by Hans Dobbertin, Antoon Bosselaers and Bart Preneel.
+ See <http://home.esat.kuleuven.be/~bosselae/ripemd160.html>
+
+config CRYPTO_RMD160
+ tristate "RIPEMD-160 digest algorithm"
+ select CRYPTO_ALGAPI
+ help
+ RIPEMD-160 (ISO/IEC 10118-3:2004).
+
+ RIPEMD-160 is a 160-bit cryptographic hash function. It is intended
+ to be used as a secure replacement for the 128-bit hash functions
+ MD4, MD5 and its predecessor RIPEMD (not to be confused with RIPEMD-128).
+
+ Its speed is comparable to SHA1 and there are no known attacks against
+ RIPEMD-160.
+
+ Developed by Hans Dobbertin, Antoon Bosselaers and Bart Preneel.
+ See <http://home.esat.kuleuven.be/~bosselae/ripemd160.html>
+
+config CRYPTO_RMD256
+ tristate "RIPEMD-256 digest algorithm"
+ select CRYPTO_ALGAPI
+ help
+ RIPEMD-256 is an optional extension of RIPEMD-128 with a 256-bit hash.
+ It is intended for applications that require a longer hash value without
+ needing a larger security level than RIPEMD-128.
+
+ Developed by Hans Dobbertin, Antoon Bosselaers and Bart Preneel.
+ See <http://home.esat.kuleuven.be/~bosselae/ripemd160.html>
+
+config CRYPTO_RMD320
+ tristate "RIPEMD-320 digest algorithm"
+ select CRYPTO_ALGAPI
+ help
+ RIPEMD-320 is an optional extension of RIPEMD-160 with a 320-bit hash.
+ It is intended for applications that require a longer hash value without
+ needing a larger security level than RIPEMD-160.
+
+ Developed by Hans Dobbertin, Antoon Bosselaers and Bart Preneel.
+ See <http://home.esat.kuleuven.be/~bosselae/ripemd160.html>
+
config CRYPTO_SHA1
tristate "SHA1 digest algorithm"
select CRYPTO_ALGAPI
@@ -614,6 +666,15 @@ config CRYPTO_LZO
help
This is the LZO algorithm.
+comment "Random Number Generation"
+
+config CRYPTO_PRNG
+ tristate "Pseudo Random Number Generation for Cryptographic modules"
+ help
+ This option enables the generic pseudo random number generator
+ for cryptographic modules. It uses the algorithm specified in
+ ANSI X9.31 A.2.4.
+
source "drivers/crypto/Kconfig"
endif # if CRYPTO
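
The CRYPTO_PRNG help text above cites ANSI X9.31 A.2.4 without spelling out the recurrence. As a rough orientation, here is a minimal user-space sketch of just that data flow, I = E_K(DT), R = E_K(I ^ V), V = E_K(R ^ I); the block_encrypt() below is a toy stand-in mixing function and not the block cipher the actual prng.c drives through the crypto API.

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define BLK 16

/* Stand-in "cipher": byte rotate plus key XOR; illustration only. */
static void block_encrypt(const uint8_t *key, const uint8_t *in, uint8_t *out)
{
	int i;

	for (i = 0; i < BLK; i++)
		out[i] = in[(i + 1) % BLK] ^ key[i];
}

static void xor_block(const uint8_t *a, const uint8_t *b, uint8_t *out)
{
	int i;

	for (i = 0; i < BLK; i++)
		out[i] = a[i] ^ b[i];
}

int main(void)
{
	uint8_t key[BLK], V[BLK], DT[BLK] = { 0 };
	uint8_t I[BLK], R[BLK], tmp[BLK];
	int block, i;

	memcpy(key, "0123456789abcdef", BLK);	/* K: cipher key         */
	memcpy(V, "seed.seed.seed.0", BLK);	/* V: secret seed value  */

	for (block = 0; block < 4; block++) {
		block_encrypt(key, DT, I);	/* I = E_K(DT)    */
		xor_block(I, V, tmp);
		block_encrypt(key, tmp, R);	/* R = E_K(I ^ V) */
		xor_block(R, I, tmp);
		block_encrypt(key, tmp, V);	/* V = E_K(R ^ I) */

		for (i = 0; i < BLK; i++)
			printf("%02x", R[i]);	/* R is the output block */
		printf("\n");

		DT[BLK - 1]++;			/* advance the date/time vector */
	}
	return 0;
}
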
diff --git a/crypto/Makefile b/crypto/Makefile
index ca024418f4fb..ef61b3b64660 100644
--- a/crypto/Makefile
+++ b/crypto/Makefile
@@ -19,6 +19,7 @@ obj-$(CONFIG_CRYPTO_BLKCIPHER) += crypto_blkcipher.o
obj-$(CONFIG_CRYPTO_SEQIV) += seqiv.o
crypto_hash-objs := hash.o
+crypto_hash-objs += ahash.o
obj-$(CONFIG_CRYPTO_HASH) += crypto_hash.o
obj-$(CONFIG_CRYPTO_MANAGER) += cryptomgr.o
@@ -27,6 +28,10 @@ obj-$(CONFIG_CRYPTO_XCBC) += xcbc.o
obj-$(CONFIG_CRYPTO_NULL) += crypto_null.o
obj-$(CONFIG_CRYPTO_MD4) += md4.o
obj-$(CONFIG_CRYPTO_MD5) += md5.o
+obj-$(CONFIG_CRYPTO_RMD128) += rmd128.o
+obj-$(CONFIG_CRYPTO_RMD160) += rmd160.o
+obj-$(CONFIG_CRYPTO_RMD256) += rmd256.o
+obj-$(CONFIG_CRYPTO_RMD320) += rmd320.o
obj-$(CONFIG_CRYPTO_SHA1) += sha1_generic.o
obj-$(CONFIG_CRYPTO_SHA256) += sha256_generic.o
obj-$(CONFIG_CRYPTO_SHA512) += sha512_generic.o
@@ -64,7 +69,7 @@ obj-$(CONFIG_CRYPTO_MICHAEL_MIC) += michael_mic.o
obj-$(CONFIG_CRYPTO_CRC32C) += crc32c.o
obj-$(CONFIG_CRYPTO_AUTHENC) += authenc.o
obj-$(CONFIG_CRYPTO_LZO) += lzo.o
-
+obj-$(CONFIG_CRYPTO_PRNG) += prng.o
obj-$(CONFIG_CRYPTO_TEST) += tcrypt.o
#
diff --git a/crypto/ahash.c b/crypto/ahash.c
new file mode 100644
index 000000000000..27128f2c687a
--- /dev/null
+++ b/crypto/ahash.c
@@ -0,0 +1,194 @@
+/*
+ * Asynchronous Cryptographic Hash operations.
+ *
+ * This is the asynchronous version of hash.c with notification of
+ * completion via a callback.
+ *
+ * Copyright (c) 2008 Loc Ho <lho@amcc.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the Free
+ * Software Foundation; either version 2 of the License, or (at your option)
+ * any later version.
+ *
+ */
+
+#include <crypto/internal/hash.h>
+#include <crypto/scatterwalk.h>
+#include <linux/err.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/sched.h>
+#include <linux/slab.h>
+#include <linux/seq_file.h>
+
+#include "internal.h"
+
+static int hash_walk_next(struct crypto_hash_walk *walk)
+{
+ unsigned int alignmask = walk->alignmask;
+ unsigned int offset = walk->offset;
+ unsigned int nbytes = min(walk->entrylen,
+ ((unsigned int)(PAGE_SIZE)) - offset);
+
+ walk->data = crypto_kmap(walk->pg, 0);
+ walk->data += offset;
+
+ if (offset & alignmask)
+ nbytes = alignmask + 1 - (offset & alignmask);
+
+ walk->entrylen -= nbytes;
+ return nbytes;
+}
+
+static int hash_walk_new_entry(struct crypto_hash_walk *walk)
+{
+ struct scatterlist *sg;
+
+ sg = walk->sg;
+ walk->pg = sg_page(sg);
+ walk->offset = sg->offset;
+ walk->entrylen = sg->length;
+
+ if (walk->entrylen > walk->total)
+ walk->entrylen = walk->total;
+ walk->total -= walk->entrylen;
+
+ return hash_walk_next(walk);
+}
+
+int crypto_hash_walk_done(struct crypto_hash_walk *walk, int err)
+{
+ unsigned int alignmask = walk->alignmask;
+ unsigned int nbytes = walk->entrylen;
+
+ walk->data -= walk->offset;
+
+ if (nbytes && walk->offset & alignmask && !err) {
+ walk->offset += alignmask - 1;
+ walk->offset = ALIGN(walk->offset, alignmask + 1);
+ walk->data += walk->offset;
+
+ nbytes = min(nbytes,
+ ((unsigned int)(PAGE_SIZE)) - walk->offset);
+ walk->entrylen -= nbytes;
+
+ return nbytes;
+ }
+
+ crypto_kunmap(walk->data, 0);
+ crypto_yield(walk->flags);
+
+ if (err)
+ return err;
+
+ walk->offset = 0;
+
+ if (nbytes)
+ return hash_walk_next(walk);
+
+ if (!walk->total)
+ return 0;
+
+ walk->sg = scatterwalk_sg_next(walk->sg);
+
+ return hash_walk_new_entry(walk);
+}
+EXPORT_SYMBOL_GPL(crypto_hash_walk_done);
+
+int crypto_hash_walk_first(struct ahash_request *req,
+ struct crypto_hash_walk *walk)
+{
+ walk->total = req->nbytes;
+
+ if (!walk->total)
+ return 0;
+
+ walk->alignmask = crypto_ahash_alignmask(crypto_ahash_reqtfm(req));
+ walk->sg = req->src;
+ walk->flags = req->base.flags;
+
+ return hash_walk_new_entry(walk);
+}
+EXPORT_SYMBOL_GPL(crypto_hash_walk_first);
+
+static int ahash_setkey_unaligned(struct crypto_ahash *tfm, const u8 *key,
+ unsigned int keylen)
+{
+ struct ahash_alg *ahash = crypto_ahash_alg(tfm);
+ unsigned long alignmask = crypto_ahash_alignmask(tfm);
+ int ret;
+ u8 *buffer, *alignbuffer;
+ unsigned long absize;
+
+ absize = keylen + alignmask;
+ buffer = kmalloc(absize, GFP_ATOMIC);
+ if (!buffer)
+ return -ENOMEM;
+
+ alignbuffer = (u8 *)ALIGN((unsigned long)buffer, alignmask + 1);
+ memcpy(alignbuffer, key, keylen);
+ ret = ahash->setkey(tfm, alignbuffer, keylen);
+ memset(alignbuffer, 0, keylen);
+ kfree(buffer);
+ return ret;
+}
+
+static int ahash_setkey(struct crypto_ahash *tfm, const u8 *key,
+ unsigned int keylen)
+{
+ struct ahash_alg *ahash = crypto_ahash_alg(tfm);
+ unsigned long alignmask = crypto_ahash_alignmask(tfm);
+
+ if ((unsigned long)key & alignmask)
+ return ahash_setkey_unaligned(tfm, key, keylen);
+
+ return ahash->setkey(tfm, key, keylen);
+}
+
+static unsigned int crypto_ahash_ctxsize(struct crypto_alg *alg, u32 type,
+ u32 mask)
+{
+ return alg->cra_ctxsize;
+}
+
+static int crypto_init_ahash_ops(struct crypto_tfm *tfm, u32 type, u32 mask)
+{
+ struct ahash_alg *alg = &tfm->__crt_alg->cra_ahash;
+ struct ahash_tfm *crt = &tfm->crt_ahash;
+
+ if (alg->digestsize > PAGE_SIZE / 8)
+ return -EINVAL;
+
+ crt->init = alg->init;
+ crt->update = alg->update;
+ crt->final = alg->final;
+ crt->digest = alg->digest;
+ crt->setkey = ahash_setkey;
+ crt->digestsize = alg->digestsize;
+
+ return 0;
+}
+
+static void crypto_ahash_show(struct seq_file *m, struct crypto_alg *alg)
+ __attribute__ ((unused));
+static void crypto_ahash_show(struct seq_file *m, struct crypto_alg *alg)
+{
+ seq_printf(m, "type : ahash\n");
+ seq_printf(m, "async : %s\n", alg->cra_flags & CRYPTO_ALG_ASYNC ?
+ "yes" : "no");
+ seq_printf(m, "blocksize : %u\n", alg->cra_blocksize);
+ seq_printf(m, "digestsize : %u\n", alg->cra_hash.digestsize);
+}
+
+const struct crypto_type crypto_ahash_type = {
+ .ctxsize = crypto_ahash_ctxsize,
+ .init = crypto_init_ahash_ops,
+#ifdef CONFIG_PROC_FS
+ .show = crypto_ahash_show,
+#endif
+};
+EXPORT_SYMBOL_GPL(crypto_ahash_type);
+
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("Asynchronous cryptographic hash type");
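
The new ahash.c only provides the type plumbing; the "notification of completion via a callback" promised in its header comment is easiest to see from the caller's side. The sketch below shows how a user of the asynchronous interface would run a one-shot digest and wait for the callback. It assumes allocation and request helpers of the form crypto_alloc_ahash(), ahash_request_alloc(), ahash_request_set_callback(), ahash_request_set_crypt() and crypto_ahash_digest(); treat those names as assumptions about the companion API patches rather than something this file itself defines.

#include <linux/crypto.h>
#include <linux/err.h>
#include <linux/scatterlist.h>
#include <linux/completion.h>

struct sample_ahash_result {
	struct completion done;
	int err;
};

/* Completion callback: ignore the -EINPROGRESS notification, record the rest. */
static void sample_ahash_done(struct crypto_async_request *req, int err)
{
	struct sample_ahash_result *res = req->data;

	if (err == -EINPROGRESS)
		return;

	res->err = err;
	complete(&res->done);
}

static int sample_ahash_digest(const char *algname, u8 *buf,
			       unsigned int len, u8 *out)
{
	struct crypto_ahash *tfm;
	struct ahash_request *req;
	struct sample_ahash_result res;
	struct scatterlist sg;
	int err;

	tfm = crypto_alloc_ahash(algname, 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	req = ahash_request_alloc(tfm, GFP_KERNEL);
	if (!req) {
		err = -ENOMEM;
		goto out_free_tfm;
	}

	init_completion(&res.done);
	sg_init_one(&sg, buf, len);

	ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
				   sample_ahash_done, &res);
	ahash_request_set_crypt(req, &sg, out, len);

	err = crypto_ahash_digest(req);
	if (err == -EINPROGRESS || err == -EBUSY) {
		/* Request was queued; the callback will signal completion. */
		wait_for_completion(&res.done);
		err = res.err;
	}

	ahash_request_free(req);
out_free_tfm:
	crypto_free_ahash(tfm);
	return err;
}
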
diff --git a/crypto/api.c b/crypto/api.c
index 0a0f41ef255f..d06e33270abe 100644
--- a/crypto/api.c
+++ b/crypto/api.c
@@ -235,8 +235,12 @@ static int crypto_init_ops(struct crypto_tfm *tfm, u32 type, u32 mask)
return crypto_init_cipher_ops(tfm);
case CRYPTO_ALG_TYPE_DIGEST:
- return crypto_init_digest_ops(tfm);
-
+ if ((mask & CRYPTO_ALG_TYPE_HASH_MASK) !=
+ CRYPTO_ALG_TYPE_HASH_MASK)
+ return crypto_init_digest_ops_async(tfm);
+ else
+ return crypto_init_digest_ops(tfm);
+
case CRYPTO_ALG_TYPE_COMPRESS:
return crypto_init_compress_ops(tfm);
diff --git a/crypto/camellia.c b/crypto/camellia.c
index 493fee7e0a8b..b1cc4de6493c 100644
--- a/crypto/camellia.c
+++ b/crypto/camellia.c
@@ -35,6 +35,8 @@
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/module.h>
+#include <linux/bitops.h>
+#include <asm/unaligned.h>
static const u32 camellia_sp1110[256] = {
0x70707000,0x82828200,0x2c2c2c00,0xececec00,
@@ -335,20 +337,6 @@ static const u32 camellia_sp4404[256] = {
/*
* macros
*/
-#define GETU32(v, pt) \
- do { \
- /* latest breed of gcc is clever enough to use move */ \
- memcpy(&(v), (pt), 4); \
- (v) = be32_to_cpu(v); \
- } while(0)
-
-/* rotation right shift 1byte */
-#define ROR8(x) (((x) >> 8) + ((x) << 24))
-/* rotation left shift 1bit */
-#define ROL1(x) (((x) << 1) + ((x) >> 31))
-/* rotation left shift 1byte */
-#define ROL8(x) (((x) << 8) + ((x) >> 24))
-
#define ROLDQ(ll, lr, rl, rr, w0, w1, bits) \
do { \
w0 = ll; \
@@ -383,7 +371,7 @@ static const u32 camellia_sp4404[256] = {
^ camellia_sp3033[(u8)(il >> 8)] \
^ camellia_sp4404[(u8)(il )]; \
yl ^= yr; \
- yr = ROR8(yr); \
+ yr = ror32(yr, 8); \
yr ^= yl; \
} while(0)
@@ -405,7 +393,7 @@ static void camellia_setup_tail(u32 *subkey, u32 *subL, u32 *subR, int max)
subL[7] ^= subL[1]; subR[7] ^= subR[1];
subL[1] ^= subR[1] & ~subR[9];
dw = subL[1] & subL[9],
- subR[1] ^= ROL1(dw); /* modified for FLinv(kl2) */
+ subR[1] ^= rol32(dw, 1); /* modified for FLinv(kl2) */
/* round 8 */
subL[11] ^= subL[1]; subR[11] ^= subR[1];
/* round 10 */
@@ -414,7 +402,7 @@ static void camellia_setup_tail(u32 *subkey, u32 *subL, u32 *subR, int max)
subL[15] ^= subL[1]; subR[15] ^= subR[1];
subL[1] ^= subR[1] & ~subR[17];
dw = subL[1] & subL[17],
- subR[1] ^= ROL1(dw); /* modified for FLinv(kl4) */
+ subR[1] ^= rol32(dw, 1); /* modified for FLinv(kl4) */
/* round 14 */
subL[19] ^= subL[1]; subR[19] ^= subR[1];
/* round 16 */
@@ -430,7 +418,7 @@ static void camellia_setup_tail(u32 *subkey, u32 *subL, u32 *subR, int max)
} else {
subL[1] ^= subR[1] & ~subR[25];
dw = subL[1] & subL[25],
- subR[1] ^= ROL1(dw); /* modified for FLinv(kl6) */
+ subR[1] ^= rol32(dw, 1); /* modified for FLinv(kl6) */
/* round 20 */
subL[27] ^= subL[1]; subR[27] ^= subR[1];
/* round 22 */
@@ -450,7 +438,7 @@ static void camellia_setup_tail(u32 *subkey, u32 *subL, u32 *subR, int max)
subL[26] ^= kw4l; subR[26] ^= kw4r;
kw4l ^= kw4r & ~subR[24];
dw = kw4l & subL[24],
- kw4r ^= ROL1(dw); /* modified for FL(kl5) */
+ kw4r ^= rol32(dw, 1); /* modified for FL(kl5) */
}
/* round 17 */
subL[22] ^= kw4l; subR[22] ^= kw4r;
@@ -460,7 +448,7 @@ static void camellia_setup_tail(u32 *subkey, u32 *subL, u32 *subR, int max)
subL[18] ^= kw4l; subR[18] ^= kw4r;
kw4l ^= kw4r & ~subR[16];
dw = kw4l & subL[16],
- kw4r ^= ROL1(dw); /* modified for FL(kl3) */
+ kw4r ^= rol32(dw, 1); /* modified for FL(kl3) */
/* round 11 */
subL[14] ^= kw4l; subR[14] ^= kw4r;
/* round 9 */
@@ -469,7 +457,7 @@ static void camellia_setup_tail(u32 *subkey, u32 *subL, u32 *subR, int max)
subL[10] ^= kw4l; subR[10] ^= kw4r;
kw4l ^= kw4r & ~subR[8];
dw = kw4l & subL[8],
- kw4r ^= ROL1(dw); /* modified for FL(kl1) */
+ kw4r ^= rol32(dw, 1); /* modified for FL(kl1) */
/* round 5 */
subL[6] ^= kw4l; subR[6] ^= kw4r;
/* round 3 */
@@ -494,7 +482,7 @@ static void camellia_setup_tail(u32 *subkey, u32 *subL, u32 *subR, int max)
SUBKEY_R(6) = subR[5] ^ subR[7];
tl = subL[10] ^ (subR[10] & ~subR[8]);
dw = tl & subL[8], /* FL(kl1) */
- tr = subR[10] ^ ROL1(dw);
+ tr = subR[10] ^ rol32(dw, 1);
SUBKEY_L(7) = subL[6] ^ tl; /* round 6 */
SUBKEY_R(7) = subR[6] ^ tr;
SUBKEY_L(8) = subL[8]; /* FL(kl1) */
@@ -503,7 +491,7 @@ static void camellia_setup_tail(u32 *subkey, u32 *subL, u32 *subR, int max)
SUBKEY_R(9) = subR[9];
tl = subL[7] ^ (subR[7] & ~subR[9]);
dw = tl & subL[9], /* FLinv(kl2) */
- tr = subR[7] ^ ROL1(dw);
+ tr = subR[7] ^ rol32(dw, 1);
SUBKEY_L(10) = tl ^ subL[11]; /* round 7 */
SUBKEY_R(10) = tr ^ subR[11];
SUBKEY_L(11) = subL[10] ^ subL[12]; /* round 8 */
@@ -516,7 +504,7 @@ static void camellia_setup_tail(u32 *subkey, u32 *subL, u32 *subR, int max)
SUBKEY_R(14) = subR[13] ^ subR[15];
tl = subL[18] ^ (subR[18] & ~subR[16]);
dw = tl & subL[16], /* FL(kl3) */
- tr = subR[18] ^ ROL1(dw);
+ tr = subR[18] ^ rol32(dw, 1);
SUBKEY_L(15) = subL[14] ^ tl; /* round 12 */
SUBKEY_R(15) = subR[14] ^ tr;
SUBKEY_L(16) = subL[16]; /* FL(kl3) */
@@ -525,7 +513,7 @@ static void camellia_setup_tail(u32 *subkey, u32 *subL, u32 *subR, int max)
SUBKEY_R(17) = subR[17];
tl = subL[15] ^ (subR[15] & ~subR[17]);
dw = tl & subL[17], /* FLinv(kl4) */
- tr = subR[15] ^ ROL1(dw);
+ tr = subR[15] ^ rol32(dw, 1);
SUBKEY_L(18) = tl ^ subL[19]; /* round 13 */
SUBKEY_R(18) = tr ^ subR[19];
SUBKEY_L(19) = subL[18] ^ subL[20]; /* round 14 */
@@ -544,7 +532,7 @@ static void camellia_setup_tail(u32 *subkey, u32 *subL, u32 *subR, int max)
} else {
tl = subL[26] ^ (subR[26] & ~subR[24]);
dw = tl & subL[24], /* FL(kl5) */
- tr = subR[26] ^ ROL1(dw);
+ tr = subR[26] ^ rol32(dw, 1);
SUBKEY_L(23) = subL[22] ^ tl; /* round 18 */
SUBKEY_R(23) = subR[22] ^ tr;
SUBKEY_L(24) = subL[24]; /* FL(kl5) */
@@ -553,7 +541,7 @@ static void camellia_setup_tail(u32 *subkey, u32 *subL, u32 *subR, int max)
SUBKEY_R(25) = subR[25];
tl = subL[23] ^ (subR[23] & ~subR[25]);
dw = tl & subL[25], /* FLinv(kl6) */
- tr = subR[23] ^ ROL1(dw);
+ tr = subR[23] ^ rol32(dw, 1);
SUBKEY_L(26) = tl ^ subL[27]; /* round 19 */
SUBKEY_R(26) = tr ^ subR[27];
SUBKEY_L(27) = subL[26] ^ subL[28]; /* round 20 */
@@ -573,17 +561,17 @@ static void camellia_setup_tail(u32 *subkey, u32 *subL, u32 *subR, int max)
/* apply the inverse of the last half of P-function */
i = 2;
do {
- dw = SUBKEY_L(i + 0) ^ SUBKEY_R(i + 0); dw = ROL8(dw);/* round 1 */
+ dw = SUBKEY_L(i + 0) ^ SUBKEY_R(i + 0); dw = rol32(dw, 8);/* round 1 */
SUBKEY_R(i + 0) = SUBKEY_L(i + 0) ^ dw; SUBKEY_L(i + 0) = dw;
- dw = SUBKEY_L(i + 1) ^ SUBKEY_R(i + 1); dw = ROL8(dw);/* round 2 */
+ dw = SUBKEY_L(i + 1) ^ SUBKEY_R(i + 1); dw = rol32(dw, 8);/* round 2 */
SUBKEY_R(i + 1) = SUBKEY_L(i + 1) ^ dw; SUBKEY_L(i + 1) = dw;
- dw = SUBKEY_L(i + 2) ^ SUBKEY_R(i + 2); dw = ROL8(dw);/* round 3 */
+ dw = SUBKEY_L(i + 2) ^ SUBKEY_R(i + 2); dw = rol32(dw, 8);/* round 3 */
SUBKEY_R(i + 2) = SUBKEY_L(i + 2) ^ dw; SUBKEY_L(i + 2) = dw;
- dw = SUBKEY_L(i + 3) ^ SUBKEY_R(i + 3); dw = ROL8(dw);/* round 4 */
+ dw = SUBKEY_L(i + 3) ^ SUBKEY_R(i + 3); dw = rol32(dw, 8);/* round 4 */
SUBKEY_R(i + 3) = SUBKEY_L(i + 3) ^ dw; SUBKEY_L(i + 3) = dw;
- dw = SUBKEY_L(i + 4) ^ SUBKEY_R(i + 4); dw = ROL8(dw);/* round 5 */
+ dw = SUBKEY_L(i + 4) ^ SUBKEY_R(i + 4); dw = rol32(dw, 8);/* round 5 */
SUBKEY_R(i + 4) = SUBKEY_L(i + 4) ^ dw; SUBKEY_L(i + 4) = dw;
- dw = SUBKEY_L(i + 5) ^ SUBKEY_R(i + 5); dw = ROL8(dw);/* round 6 */
+ dw = SUBKEY_L(i + 5) ^ SUBKEY_R(i + 5); dw = rol32(dw, 8);/* round 6 */
SUBKEY_R(i + 5) = SUBKEY_L(i + 5) ^ dw; SUBKEY_L(i + 5) = dw;
i += 8;
} while (i < max);
@@ -599,10 +587,10 @@ static void camellia_setup128(const unsigned char *key, u32 *subkey)
/**
* k == kll || klr || krl || krr (|| is concatenation)
*/
- GETU32(kll, key );
- GETU32(klr, key + 4);
- GETU32(krl, key + 8);
- GETU32(krr, key + 12);
+ kll = get_unaligned_be32(key);
+ klr = get_unaligned_be32(key + 4);
+ krl = get_unaligned_be32(key + 8);
+ krr = get_unaligned_be32(key + 12);
/* generate KL dependent subkeys */
/* kw1 */
@@ -707,14 +695,14 @@ static void camellia_setup256(const unsigned char *key, u32 *subkey)
* key = (kll || klr || krl || krr || krll || krlr || krrl || krrr)
* (|| is concatenation)
*/
- GETU32(kll, key );
- GETU32(klr, key + 4);
- GETU32(krl, key + 8);
- GETU32(krr, key + 12);
- GETU32(krll, key + 16);
- GETU32(krlr, key + 20);
- GETU32(krrl, key + 24);
- GETU32(krrr, key + 28);
+ kll = get_unaligned_be32(key);
+ klr = get_unaligned_be32(key + 4);
+ krl = get_unaligned_be32(key + 8);
+ krr = get_unaligned_be32(key + 12);
+ krll = get_unaligned_be32(key + 16);
+ krlr = get_unaligned_be32(key + 20);
+ krrl = get_unaligned_be32(key + 24);
+ krrr = get_unaligned_be32(key + 28);
/* generate KL dependent subkeys */
/* kw1 */
@@ -870,13 +858,13 @@ static void camellia_setup192(const unsigned char *key, u32 *subkey)
t0 &= ll; \
t2 |= rr; \
rl ^= t2; \
- lr ^= ROL1(t0); \
+ lr ^= rol32(t0, 1); \
t3 = krl; \
t1 = klr; \
t3 &= rl; \
t1 |= lr; \
ll ^= t1; \
- rr ^= ROL1(t3); \
+ rr ^= rol32(t3, 1); \
} while(0)
#define CAMELLIA_ROUNDSM(xl, xr, kl, kr, yl, yr, il, ir) \
@@ -892,7 +880,7 @@ static void camellia_setup192(const unsigned char *key, u32 *subkey)
il ^= kl; \
ir ^= il ^ kr; \
yl ^= ir; \
- yr ^= ROR8(il) ^ ir; \
+ yr ^= ror32(il, 8) ^ ir; \
} while(0)
/* max = 24: 128bit encrypt, max = 32: 256bit encrypt */
diff --git a/crypto/crc32c.c b/crypto/crc32c.c
index 0dcf64a74e68..a882d9e4e63e 100644
--- a/crypto/crc32c.c
+++ b/crypto/crc32c.c
@@ -5,20 +5,23 @@
*
* This module file is a wrapper to invoke the lib/crc32c routines.
*
+ * Copyright (c) 2008 Herbert Xu <herbert@gondor.apana.org.au>
+ *
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the Free
* Software Foundation; either version 2 of the License, or (at your option)
* any later version.
*
*/
+
+#include <crypto/internal/hash.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/string.h>
-#include <linux/crypto.h>
#include <linux/crc32c.h>
#include <linux/kernel.h>
-#define CHKSUM_BLOCK_SIZE 32
+#define CHKSUM_BLOCK_SIZE 1
#define CHKSUM_DIGEST_SIZE 4
struct chksum_ctx {
@@ -71,7 +74,7 @@ static void chksum_final(struct crypto_tfm *tfm, u8 *out)
*(__le32 *)out = ~cpu_to_le32(mctx->crc);
}
-static int crc32c_cra_init(struct crypto_tfm *tfm)
+static int crc32c_cra_init_old(struct crypto_tfm *tfm)
{
struct chksum_ctx *mctx = crypto_tfm_ctx(tfm);
@@ -79,14 +82,14 @@ static int crc32c_cra_init(struct crypto_tfm *tfm)
return 0;
}
-static struct crypto_alg alg = {
+static struct crypto_alg old_alg = {
.cra_name = "crc32c",
.cra_flags = CRYPTO_ALG_TYPE_DIGEST,
.cra_blocksize = CHKSUM_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct chksum_ctx),
.cra_module = THIS_MODULE,
- .cra_list = LIST_HEAD_INIT(alg.cra_list),
- .cra_init = crc32c_cra_init,
+ .cra_list = LIST_HEAD_INIT(old_alg.cra_list),
+ .cra_init = crc32c_cra_init_old,
.cra_u = {
.digest = {
.dia_digestsize= CHKSUM_DIGEST_SIZE,
@@ -98,14 +101,125 @@ static struct crypto_alg alg = {
}
};
+/*
+ * Setting the seed allows arbitrary accumulators and flexible XOR policy
+ * If your algorithm starts with ~0, then XOR with ~0 before you set
+ * the seed.
+ */
+static int crc32c_setkey(struct crypto_ahash *hash, const u8 *key,
+ unsigned int keylen)
+{
+ u32 *mctx = crypto_ahash_ctx(hash);
+
+ if (keylen != sizeof(u32)) {
+ crypto_ahash_set_flags(hash, CRYPTO_TFM_RES_BAD_KEY_LEN);
+ return -EINVAL;
+ }
+ *mctx = le32_to_cpup((__le32 *)key);
+ return 0;
+}
+
+static int crc32c_init(struct ahash_request *req)
+{
+ u32 *mctx = crypto_ahash_ctx(crypto_ahash_reqtfm(req));
+ u32 *crcp = ahash_request_ctx(req);
+
+ *crcp = *mctx;
+ return 0;
+}
+
+static int crc32c_update(struct ahash_request *req)
+{
+ struct crypto_hash_walk walk;
+ u32 *crcp = ahash_request_ctx(req);
+ u32 crc = *crcp;
+ int nbytes;
+
+ for (nbytes = crypto_hash_walk_first(req, &walk); nbytes;
+ nbytes = crypto_hash_walk_done(&walk, 0))
+ crc = crc32c(crc, walk.data, nbytes);
+
+ *crcp = crc;
+ return 0;
+}
+
+static int crc32c_final(struct ahash_request *req)
+{
+ u32 *crcp = ahash_request_ctx(req);
+
+ *(__le32 *)req->result = ~cpu_to_le32p(crcp);
+ return 0;
+}
+
+static int crc32c_digest(struct ahash_request *req)
+{
+ struct crypto_hash_walk walk;
+ u32 *mctx = crypto_ahash_ctx(crypto_ahash_reqtfm(req));
+ u32 crc = *mctx;
+ int nbytes;
+
+ for (nbytes = crypto_hash_walk_first(req, &walk); nbytes;
+ nbytes = crypto_hash_walk_done(&walk, 0))
+ crc = crc32c(crc, walk.data, nbytes);
+
+ *(__le32 *)req->result = ~cpu_to_le32(crc);
+ return 0;
+}
+
+static int crc32c_cra_init(struct crypto_tfm *tfm)
+{
+ u32 *key = crypto_tfm_ctx(tfm);
+
+ *key = ~0;
+
+ tfm->crt_ahash.reqsize = sizeof(u32);
+
+ return 0;
+}
+
+static struct crypto_alg alg = {
+ .cra_name = "crc32c",
+ .cra_driver_name = "crc32c-generic",
+ .cra_priority = 100,
+ .cra_flags = CRYPTO_ALG_TYPE_AHASH,
+ .cra_blocksize = CHKSUM_BLOCK_SIZE,
+ .cra_alignmask = 3,
+ .cra_ctxsize = sizeof(u32),
+ .cra_module = THIS_MODULE,
+ .cra_list = LIST_HEAD_INIT(alg.cra_list),
+ .cra_init = crc32c_cra_init,
+ .cra_type = &crypto_ahash_type,
+ .cra_u = {
+ .ahash = {
+ .digestsize = CHKSUM_DIGEST_SIZE,
+ .setkey = crc32c_setkey,
+ .init = crc32c_init,
+ .update = crc32c_update,
+ .final = crc32c_final,
+ .digest = crc32c_digest,
+ }
+ }
+};
+
static int __init crc32c_mod_init(void)
{
- return crypto_register_alg(&alg);
+ int err;
+
+ err = crypto_register_alg(&old_alg);
+ if (err)
+ return err;
+
+ err = crypto_register_alg(&alg);
+ if (err)
+ crypto_unregister_alg(&old_alg);
+
+ return err;
}
static void __exit crc32c_mod_fini(void)
{
crypto_unregister_alg(&alg);
+ crypto_unregister_alg(&old_alg);
}
module_init(crc32c_mod_init);
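
The comment above the new crc32c_setkey() describes the seed as allowing "arbitrary accumulators and flexible XOR policy". Concretely: crc32c_final() inverts the accumulator on output, so a caller that wants to resume hashing from a previously returned CRC has to XOR that value with ~0 before feeding it back in as the key. A hedged fragment illustrating this, assuming a crypto_ahash_setkey() wrapper exists for the ->setkey() hook shown above:

/*
 * Hedged fragment, not part of this patch: resume a crc32c computation
 * from a previously returned (already inverted) result.
 */
static int crc32c_seed_from_result(struct crypto_ahash *tfm, __le32 prev_result)
{
	__le32 seed = prev_result ^ cpu_to_le32(~0u);	/* undo the final inversion */

	return crypto_ahash_setkey(tfm, (const u8 *)&seed, sizeof(seed));
}
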
diff --git a/crypto/cryptd.c b/crypto/cryptd.c
index b150de562057..d29e06b350ff 100644
--- a/crypto/cryptd.c
+++ b/crypto/cryptd.c
@@ -11,6 +11,7 @@
*/
#include <crypto/algapi.h>
+#include <crypto/internal/hash.h>
#include <linux/err.h>
#include <linux/init.h>
#include <linux/kernel.h>
@@ -45,6 +46,13 @@ struct cryptd_blkcipher_request_ctx {
crypto_completion_t complete;
};
+struct cryptd_hash_ctx {
+ struct crypto_hash *child;
+};
+
+struct cryptd_hash_request_ctx {
+ crypto_completion_t complete;
+};
static inline struct cryptd_state *cryptd_get_state(struct crypto_tfm *tfm)
{
@@ -82,10 +90,8 @@ static void cryptd_blkcipher_crypt(struct ablkcipher_request *req,
rctx = ablkcipher_request_ctx(req);
- if (unlikely(err == -EINPROGRESS)) {
- rctx->complete(&req->base, err);
- return;
- }
+ if (unlikely(err == -EINPROGRESS))
+ goto out;
desc.tfm = child;
desc.info = req->info;
@@ -95,8 +101,9 @@ static void cryptd_blkcipher_crypt(struct ablkcipher_request *req,
req->base.complete = rctx->complete;
+out:
local_bh_disable();
- req->base.complete(&req->base, err);
+ rctx->complete(&req->base, err);
local_bh_enable();
}
@@ -261,6 +268,240 @@ out_put_alg:
return inst;
}
+static int cryptd_hash_init_tfm(struct crypto_tfm *tfm)
+{
+ struct crypto_instance *inst = crypto_tfm_alg_instance(tfm);
+ struct cryptd_instance_ctx *ictx = crypto_instance_ctx(inst);
+ struct crypto_spawn *spawn = &ictx->spawn;
+ struct cryptd_hash_ctx *ctx = crypto_tfm_ctx(tfm);
+ struct crypto_hash *cipher;
+
+ cipher = crypto_spawn_hash(spawn);
+ if (IS_ERR(cipher))
+ return PTR_ERR(cipher);
+
+ ctx->child = cipher;
+ tfm->crt_ahash.reqsize =
+ sizeof(struct cryptd_hash_request_ctx);
+ return 0;
+}
+
+static void cryptd_hash_exit_tfm(struct crypto_tfm *tfm)
+{
+ struct cryptd_hash_ctx *ctx = crypto_tfm_ctx(tfm);
+ struct cryptd_state *state = cryptd_get_state(tfm);
+ int active;
+
+ mutex_lock(&state->mutex);
+ active = ahash_tfm_in_queue(&state->queue,
+ __crypto_ahash_cast(tfm));
+ mutex_unlock(&state->mutex);
+
+ BUG_ON(active);
+
+ crypto_free_hash(ctx->child);
+}
+
+static int cryptd_hash_setkey(struct crypto_ahash *parent,
+ const u8 *key, unsigned int keylen)
+{
+ struct cryptd_hash_ctx *ctx = crypto_ahash_ctx(parent);
+ struct crypto_hash *child = ctx->child;
+ int err;
+
+ crypto_hash_clear_flags(child, CRYPTO_TFM_REQ_MASK);
+ crypto_hash_set_flags(child, crypto_ahash_get_flags(parent) &
+ CRYPTO_TFM_REQ_MASK);
+ err = crypto_hash_setkey(child, key, keylen);
+ crypto_ahash_set_flags(parent, crypto_hash_get_flags(child) &
+ CRYPTO_TFM_RES_MASK);
+ return err;
+}
+
+static int cryptd_hash_enqueue(struct ahash_request *req,
+ crypto_completion_t complete)
+{
+ struct cryptd_hash_request_ctx *rctx = ahash_request_ctx(req);
+ struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
+ struct cryptd_state *state =
+ cryptd_get_state(crypto_ahash_tfm(tfm));
+ int err;
+
+ rctx->complete = req->base.complete;
+ req->base.complete = complete;
+
+ spin_lock_bh(&state->lock);
+ err = ahash_enqueue_request(&state->queue, req);
+ spin_unlock_bh(&state->lock);
+
+ wake_up_process(state->task);
+ return err;
+}
+
+static void cryptd_hash_init(struct crypto_async_request *req_async, int err)
+{
+ struct cryptd_hash_ctx *ctx = crypto_tfm_ctx(req_async->tfm);
+ struct crypto_hash *child = ctx->child;
+ struct ahash_request *req = ahash_request_cast(req_async);
+ struct cryptd_hash_request_ctx *rctx;
+ struct hash_desc desc;
+
+ rctx = ahash_request_ctx(req);
+
+ if (unlikely(err == -EINPROGRESS))
+ goto out;
+
+ desc.tfm = child;
+ desc.flags = CRYPTO_TFM_REQ_MAY_SLEEP;
+
+ err = crypto_hash_crt(child)->init(&desc);
+
+ req->base.complete = rctx->complete;
+
+out:
+ local_bh_disable();
+ rctx->complete(&req->base, err);
+ local_bh_enable();
+}
+
+static int cryptd_hash_init_enqueue(struct ahash_request *req)
+{
+ return cryptd_hash_enqueue(req, cryptd_hash_init);
+}
+
+static void cryptd_hash_update(struct crypto_async_request *req_async, int err)
+{
+ struct cryptd_hash_ctx *ctx = crypto_tfm_ctx(req_async->tfm);
+ struct crypto_hash *child = ctx->child;
+ struct ahash_request *req = ahash_request_cast(req_async);
+ struct cryptd_hash_request_ctx *rctx;
+ struct hash_desc desc;
+
+ rctx = ahash_request_ctx(req);
+
+ if (unlikely(err == -EINPROGRESS))
+ goto out;
+
+ desc.tfm = child;
+ desc.flags = CRYPTO_TFM_REQ_MAY_SLEEP;
+
+ err = crypto_hash_crt(child)->update(&desc,
+ req->src,
+ req->nbytes);
+
+ req->base.complete = rctx->complete;
+
+out:
+ local_bh_disable();
+ rctx->complete(&req->base, err);
+ local_bh_enable();
+}
+
+static int cryptd_hash_update_enqueue(struct ahash_request *req)
+{
+ return cryptd_hash_enqueue(req, cryptd_hash_update);
+}
+
+static void cryptd_hash_final(struct crypto_async_request *req_async, int err)
+{
+ struct cryptd_hash_ctx *ctx = crypto_tfm_ctx(req_async->tfm);
+ struct crypto_hash *child = ctx->child;
+ struct ahash_request *req = ahash_request_cast(req_async);
+ struct cryptd_hash_request_ctx *rctx;
+ struct hash_desc desc;
+
+ rctx = ahash_request_ctx(req);
+
+ if (unlikely(err == -EINPROGRESS))