summaryrefslogtreecommitdiffstats
path: root/include/crypto/cbc.h
blob: 2b6422db42e20af006e9f33d7046f68c9fd1c1fb (plain)
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
 * CBC: Cipher Block Chaining mode
 *
 * Copyright (c) 2016 Herbert Xu <herbert@gondor.apana.org.au>
 */

#ifndef _CRYPTO_CBC_H
#define _CRYPTO_CBC_H

#include <crypto/internal/skcipher.h>
#include <linux/string.h>
#include <linux/types.h>

/*
 * Encrypt a walk segment where source and destination buffers differ.
 *
 * Classic CBC chaining: each plaintext block is XORed with the previous
 * ciphertext block (or the IV for the first block) before being run
 * through the block cipher @fn.  walk->iv is used as the running
 * chaining buffer and ends up holding the last ciphertext block.
 *
 * Returns the number of trailing bytes (< one block) left unprocessed.
 */
static inline int crypto_cbc_encrypt_segment(
	struct skcipher_walk *walk, struct crypto_skcipher *tfm,
	void (*fn)(struct crypto_skcipher *, const u8 *, u8 *))
{
	unsigned int bsize = crypto_skcipher_blocksize(tfm);
	unsigned int remain = walk->nbytes;
	u8 *in = walk->src.virt.addr;
	u8 *out = walk->dst.virt.addr;
	u8 *chain = walk->iv;

	do {
		/* Fold the plaintext into the chaining value, encrypt it,
		 * then remember the ciphertext as the next chaining value.
		 */
		crypto_xor(chain, in, bsize);
		fn(tfm, chain, out);
		memcpy(chain, out, bsize);

		in += bsize;
		out += bsize;
		remain -= bsize;
	} while (remain >= bsize);

	return remain;
}

/*
 * Encrypt a walk segment in place (source buffer == destination buffer).
 *
 * Instead of copying each ciphertext block into walk->iv as we go, we
 * just track a pointer to the previous ciphertext block and copy the
 * final one out to walk->iv once the loop is done.
 *
 * Returns the number of trailing bytes (< one block) left unprocessed.
 */
static inline int crypto_cbc_encrypt_inplace(
	struct skcipher_walk *walk, struct crypto_skcipher *tfm,
	void (*fn)(struct crypto_skcipher *, const u8 *, u8 *))
{
	unsigned int bsize = crypto_skcipher_blocksize(tfm);
	unsigned int remain = walk->nbytes;
	u8 *blk = walk->src.virt.addr;
	u8 *prev = walk->iv;

	do {
		/* XOR in the previous ciphertext (or IV), encrypt in place. */
		crypto_xor(blk, prev, bsize);
		fn(tfm, blk, blk);
		prev = blk;

		blk += bsize;
		remain -= bsize;
	} while (remain >= bsize);

	/* Preserve the last ciphertext block as the IV for the next call. */
	memcpy(walk->iv, prev, bsize);

	return remain;
}

/*
 * Drive a full CBC encryption over @req using the single-block encrypt
 * helper @fn, dispatching each walk step to the in-place or split-buffer
 * segment routine as appropriate.
 *
 * Returns 0 on success or a negative errno from the skcipher walk.
 */
static inline int crypto_cbc_encrypt_walk(struct skcipher_request *req,
					  void (*fn)(struct crypto_skcipher *,
						     const u8 *, u8 *))
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
	struct skcipher_walk walk;
	int err;

	err = skcipher_walk_virt(&walk, req, false);

	while (walk.nbytes != 0) {
		bool in_place = walk.src.virt.addr == walk.dst.virt.addr;

		err = in_place ? crypto_cbc_encrypt_inplace(&walk, tfm, fn)
			       : crypto_cbc_encrypt_segment(&walk, tfm, fn);
		err = skcipher_walk_done(&walk, err);
	}

	return err;
}

/*
 * Decrypt a walk segment where source and destination buffers differ.
 *
 * Each ciphertext block is decrypted with @fn and then XORed with the
 * previous ciphertext block (or the IV for the first block).  Because
 * the source buffer is untouched, we only need to track a pointer to
 * the previous ciphertext block and save the final one into walk->iv.
 *
 * Returns the number of trailing bytes (< one block) left unprocessed.
 */
static inline int crypto_cbc_decrypt_segment(
	struct skcipher_walk *walk, struct crypto_skcipher *tfm,
	void (*fn)(struct crypto_skcipher *, const u8 *, u8 *))
{
	unsigned int bsize = crypto_skcipher_blocksize(tfm);
	unsigned int remain = walk->nbytes;
	u8 *in = walk->src.virt.addr;
	u8 *out = walk->dst.virt.addr;
	u8 *prev = walk->iv;

	do {
		/* Decrypt, then undo the chaining XOR. */
		fn(tfm, in, out);
		crypto_xor(out, prev, bsize);
		prev = in;

		in += bsize;
		out += bsize;
		remain -= bsize;
	} while (remain >= bsize);

	/* The last ciphertext block becomes the IV for the next call. */
	memcpy(walk->iv, prev, bsize);

	return remain;
}

/*
 * Decrypt a walk segment in place (source buffer == destination buffer).
 *
 * Because decryption overwrites the ciphertext that the next block's
 * chaining XOR needs, the blocks are processed back to front: every
 * block is decrypted in place, then XORed with the (still intact)
 * ciphertext of the block before it.  The very last ciphertext block is
 * saved up front in last_iv so it can become walk->iv for the next call.
 *
 * Returns the number of trailing bytes (< one block) left unprocessed.
 */
static inline int crypto_cbc_decrypt_inplace(
	struct skcipher_walk *walk, struct crypto_skcipher *tfm,
	void (*fn)(struct crypto_skcipher *, const u8 *, u8 *))
{
	unsigned int bsize = crypto_skcipher_blocksize(tfm);
	unsigned int nbytes = walk->nbytes;
	u8 *src = walk->src.virt.addr;
	u8 last_iv[MAX_CIPHER_BLOCKSIZE];

	/* Start of the last block. */
	src += nbytes - (nbytes & (bsize - 1)) - bsize;
	memcpy(last_iv, src, bsize);

	/* Walk backwards: decrypt each block, then XOR in the previous
	 * ciphertext block, stopping once the first block is decrypted.
	 */
	for (;;) {
		fn(tfm, src, src);
		if ((nbytes -= bsize) < bsize)
			break;
		crypto_xor(src, src - bsize, bsize);
		src -= bsize;
	}

	/* First block chains with the caller-supplied IV, and the saved
	 * last ciphertext block becomes the IV for the next segment.
	 */
	crypto_xor(src, walk->iv, bsize);
	memcpy(walk->iv, last_iv, bsize);

	return nbytes;
}

/*
 * Decrypt one walk step, choosing the split-buffer or in-place helper
 * depending on whether source and destination share a buffer.
 */
static inline int crypto_cbc_decrypt_blocks(
	struct skcipher_walk *walk, struct crypto_skcipher *tfm,
	void (*fn)(struct crypto_skcipher *, const u8 *, u8 *))
{
	if (walk->src.virt.addr != walk->dst.virt.addr)
		return crypto_cbc_decrypt_segment(walk, tfm, fn);

	return crypto_cbc_decrypt_inplace(walk, tfm, fn);
}

#endif	/* _CRYPTO_CBC_H */
">device *dev) { struct tifm_dev *sock = container_of(dev, struct tifm_dev, dev); struct tifm_driver *drv = container_of(dev->driver, struct tifm_driver, driver); if (dev->driver && drv->resume) return drv->resume(sock); return 0; } #else #define tifm_device_suspend NULL #define tifm_device_resume NULL #endif /* CONFIG_PM */ static ssize_t type_show(struct device *dev, struct device_attribute *attr, char *buf) { struct tifm_dev *sock = container_of(dev, struct tifm_dev, dev); return sprintf(buf, "%x", sock->type); } static DEVICE_ATTR_RO(type); static struct attribute *tifm_dev_attrs[] = { &dev_attr_type.attr, NULL, }; ATTRIBUTE_GROUPS(tifm_dev); static struct bus_type tifm_bus_type = { .name = "tifm", .dev_groups = tifm_dev_groups, .match = tifm_bus_match, .uevent = tifm_uevent, .probe = tifm_device_probe, .remove = tifm_device_remove, .suspend = tifm_device_suspend, .resume = tifm_device_resume }; static void tifm_free(struct device *dev) { struct tifm_adapter *fm = container_of(dev, struct tifm_adapter, dev); kfree(fm); } static struct class tifm_adapter_class = { .name = "tifm_adapter", .dev_release = tifm_free }; struct tifm_adapter *tifm_alloc_adapter(unsigned int num_sockets, struct device *dev) { struct tifm_adapter *fm; fm = kzalloc(sizeof(struct tifm_adapter) + sizeof(struct tifm_dev*) * num_sockets, GFP_KERNEL); if (fm) { fm->dev.class = &tifm_adapter_class; fm->dev.parent = dev; device_initialize(&fm->dev); spin_lock_init(&fm->lock); fm->num_sockets = num_sockets; } return fm; } EXPORT_SYMBOL(tifm_alloc_adapter); int tifm_add_adapter(struct tifm_adapter *fm) { int rc; idr_preload(GFP_KERNEL); spin_lock(&tifm_adapter_lock); rc = idr_alloc(&tifm_adapter_idr, fm, 0, 0, GFP_NOWAIT); if (rc >= 0) fm->id = rc; spin_unlock(&tifm_adapter_lock); idr_preload_end(); if (rc < 0) return rc; dev_set_name(&fm->dev, "tifm%u", fm->id); rc = device_add(&fm->dev); if (rc) { spin_lock(&tifm_adapter_lock); idr_remove(&tifm_adapter_idr, fm->id); 
spin_unlock(&tifm_adapter_lock); } return rc; } EXPORT_SYMBOL(tifm_add_adapter); void tifm_remove_adapter(struct tifm_adapter *fm) { unsigned int cnt; flush_workqueue(workqueue); for (cnt = 0; cnt < fm->num_sockets; ++cnt) { if (fm->sockets[cnt]) device_unregister(&fm->sockets[cnt]->dev); } spin_lock(&tifm_adapter_lock); idr_remove(&tifm_adapter_idr, fm->id); spin_unlock(&tifm_adapter_lock); device_del(&fm->dev); } EXPORT_SYMBOL(tifm_remove_adapter); void tifm_free_adapter(struct tifm_adapter *fm) { put_device(&fm->dev); } EXPORT_SYMBOL(tifm_free_adapter); void tifm_free_device(struct device *dev) { struct tifm_dev *sock = container_of(dev, struct tifm_dev, dev); kfree(sock); } EXPORT_SYMBOL(tifm_free_device); struct tifm_dev *tifm_alloc_device(struct tifm_adapter *fm, unsigned int id, unsigned char type) { struct tifm_dev *sock = NULL; if (!tifm_media_type_name(type, 0)) return sock; sock = kzalloc(sizeof