author    Linus Torvalds <torvalds@linux-foundation.org>  2015-06-25 14:06:55 -0700
committer Linus Torvalds <torvalds@linux-foundation.org>  2015-06-25 14:06:55 -0700
commit    d857da7b70b3a38a846211b30442aad10ce577bd (patch)
tree      c18aca19c2e9f28ad51d19d1efd051d8318d797f /fs/ext4
parent    77d431641e2b402fe98af3540e8fb1c77bf92c25 (diff)
parent    a2fd66d069d86d793e9d39d4079b96f46d13f237 (diff)
Merge tag 'ext4_for_linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tytso/ext4
Pull ext4 updates from Ted Ts'o:
 "A very large number of cleanups and bug fixes --- in particular for
  the ext4 encryption patches, which is a new feature added in the last
  merge window.  Also fix a number of long-standing xfstest failures.
  (Quota writes failing due to ENOSPC, a race between truncate and
  writepage in data=journalled mode that was causing generic/068 to
  fail, and other corner cases.)

  Also add support for FALLOC_FL_INSERT_RANGE, and improve jbd2
  performance by eliminating locking when a buffer is modified more
  than once during a transaction (which is very common for allocation
  bitmaps, for example), in which case the state of the journalled
  buffer head doesn't need to change"

[ I renamed "ext4_follow_link()" to "ext4_encrypted_follow_link()" in
  the merge resolution, to make it clear that that function is _only_
  used for encrypted symlinks.  The function doesn't actually work for
  non-encrypted symlinks at all, and they use the generic helpers
   - Linus ]

* tag 'ext4_for_linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tytso/ext4: (52 commits)
  ext4: set lazytime on remount if MS_LAZYTIME is set by mount
  ext4: only call ext4_truncate when size <= isize
  ext4: make online defrag error reporting consistent
  ext4: minor cleanup of ext4_da_reserve_space()
  ext4: don't retry file block mapping on bigalloc fs with non-extent file
  ext4: prevent ext4_quota_write() from failing due to ENOSPC
  ext4: call sync_blockdev() before invalidate_bdev() in put_super()
  jbd2: speedup jbd2_journal_dirty_metadata()
  jbd2: get rid of open coded allocation retry loop
  ext4: improve warning directory handling messages
  jbd2: fix ocfs2 corrupt when updating journal superblock fails
  ext4: mballoc: avoid 20-argument function call
  ext4: wait for existing dio workers in ext4_alloc_file_blocks()
  ext4: recalculate journal credits as inode depth changes
  jbd2: use GFP_NOFS in jbd2_cleanup_journal_tail()
  ext4: use swap() in mext_page_double_lock()
  ext4: use swap() in memswap()
  ext4: fix race between truncate and __ext4_journalled_writepage()
  ext4 crypto: fail the mount if blocksize != pagesize
  ext4: Add support FALLOC_FL_INSERT_RANGE for fallocate
  ...
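For context on the FALLOC_FL_INSERT_RANGE support added by this series, here is a minimal userspace sketch of the new fallocate mode; it is not part of the patch itself, the file name, offset, and length are illustrative, and a 4 KiB filesystem block size is assumed (both offset and length must be multiples of the block size).

#define _GNU_SOURCE
#include <fcntl.h>
#include <linux/falloc.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	/* "testfile" is an illustrative, pre-existing file on an ext4 filesystem */
	int fd = open("testfile", O_RDWR);
	if (fd < 0) {
		perror("open");
		return 1;
	}
	/* Insert an 8 KiB unallocated range at offset 4 KiB: existing data at
	 * and beyond offset 4096 is shifted toward higher offsets by remapping
	 * extents rather than copying, and the file grows by 8192 bytes. */
	if (fallocate(fd, FALLOC_FL_INSERT_RANGE, 4096, 8192) < 0)
		perror("fallocate(FALLOC_FL_INSERT_RANGE)");
	close(fd);
	return 0;
}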
Diffstat (limited to 'fs/ext4')
-rw-r--r--  fs/ext4/Kconfig          |    1
-rw-r--r--  fs/ext4/balloc.c         |    4
-rw-r--r--  fs/ext4/crypto.c         |  211
-rw-r--r--  fs/ext4/crypto_fname.c   |  490
-rw-r--r--  fs/ext4/crypto_key.c     |  152
-rw-r--r--  fs/ext4/crypto_policy.c  |   87
-rw-r--r--  fs/ext4/dir.c            |   29
-rw-r--r--  fs/ext4/ext4.h           |  159
-rw-r--r--  fs/ext4/ext4_crypto.h    |   51
-rw-r--r--  fs/ext4/extents.c        |  339
-rw-r--r--  fs/ext4/file.c           |   19
-rw-r--r--  fs/ext4/ialloc.c         |   45
-rw-r--r--  fs/ext4/indirect.c       |    4
-rw-r--r--  fs/ext4/inline.c         |   31
-rw-r--r--  fs/ext4/inode.c          |   93
-rw-r--r--  fs/ext4/ioctl.c          |   11
-rw-r--r--  fs/ext4/mballoc.c        |   43
-rw-r--r--  fs/ext4/move_extent.c    |   19
-rw-r--r--  fs/ext4/namei.c          |  542
-rw-r--r--  fs/ext4/page-io.c        |    2
-rw-r--r--  fs/ext4/readpage.c       |   10
-rw-r--r--  fs/ext4/super.c          |   60
-rw-r--r--  fs/ext4/symlink.c        |   20
23 files changed, 1227 insertions, 1195 deletions
diff --git a/fs/ext4/Kconfig b/fs/ext4/Kconfig
index 024f2284d3f6..bf8bc8aba471 100644
--- a/fs/ext4/Kconfig
+++ b/fs/ext4/Kconfig
@@ -72,6 +72,7 @@ config EXT4_ENCRYPTION
select CRYPTO_ECB
select CRYPTO_XTS
select CRYPTO_CTS
+ select CRYPTO_CTR
select CRYPTO_SHA256
select KEYS
select ENCRYPTED_KEYS
diff --git a/fs/ext4/balloc.c b/fs/ext4/balloc.c
index 955bf49a7945..cd6ea29be645 100644
--- a/fs/ext4/balloc.c
+++ b/fs/ext4/balloc.c
@@ -369,7 +369,7 @@ static void ext4_validate_block_bitmap(struct super_block *sb,
struct ext4_group_info *grp = ext4_get_group_info(sb, block_group);
struct ext4_sb_info *sbi = EXT4_SB(sb);
- if (buffer_verified(bh))
+ if (buffer_verified(bh) || EXT4_MB_GRP_BBITMAP_CORRUPT(grp))
return;
ext4_lock_group(sb, block_group);
@@ -446,7 +446,7 @@ ext4_read_block_bitmap_nowait(struct super_block *sb, ext4_group_t block_group)
unlock_buffer(bh);
if (err)
ext4_error(sb, "Checksum bad for grp %u", block_group);
- return bh;
+ goto verify;
}
ext4_unlock_group(sb, block_group);
if (buffer_uptodate(bh)) {
diff --git a/fs/ext4/crypto.c b/fs/ext4/crypto.c
index 8ff15273ab0c..45731558138c 100644
--- a/fs/ext4/crypto.c
+++ b/fs/ext4/crypto.c
@@ -55,6 +55,9 @@ static mempool_t *ext4_bounce_page_pool;
static LIST_HEAD(ext4_free_crypto_ctxs);
static DEFINE_SPINLOCK(ext4_crypto_ctx_lock);
+static struct kmem_cache *ext4_crypto_ctx_cachep;
+struct kmem_cache *ext4_crypt_info_cachep;
+
/**
* ext4_release_crypto_ctx() - Releases an encryption context
* @ctx: The encryption context to release.
@@ -68,18 +71,12 @@ void ext4_release_crypto_ctx(struct ext4_crypto_ctx *ctx)
{
unsigned long flags;
- if (ctx->bounce_page) {
- if (ctx->flags & EXT4_BOUNCE_PAGE_REQUIRES_FREE_ENCRYPT_FL)
- __free_page(ctx->bounce_page);
- else
- mempool_free(ctx->bounce_page, ext4_bounce_page_pool);
- ctx->bounce_page = NULL;
- }
- ctx->control_page = NULL;
+ if (ctx->flags & EXT4_WRITE_PATH_FL && ctx->w.bounce_page)
+ mempool_free(ctx->w.bounce_page, ext4_bounce_page_pool);
+ ctx->w.bounce_page = NULL;
+ ctx->w.control_page = NULL;
if (ctx->flags & EXT4_CTX_REQUIRES_FREE_ENCRYPT_FL) {
- if (ctx->tfm)
- crypto_free_tfm(ctx->tfm);
- kfree(ctx);
+ kmem_cache_free(ext4_crypto_ctx_cachep, ctx);
} else {
spin_lock_irqsave(&ext4_crypto_ctx_lock, flags);
list_add(&ctx->free_list, &ext4_free_crypto_ctxs);
@@ -88,23 +85,6 @@ void ext4_release_crypto_ctx(struct ext4_crypto_ctx *ctx)
}
/**
- * ext4_alloc_and_init_crypto_ctx() - Allocates and inits an encryption context
- * @mask: The allocation mask.
- *
- * Return: An allocated and initialized encryption context on success. An error
- * value or NULL otherwise.
- */
-static struct ext4_crypto_ctx *ext4_alloc_and_init_crypto_ctx(gfp_t mask)
-{
- struct ext4_crypto_ctx *ctx = kzalloc(sizeof(struct ext4_crypto_ctx),
- mask);
-
- if (!ctx)
- return ERR_PTR(-ENOMEM);
- return ctx;
-}
-
-/**
* ext4_get_crypto_ctx() - Gets an encryption context
* @inode: The inode for which we are doing the crypto
*
@@ -118,10 +98,10 @@ struct ext4_crypto_ctx *ext4_get_crypto_ctx(struct inode *inode)
struct ext4_crypto_ctx *ctx = NULL;
int res = 0;
unsigned long flags;
- struct ext4_encryption_key *key = &EXT4_I(inode)->i_encryption_key;
+ struct ext4_crypt_info *ci = EXT4_I(inode)->i_crypt_info;
- if (!ext4_read_workqueue)
- ext4_init_crypto();
+ if (ci == NULL)
+ return ERR_PTR(-ENOKEY);
/*
* We first try getting the ctx from a free list because in
@@ -140,50 +120,16 @@ struct ext4_crypto_ctx *ext4_get_crypto_ctx(struct inode *inode)
list_del(&ctx->free_list);
spin_unlock_irqrestore(&ext4_crypto_ctx_lock, flags);
if (!ctx) {
- ctx = ext4_alloc_and_init_crypto_ctx(GFP_NOFS);
- if (IS_ERR(ctx)) {
- res = PTR_ERR(ctx);
+ ctx = kmem_cache_zalloc(ext4_crypto_ctx_cachep, GFP_NOFS);
+ if (!ctx) {
+ res = -ENOMEM;
goto out;
}
ctx->flags |= EXT4_CTX_REQUIRES_FREE_ENCRYPT_FL;
} else {
ctx->flags &= ~EXT4_CTX_REQUIRES_FREE_ENCRYPT_FL;
}
-
- /* Allocate a new Crypto API context if we don't already have
- * one or if it isn't the right mode. */
- BUG_ON(key->mode == EXT4_ENCRYPTION_MODE_INVALID);
- if (ctx->tfm && (ctx->mode != key->mode)) {
- crypto_free_tfm(ctx->tfm);
- ctx->tfm = NULL;
- ctx->mode = EXT4_ENCRYPTION_MODE_INVALID;
- }
- if (!ctx->tfm) {
- switch (key->mode) {
- case EXT4_ENCRYPTION_MODE_AES_256_XTS:
- ctx->tfm = crypto_ablkcipher_tfm(
- crypto_alloc_ablkcipher("xts(aes)", 0, 0));
- break;
- case EXT4_ENCRYPTION_MODE_AES_256_GCM:
- /* TODO(mhalcrow): AEAD w/ gcm(aes);
- * crypto_aead_setauthsize() */
- ctx->tfm = ERR_PTR(-ENOTSUPP);
- break;
- default:
- BUG();
- }
- if (IS_ERR_OR_NULL(ctx->tfm)) {
- res = PTR_ERR(ctx->tfm);
- ctx->tfm = NULL;
- goto out;
- }
- ctx->mode = key->mode;
- }
- BUG_ON(key->size != ext4_encryption_key_size(key->mode));
-
- /* There shouldn't be a bounce page attached to the crypto
- * context at this point. */
- BUG_ON(ctx->bounce_page);
+ ctx->flags &= ~EXT4_WRITE_PATH_FL;
out:
if (res) {
@@ -204,20 +150,8 @@ void ext4_exit_crypto(void)
{
struct ext4_crypto_ctx *pos, *n;
- list_for_each_entry_safe(pos, n, &ext4_free_crypto_ctxs, free_list) {
- if (pos->bounce_page) {
- if (pos->flags &
- EXT4_BOUNCE_PAGE_REQUIRES_FREE_ENCRYPT_FL) {
- __free_page(pos->bounce_page);
- } else {
- mempool_free(pos->bounce_page,
- ext4_bounce_page_pool);
- }
- }
- if (pos->tfm)
- crypto_free_tfm(pos->tfm);
- kfree(pos);
- }
+ list_for_each_entry_safe(pos, n, &ext4_free_crypto_ctxs, free_list)
+ kmem_cache_free(ext4_crypto_ctx_cachep, pos);
INIT_LIST_HEAD(&ext4_free_crypto_ctxs);
if (ext4_bounce_page_pool)
mempool_destroy(ext4_bounce_page_pool);
@@ -225,6 +159,12 @@ void ext4_exit_crypto(void)
if (ext4_read_workqueue)
destroy_workqueue(ext4_read_workqueue);
ext4_read_workqueue = NULL;
+ if (ext4_crypto_ctx_cachep)
+ kmem_cache_destroy(ext4_crypto_ctx_cachep);
+ ext4_crypto_ctx_cachep = NULL;
+ if (ext4_crypt_info_cachep)
+ kmem_cache_destroy(ext4_crypt_info_cachep);
+ ext4_crypt_info_cachep = NULL;
}
/**
@@ -237,23 +177,31 @@ void ext4_exit_crypto(void)
*/
int ext4_init_crypto(void)
{
- int i, res;
+ int i, res = -ENOMEM;
mutex_lock(&crypto_init);
if (ext4_read_workqueue)
goto already_initialized;
ext4_read_workqueue = alloc_workqueue("ext4_crypto", WQ_HIGHPRI, 0);
- if (!ext4_read_workqueue) {
- res = -ENOMEM;
+ if (!ext4_read_workqueue)
+ goto fail;
+
+ ext4_crypto_ctx_cachep = KMEM_CACHE(ext4_crypto_ctx,
+ SLAB_RECLAIM_ACCOUNT);
+ if (!ext4_crypto_ctx_cachep)
+ goto fail;
+
+ ext4_crypt_info_cachep = KMEM_CACHE(ext4_crypt_info,
+ SLAB_RECLAIM_ACCOUNT);
+ if (!ext4_crypt_info_cachep)
goto fail;
- }
for (i = 0; i < num_prealloc_crypto_ctxs; i++) {
struct ext4_crypto_ctx *ctx;
- ctx = ext4_alloc_and_init_crypto_ctx(GFP_KERNEL);
- if (IS_ERR(ctx)) {
- res = PTR_ERR(ctx);
+ ctx = kmem_cache_zalloc(ext4_crypto_ctx_cachep, GFP_NOFS);
+ if (!ctx) {
+ res = -ENOMEM;
goto fail;
}
list_add(&ctx->free_list, &ext4_free_crypto_ctxs);
@@ -317,32 +265,11 @@ static int ext4_page_crypto(struct ext4_crypto_ctx *ctx,
struct ablkcipher_request *req = NULL;
DECLARE_EXT4_COMPLETION_RESULT(ecr);
struct scatterlist dst, src;
- struct ext4_inode_info *ei = EXT4_I(inode);
- struct crypto_ablkcipher *atfm = __crypto_ablkcipher_cast(ctx->tfm);
+ struct ext4_crypt_info *ci = EXT4_I(inode)->i_crypt_info;
+ struct crypto_ablkcipher *tfm = ci->ci_ctfm;
int res = 0;
- BUG_ON(!ctx->tfm);
- BUG_ON(ctx->mode != ei->i_encryption_key.mode);
-
- if (ctx->mode != EXT4_ENCRYPTION_MODE_AES_256_XTS) {
- printk_ratelimited(KERN_ERR
- "%s: unsupported crypto algorithm: %d\n",
- __func__, ctx->mode);
- return -ENOTSUPP;
- }
-
- crypto_ablkcipher_clear_flags(atfm, ~0);
- crypto_tfm_set_flags(ctx->tfm, CRYPTO_TFM_REQ_WEAK_KEY);
-
- res = crypto_ablkcipher_setkey(atfm, ei->i_encryption_key.raw,
- ei->i_encryption_key.size);
- if (res) {
- printk_ratelimited(KERN_ERR
- "%s: crypto_ablkcipher_setkey() failed\n",
- __func__);
- return res;
- }
- req = ablkcipher_request_alloc(atfm, GFP_NOFS);
+ req = ablkcipher_request_alloc(tfm, GFP_NOFS);
if (!req) {
printk_ratelimited(KERN_ERR
"%s: crypto_request_alloc() failed\n",
@@ -384,6 +311,15 @@ static int ext4_page_crypto(struct ext4_crypto_ctx *ctx,
return 0;
}
+static struct page *alloc_bounce_page(struct ext4_crypto_ctx *ctx)
+{
+ ctx->w.bounce_page = mempool_alloc(ext4_bounce_page_pool, GFP_NOWAIT);
+ if (ctx->w.bounce_page == NULL)
+ return ERR_PTR(-ENOMEM);
+ ctx->flags |= EXT4_WRITE_PATH_FL;
+ return ctx->w.bounce_page;
+}
+
/**
* ext4_encrypt() - Encrypts a page
* @inode: The inode for which the encryption should take place
@@ -413,27 +349,17 @@ struct page *ext4_encrypt(struct inode *inode,
return (struct page *) ctx;
/* The encryption operation will require a bounce page. */
- ciphertext_page = alloc_page(GFP_NOFS);
- if (!ciphertext_page) {
- /* This is a potential bottleneck, but at least we'll have
- * forward progress. */
- ciphertext_page = mempool_alloc(ext4_bounce_page_pool,
- GFP_NOFS);
- if (WARN_ON_ONCE(!ciphertext_page)) {
- ciphertext_page = mempool_alloc(ext4_bounce_page_pool,
- GFP_NOFS | __GFP_WAIT);
- }
- ctx->flags &= ~EXT4_BOUNCE_PAGE_REQUIRES_FREE_ENCRYPT_FL;
- } else {
- ctx->flags |= EXT4_BOUNCE_PAGE_REQUIRES_FREE_ENCRYPT_FL;
- }
- ctx->bounce_page = ciphertext_page;
- ctx->control_page = plaintext_page;
+ ciphertext_page = alloc_bounce_page(ctx);
+ if (IS_ERR(ciphertext_page))
+ goto errout;
+ ctx->w.control_page = plaintext_page;
err = ext4_page_crypto(ctx, inode, EXT4_ENCRYPT, plaintext_page->index,
plaintext_page, ciphertext_page);
if (err) {
+ ciphertext_page = ERR_PTR(err);
+ errout:
ext4_release_crypto_ctx(ctx);
- return ERR_PTR(err);
+ return ciphertext_page;
}
SetPagePrivate(ciphertext_page);
set_page_private(ciphertext_page, (unsigned long)ctx);
@@ -470,8 +396,8 @@ int ext4_decrypt_one(struct inode *inode, struct page *page)
struct ext4_crypto_ctx *ctx = ext4_get_crypto_ctx(inode);
- if (!ctx)
- return -ENOMEM;
+ if (IS_ERR(ctx))
+ return PTR_ERR(ctx);
ret = ext4_decrypt(ctx, page);
ext4_release_crypto_ctx(ctx);
return ret;
@@ -493,21 +419,11 @@ int ext4_encrypted_zeroout(struct inode *inode, struct ext4_extent *ex)
if (IS_ERR(ctx))
return PTR_ERR(ctx);
- ciphertext_page = alloc_page(GFP_NOFS);
- if (!ciphertext_page) {
- /* This is a potential bottleneck, but at least we'll have
- * forward progress. */
- ciphertext_page = mempool_alloc(ext4_bounce_page_pool,
- GFP_NOFS);
- if (WARN_ON_ONCE(!ciphertext_page)) {
- ciphertext_page = mempool_alloc(ext4_bounce_page_pool,
- GFP_NOFS | __GFP_WAIT);
- }
- ctx->flags &= ~EXT4_BOUNCE_PAGE_REQUIRES_FREE_ENCRYPT_FL;
- } else {
- ctx->flags |= EXT4_BOUNCE_PAGE_REQUIRES_FREE_ENCRYPT_FL;
+ ciphertext_page = alloc_bounce_page(ctx);
+ if (IS_ERR(ciphertext_page)) {
+ err = PTR_ERR(ciphertext_page);
+ goto errout;
}
- ctx->bounce_page = ciphertext_page;
while (len--) {
err = ext4_page_crypto(ctx, inode, EXT4_ENCRYPT, lblk,
@@ -529,6 +445,7 @@ int ext4_encrypted_zeroout(struct inode *inode, struct ext4_extent *ex)
goto errout;
}
err = submit_bio_wait(WRITE, bio);
+ bio_put(bio);
if (err)
goto errout;
}
diff --git a/fs/ext4/crypto_fname.c b/fs/ext4/crypto_fname.c
index fded02f72299..7dc4eb55913c 100644
--- a/fs/ext4/crypto_fname.c
+++ b/fs/ext4/crypto_fname.c
@@ -48,6 +48,12 @@ bool ext4_valid_filenames_enc_mode(uint32_t mode)
return (mode == EXT4_ENCRYPTION_MODE_AES_256_CTS);
}
+static unsigned max_name_len(struct inode *inode)
+{
+ return S_ISLNK(inode->i_mode) ? inode->i_sb->s_blocksize :
+ EXT4_NAME_LEN;
+}
+
/**
* ext4_fname_encrypt() -
*
@@ -55,43 +61,52 @@ bool ext4_valid_filenames_enc_mode(uint32_t mode)
* ciphertext. Errors are returned as negative numbers. We trust the caller to
* allocate sufficient memory to oname string.
*/
-static int ext4_fname_encrypt(struct ext4_fname_crypto_ctx *ctx,
+static int ext4_fname_encrypt(struct inode *inode,
const struct qstr *iname,
struct ext4_str *oname)
{
u32 ciphertext_len;
struct ablkcipher_request *req = NULL;
DECLARE_EXT4_COMPLETION_RESULT(ecr);
- struct crypto_ablkcipher *tfm = ctx->ctfm;
+ struct ext4_crypt_info *ci = EXT4_I(inode)->i_crypt_info;
+ struct crypto_ablkcipher *tfm = ci->ci_ctfm;
int res = 0;
char iv[EXT4_CRYPTO_BLOCK_SIZE];
- struct scatterlist sg[1];
- int padding = 4 << (ctx->flags & EXT4_POLICY_FLAGS_PAD_MASK);
- char *workbuf;
+ struct scatterlist src_sg, dst_sg;
+ int padding = 4 << (ci->ci_flags & EXT4_POLICY_FLAGS_PAD_MASK);
+ char *workbuf, buf[32], *alloc_buf = NULL;
+ unsigned lim = max_name_len(inode);
- if (iname->len <= 0 || iname->len > ctx->lim)
+ if (iname->len <= 0 || iname->len > lim)
return -EIO;
ciphertext_len = (iname->len < EXT4_CRYPTO_BLOCK_SIZE) ?
EXT4_CRYPTO_BLOCK_SIZE : iname->len;
ciphertext_len = ext4_fname_crypto_round_up(ciphertext_len, padding);
- ciphertext_len = (ciphertext_len > ctx->lim)
- ? ctx->lim : ciphertext_len;
+ ciphertext_len = (ciphertext_len > lim)
+ ? lim : ciphertext_len;
+
+ if (ciphertext_len <= sizeof(buf)) {
+ workbuf = buf;
+ } else {
+ alloc_buf = kmalloc(ciphertext_len, GFP_NOFS);
+ if (!alloc_buf)
+ return -ENOMEM;
+ workbuf = alloc_buf;
+ }
/* Allocate request */
req = ablkcipher_request_alloc(tfm, GFP_NOFS);
if (!req) {
printk_ratelimited(
KERN_ERR "%s: crypto_request_alloc() failed\n", __func__);
+ kfree(alloc_buf);
return -ENOMEM;
}
ablkcipher_request_set_callback(req,
CRYPTO_TFM_REQ_MAY_BACKLOG | CRYPTO_TFM_REQ_MAY_SLEEP,
ext4_dir_crypt_complete, &ecr);
- /* Map the workpage */
- workbuf = kmap(ctx->workpage);
-
/* Copy the input */
memcpy(workbuf, iname->name, iname->len);
if (iname->len < ciphertext_len)
@@ -101,21 +116,16 @@ static int ext4_fname_encrypt(struct ext4_fname_crypto_ctx *ctx,
memset(iv, 0, EXT4_CRYPTO_BLOCK_SIZE);
/* Create encryption request */
- sg_init_table(sg, 1);
- sg_set_page(sg, ctx->workpage, PAGE_SIZE, 0);
- ablkcipher_request_set_crypt(req, sg, sg, ciphertext_len, iv);
+ sg_init_one(&src_sg, workbuf, ciphertext_len);
+ sg_init_one(&dst_sg, oname->name, ciphertext_len);
+ ablkcipher_request_set_crypt(req, &src_sg, &dst_sg, ciphertext_len, iv);
res = crypto_ablkcipher_encrypt(req);
if (res == -EINPROGRESS || res == -EBUSY) {
BUG_ON(req->base.data != &ecr);
wait_for_completion(&ecr.completion);
res = ecr.res;
}
- if (res >= 0) {
- /* Copy the result to output */
- memcpy(oname->name, workbuf, ciphertext_len);
- res = ciphertext_len;
- }
- kunmap(ctx->workpage);
+ kfree(alloc_buf);
ablkcipher_request_free(req);
if (res < 0) {
printk_ratelimited(
@@ -132,20 +142,21 @@ static int ext4_fname_encrypt(struct ext4_fname_crypto_ctx *ctx,
* Errors are returned as negative numbers.
* We trust the caller to allocate sufficient memory to oname string.
*/
-static int ext4_fname_decrypt(struct ext4_fname_crypto_ctx *ctx,
+static int ext4_fname_decrypt(struct inode *inode,
const struct ext4_str *iname,
struct ext4_str *oname)
{
struct ext4_str tmp_in[2], tmp_out[1];
struct ablkcipher_request *req = NULL;
DECLARE_EXT4_COMPLETION_RESULT(ecr);
- struct scatterlist sg[1];
- struct crypto_ablkcipher *tfm = ctx->ctfm;
+ struct scatterlist src_sg, dst_sg;
+ struct ext4_crypt_info *ci = EXT4_I(inode)->i_crypt_info;
+ struct crypto_ablkcipher *tfm = ci->ci_ctfm;
int res = 0;
char iv[EXT4_CRYPTO_BLOCK_SIZE];
- char *workbuf;
+ unsigned lim = max_name_len(inode);
- if (iname->len <= 0 || iname->len > ctx->lim)
+ if (iname->len <= 0 || iname->len > lim)
return -EIO;
tmp_in[0].name = iname->name;
@@ -163,31 +174,19 @@ static int ext4_fname_decrypt(struct ext4_fname_crypto_ctx *ctx,
CRYPTO_TFM_REQ_MAY_BACKLOG | CRYPTO_TFM_REQ_MAY_SLEEP,
ext4_dir_crypt_complete, &ecr);
- /* Map the workpage */
- workbuf = kmap(ctx->workpage);
-
- /* Copy the input */
- memcpy(workbuf, iname->name, iname->len);
-
/* Initialize IV */
memset(iv, 0, EXT4_CRYPTO_BLOCK_SIZE);
/* Create encryption request */
- sg_init_table(sg, 1);
- sg_set_page(sg, ctx->workpage, PAGE_SIZE, 0);
- ablkcipher_request_set_crypt(req, sg, sg, iname->len, iv);
+ sg_init_one(&src_sg, iname->name, iname->len);
+ sg_init_one(&dst_sg, oname->name, oname->len);
+ ablkcipher_request_set_crypt(req, &src_sg, &dst_sg, iname->len, iv);
res = crypto_ablkcipher_decrypt(req);
if (res == -EINPROGRESS || res == -EBUSY) {
BUG_ON(req->base.data != &ecr);
wait_for_completion(&ecr.completion);
res = ecr.res;
}
- if (res >= 0) {
- /* Copy the result to output */
- memcpy(oname->name, workbuf, iname->len);
- res = iname->len;
- }
- kunmap(ctx->workpage);
ablkcipher_request_free(req);
if (res < 0) {
printk_ratelimited(
@@ -254,207 +253,6 @@ static int digest_decode(const char *src, int len, char *dst)
}
/**
- * ext4_free_fname_crypto_ctx() -
- *
- * Frees up a crypto context.
- */
-void ext4_free_fname_crypto_ctx(struct ext4_fname_crypto_ctx *ctx)
-{
- if (ctx == NULL || IS_ERR(ctx))
- return;
-
- if (ctx->ctfm && !IS_ERR(ctx->ctfm))
- crypto_free_ablkcipher(ctx->ctfm);
- if (ctx->htfm && !IS_ERR(ctx->htfm))
- crypto_free_hash(ctx->htfm);
- if (ctx->workpage && !IS_ERR(ctx->workpage))
- __free_page(ctx->workpage);
- kfree(ctx);
-}
-
-/**
- * ext4_put_fname_crypto_ctx() -
- *
- * Return: The crypto context onto free list. If the free list is above a
- * threshold, completely frees up the context, and returns the memory.
- *
- * TODO: Currently we directly free the crypto context. Eventually we should
- * add code it to return to free list. Such an approach will increase
- * efficiency of directory lookup.
- */
-void ext4_put_fname_crypto_ctx(struct ext4_fname_crypto_ctx **ctx)
-{
- if (*ctx == NULL || IS_ERR(*ctx))
- return;
- ext4_free_fname_crypto_ctx(*ctx);
- *ctx = NULL;
-}
-
-/**
- * ext4_search_fname_crypto_ctx() -
- */
-static struct ext4_fname_crypto_ctx *ext4_search_fname_crypto_ctx(
- const struct ext4_encryption_key *key)
-{
- return NULL;
-}
-
-/**
- * ext4_alloc_fname_crypto_ctx() -
- */
-struct ext4_fname_crypto_ctx *ext4_alloc_fname_crypto_ctx(
- const struct ext4_encryption_key *key)
-{
- struct ext4_fname_crypto_ctx *ctx;
-
- ctx = kmalloc(sizeof(struct ext4_fname_crypto_ctx), GFP_NOFS);
- if (ctx == NULL)
- return ERR_PTR(-ENOMEM);
- if (key->mode == EXT4_ENCRYPTION_MODE_INVALID) {
- /* This will automatically set key mode to invalid
- * As enum for ENCRYPTION_MODE_INVALID is zero */
- memset(&ctx->key, 0, sizeof(ctx->key));
- } else {
- memcpy(&ctx->key, key, sizeof(struct ext4_encryption_key));
- }
- ctx->has_valid_key = (EXT4_ENCRYPTION_MODE_INVALID == key->mode)
- ? 0 : 1;
- ctx->ctfm_key_is_ready = 0;
- ctx->ctfm = NULL;
- ctx->htfm = NULL;
- ctx->workpage = NULL;
- return ctx;
-}
-
-/**
- * ext4_get_fname_crypto_ctx() -
- *
- * Allocates a free crypto context and initializes it to hold
- * the crypto material for the inode.
- *
- * Return: NULL if not encrypted. Error value on error. Valid pointer otherwise.
- */
-struct ext4_fname_crypto_ctx *ext4_get_fname_crypto_ctx(
- struct inode *inode, u32 max_ciphertext_len)
-{
- struct ext4_fname_crypto_ctx *ctx;
- struct ext4_inode_info *ei = EXT4_I(inode);
- int res;
-
- /* Check if the crypto policy is set on the inode */
- res = ext4_encrypted_inode(inode);
- if (res == 0)
- return NULL;
-
- if (!ext4_has_encryption_key(inode))
- ext4_generate_encryption_key(inode);
-
- /* Get a crypto context based on the key.
- * A new context is allocated if no context matches the requested key.
- */
- ctx = ext4_search_fname_crypto_ctx(&(ei->i_encryption_key));
- if (ctx == NULL)
- ctx = ext4_alloc_fname_crypto_ctx(&(ei->i_encryption_key));
- if (IS_ERR(ctx))
- return ctx;
-
- ctx->flags = ei->i_crypt_policy_flags;
- if (ctx->has_valid_key) {
- if (ctx->key.mode != EXT4_ENCRYPTION_MODE_AES_256_CTS) {
- printk_once(KERN_WARNING
- "ext4: unsupported key mode %d\n",
- ctx->key.mode);
- return ERR_PTR(-ENOKEY);
- }
-
- /* As a first cut, we will allocate new tfm in every call.
- * later, we will keep the tfm around, in case the key gets
- * re-used */
- if (ctx->ctfm == NULL) {
- ctx->ctfm = crypto_alloc_ablkcipher("cts(cbc(aes))",
- 0, 0);
- }
- if (IS_ERR(ctx->ctfm)) {
- res = PTR_ERR(ctx->ctfm);
- printk(
- KERN_DEBUG "%s: error (%d) allocating crypto tfm\n",
- __func__, res);
- ctx->ctfm = NULL;
- ext4_put_fname_crypto_ctx(&ctx);
- return ERR_PTR(res);
- }
- if (ctx->ctfm == NULL) {
- printk(
- KERN_DEBUG "%s: could not allocate crypto tfm\n",
- __func__);
- ext4_put_fname_crypto_ctx(&ctx);
- return ERR_PTR(-ENOMEM);
- }
- if (ctx->workpage == NULL)
- ctx->workpage = alloc_page(GFP_NOFS);
- if (IS_ERR(ctx->workpage)) {
- res = PTR_ERR(ctx->workpage);
- printk(
- KERN_DEBUG "%s: error (%d) allocating work page\n",
- __func__, res);
- ctx->workpage = NULL;
- ext4_put_fname_crypto_ctx(&ctx);
- return ERR_PTR(res);
- }
- if (ctx->workpage == NULL) {
- printk(
- KERN_DEBUG "%s: could not allocate work page\n",
- __func__);
- ext4_put_fname_crypto_ctx(&ctx);
- return ERR_PTR(-ENOMEM);
- }
- ctx->lim = max_ciphertext_len;
- crypto_ablkcipher_clear_flags(ctx->ctfm, ~0);
- crypto_tfm_set_flags(crypto_ablkcipher_tfm(ctx->ctfm),
- CRYPTO_TFM_REQ_WEAK_KEY);
-
- /* If we are lucky, we will get a context that is already
- * set up with the right key. Else, we will have to
- * set the key */
- if (!ctx->ctfm_key_is_ready) {
- /* Since our crypto objectives for filename encryption
- * are pretty weak,
- * we directly use the inode master key */
- res = crypto_ablkcipher_setkey(ctx->ctfm,
- ctx->key.raw, ctx->key.size);
- if (res) {
- ext4_put_fname_crypto_ctx(&ctx);
- return ERR_PTR(-EIO);
- }
- ctx->ctfm_key_is_ready = 1;
- } else {
- /* In the current implementation, key should never be
- * marked "ready" for a context that has just been
- * allocated. So we should never reach here */
- BUG();
- }
- }
- if (ctx->htfm == NULL)
- ctx->htfm = crypto_alloc_hash("sha256", 0, CRYPTO_ALG_ASYNC);
- if (IS_ERR(ctx->htfm)) {
- res = PTR_ERR(ctx->htfm);
- printk(KERN_DEBUG "%s: error (%d) allocating hash tfm\n",
- __func__, res);
- ctx->htfm = NULL;
- ext4_put_fname_crypto_ctx(&ctx);
- return ERR_PTR(res);
- }
- if (ctx->htfm == NULL) {
- printk(KERN_DEBUG "%s: could not allocate hash tfm\n",
- __func__);
- ext4_put_fname_crypto_ctx(&ctx);
- return ERR_PTR(-ENOMEM);
- }
-
- return ctx;
-}
-
-/**
* ext4_fname_crypto_round_up() -
*
* Return: The next multiple of block size
@@ -464,44 +262,29 @@ u32 ext4_fname_crypto_round_up(u32 size, u32 blksize)
return ((size+blksize-1)/blksize)*blksize;
}
-/**
- * ext4_fname_crypto_namelen_on_disk() -
- */
-int ext4_fname_crypto_namelen_on_disk(struct ext4_fname_crypto_ctx *ctx,
- u32 namelen)
+unsigned ext4_fname_encrypted_size(struct inode *inode, u32 ilen)
{
- u32 ciphertext_len;
- int padding = 4 << (ctx->flags & EXT4_POLICY_FLAGS_PAD_MASK);
-
- if (ctx == NULL)
- return -EIO;
- if (!(ctx->has_valid_key))
- return -EACCES;
- ciphertext_len = (namelen < EXT4_CRYPTO_BLOCK_SIZE) ?
- EXT4_CRYPTO_BLOCK_SIZE : namelen;
- ciphertext_len = ext4_fname_crypto_round_up(ciphertext_len, padding);
- ciphertext_len = (ciphertext_len > ctx->lim)
- ? ctx->lim : ciphertext_len;
- return (int) ciphertext_len;
+ struct ext4_crypt_info *ci = EXT4_I(inode)->i_crypt_info;
+ int padding = 32;
+
+ if (ci)
+ padding = 4 << (ci->ci_flags & EXT4_POLICY_FLAGS_PAD_MASK);
+ if (ilen < EXT4_CRYPTO_BLOCK_SIZE)
+ ilen = EXT4_CRYPTO_BLOCK_SIZE;
+ return ext4_fname_crypto_round_up(ilen, padding);
}
-/**
- * ext4_fname_crypto_alloc_obuff() -
+/*
+ * ext4_fname_crypto_alloc_buffer() -
*
* Allocates an output buffer that is sufficient for the crypto operation
* specified by the context and the direction.
*/
-int ext4_fname_crypto_alloc_buffer(struct ext4_fname_crypto_ctx *ctx,
+int ext4_fname_crypto_alloc_buffer(struct inode *inode,
u32 ilen, struct ext4_str *crypto_str)
{
- unsigned int olen;
- int padding = 4 << (ctx->flags & EXT4_POLICY_FLAGS_PAD_MASK);
+ unsigned int olen = ext4_fname_encrypted_size(inode, ilen);
- if (!ctx)
- return -EIO;
- if (padding < EXT4_CRYPTO_BLOCK_SIZE)
- padding = EXT4_CRYPTO_BLOCK_SIZE;
- olen = ext4_fname_crypto_round_up(ilen, padding);
crypto_str->len = olen;
if (olen < EXT4_FNAME_CRYPTO_DIGEST_SIZE*2)
olen = EXT4_FNAME_CRYPTO_DIGEST_SIZE*2;
@@ -529,7 +312,7 @@ void ext4_fname_crypto_free_buffer(struct ext4_str *crypto_str)
/**
* ext4_fname_disk_to_usr() - converts a filename from disk space to user space
*/
-int _ext4_fname_disk_to_usr(struct ext4_fname_crypto_ctx *ctx,
+int _ext4_fname_disk_to_usr(struct inode *inode,
struct dx_hash_info *hinfo,
const struct ext4_str *iname,
struct ext4_str *oname)
@@ -537,8 +320,6 @@ int _ext4_fname_disk_to_usr(struct ext4_fname_crypto_ctx *ctx,
char buf[24];
int ret;
- if (ctx == NULL)
- return -EIO;
if (iname->len < 3) {
/*Check for . and .. */
if (iname->name[0] == '.' && iname->name[iname->len-1] == '.') {
@@ -548,8 +329,8 @@ int _ext4_fname_disk_to_usr(struct ext4_fname_crypto_ctx *ctx,
return oname->len;
}
}
- if (ctx->has_valid_key)
- return ext4_fname_decrypt(ctx, iname, oname);
+ if (EXT4_I(inode)->i_crypt_info)
+ return ext4_fname_decrypt(inode, iname, oname);
if (iname->len <= EXT4_FNAME_CRYPTO_DIGEST_SIZE) {
ret = digest_encode(iname->name, iname->len, oname->name);
@@ -568,7 +349,7 @@ int _ext4_fname_disk_to_usr(struct ext4_fname_crypto_ctx *ctx,
return ret + 1;
}
-int ext4_fname_disk_to_usr(struct ext4_fname_crypto_ctx *ctx,
+int ext4_fname_disk_to_usr(struct inode *inode,
struct dx_hash_info *hinfo,
const struct ext4_dir_entry_2 *de,
struct ext4_str *oname)
@@ -576,21 +357,20 @@ int ext4_fname_disk_to_usr(struct ext4_fname_crypto_ctx *ctx,
struct ext4_str iname = {.name = (unsigned char *) de->name,
.len = de->name_len };
- return _ext4_fname_disk_to_usr(ctx, hinfo, &iname, oname);
+ return _ext4_fname_disk_to_usr(inode, hinfo, &iname, oname);
}
/**
* ext4_fname_usr_to_disk() - converts a filename from user space to disk space
*/
-int ext4_fname_usr_to_disk(struct ext4_fname_crypto_ctx *ctx,
+int ext4_fname_usr_to_disk(struct inode *inode,
const struct qstr *iname,
struct ext4_str *oname)
{
int res;
+ struct ext4_crypt_info *ci = EXT4_I(inode)->i_crypt_info;
- if (ctx == NULL)
- return -EIO;
if (iname->len < 3) {
/*Check for . and .. */
if (iname->name[0] == '.' &&
@@ -601,8 +381,8 @@ int ext4_fname_usr_to_disk(struct ext4_fname_crypto_ctx *ctx,
return oname->len;
}
}
- if (ctx->has_valid_key) {
- res = ext4_fname_encrypt(ctx, iname, oname);