ext4 crypto: shrink size of the ext4_crypto_ctx structure
Some fields are only used while the crypto_ctx is on the read path,
some are only used on the write path, and some are only used while the
structure sits on the free list. Since these uses never overlap,
optimize memory use by placing the fields in a union.
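For reference, a minimal user-space sketch of the idea (the struct
names, member types and sizes below are stand-ins for illustration,
not the kernel definitions): the read-path, write-path and free-list
members are never live at the same time, so they can share storage.

  /* Build with: cc -std=c11 -Wall ctx_union_sketch.c */
  #include <stdio.h>

  /* Stand-ins for the kernel types, sized only for illustration. */
  struct page;
  struct bio;
  struct crypto_tfm;
  struct work_struct { void *func; void *entry[2]; };
  struct list_head { struct list_head *next, *prev; };

  /* Old layout: every member present at once. */
  struct ctx_old {
          struct crypto_tfm *tfm;
          struct page *bounce_page;       /* write path only */
          struct page *control_page;      /* write path only */
          struct bio *bio;                /* read path only */
          struct work_struct work;        /* read path only */
          struct list_head free_list;     /* free list only */
          int flags;
          int mode;
  };

  /* New layout: the mutually exclusive members overlap in a union. */
  struct ctx_new {
          struct crypto_tfm *tfm;
          union {
                  struct {
                          struct page *bounce_page;
                          struct page *control_page;
                  } w;                            /* write path */
                  struct {
                          struct bio *bio;
                          struct work_struct work;
                  } r;                            /* read path */
                  struct list_head free_list;     /* on the free list */
          };
          char flags;
          char mode;
  };

  int main(void)
  {
          printf("old: %zu bytes, new: %zu bytes\n",
                 sizeof(struct ctx_old), sizeof(struct ctx_new));
          return 0;
  }

The exact saving in the kernel depends on the real member sizes, but
the shape of the win is the same. Because the free list pointers now
alias the read/write members, the write path has to mark the context
with EXT4_WRITE_PATH_FL before touching ctx->w, and
ext4_release_crypto_ctx() checks that flag before looking at the
bounce page.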
Signed-off-by: Theodore Ts'o <tytso@mit.edu>
diff --git a/fs/ext4/crypto.c b/fs/ext4/crypto.c
index 9969d05..28a0e4bd 100644
--- a/fs/ext4/crypto.c
+++ b/fs/ext4/crypto.c
@@ -71,14 +71,14 @@
{
unsigned long flags;
- if (ctx->bounce_page) {
+ if ((ctx->flags & EXT4_WRITE_PATH_FL) && ctx->w.bounce_page) {
if (ctx->flags & EXT4_BOUNCE_PAGE_REQUIRES_FREE_ENCRYPT_FL)
- __free_page(ctx->bounce_page);
+ __free_page(ctx->w.bounce_page);
else
- mempool_free(ctx->bounce_page, ext4_bounce_page_pool);
- ctx->bounce_page = NULL;
+ mempool_free(ctx->w.bounce_page, ext4_bounce_page_pool);
}
- ctx->control_page = NULL;
+ ctx->w.bounce_page = NULL;
+ ctx->w.control_page = NULL;
if (ctx->flags & EXT4_CTX_REQUIRES_FREE_ENCRYPT_FL) {
if (ctx->tfm)
crypto_free_tfm(ctx->tfm);
@@ -134,6 +134,7 @@
} else {
ctx->flags &= ~EXT4_CTX_REQUIRES_FREE_ENCRYPT_FL;
}
+ ctx->flags &= ~EXT4_WRITE_PATH_FL;
/* Allocate a new Crypto API context if we don't already have
* one or if it isn't the right mode. */
@@ -165,10 +166,6 @@
}
BUG_ON(ci->ci_size != ext4_encryption_key_size(ci->ci_data_mode));
- /* There shouldn't be a bounce page attached to the crypto
- * context at this point. */
- BUG_ON(ctx->bounce_page);
-
out:
if (res) {
if (!IS_ERR_OR_NULL(ctx))
@@ -189,15 +186,6 @@
struct ext4_crypto_ctx *pos, *n;
list_for_each_entry_safe(pos, n, &ext4_free_crypto_ctxs, free_list) {
- if (pos->bounce_page) {
- if (pos->flags &
- EXT4_BOUNCE_PAGE_REQUIRES_FREE_ENCRYPT_FL) {
- __free_page(pos->bounce_page);
- } else {
- mempool_free(pos->bounce_page,
- ext4_bounce_page_pool);
- }
- }
if (pos->tfm)
crypto_free_tfm(pos->tfm);
kmem_cache_free(ext4_crypto_ctx_cachep, pos);
@@ -425,8 +413,9 @@
} else {
ctx->flags |= EXT4_BOUNCE_PAGE_REQUIRES_FREE_ENCRYPT_FL;
}
- ctx->bounce_page = ciphertext_page;
- ctx->control_page = plaintext_page;
+ ctx->flags |= EXT4_WRITE_PATH_FL;
+ ctx->w.bounce_page = ciphertext_page;
+ ctx->w.control_page = plaintext_page;
err = ext4_page_crypto(ctx, inode, EXT4_ENCRYPT, plaintext_page->index,
plaintext_page, ciphertext_page);
if (err) {
@@ -505,7 +494,7 @@
} else {
ctx->flags |= EXT4_BOUNCE_PAGE_REQUIRES_FREE_ENCRYPT_FL;
}
- ctx->bounce_page = ciphertext_page;
+ ctx->w.bounce_page = ciphertext_page;
while (len--) {
err = ext4_page_crypto(ctx, inode, EXT4_ENCRYPT, lblk,
diff --git a/fs/ext4/ext4_crypto.h b/fs/ext4/ext4_crypto.h
index 69faf0e..c5258f2 100644
--- a/fs/ext4/ext4_crypto.h
+++ b/fs/ext4/ext4_crypto.h
@@ -86,16 +86,23 @@
#define EXT4_CTX_REQUIRES_FREE_ENCRYPT_FL 0x00000001
#define EXT4_BOUNCE_PAGE_REQUIRES_FREE_ENCRYPT_FL 0x00000002
+#define EXT4_WRITE_PATH_FL 0x00000004
struct ext4_crypto_ctx {
struct crypto_tfm *tfm; /* Crypto API context */
- struct page *bounce_page; /* Ciphertext page on write path */
- struct page *control_page; /* Original page on write path */
- struct bio *bio; /* The bio for this context */
- struct work_struct work; /* Work queue for read complete path */
- struct list_head free_list; /* Free list */
- int flags; /* Flags */
- int mode; /* Encryption mode for tfm */
+ union {
+ struct {
+ struct page *bounce_page; /* Ciphertext page */
+ struct page *control_page; /* Original page */
+ } w;
+ struct {
+ struct bio *bio; /* The bio for this context */
+ struct work_struct work; /* Work queue for read completion */
+ } r;
+ struct list_head free_list; /* Free list */
+ };
+ char flags; /* Flags */
+ char mode; /* Encryption mode for tfm */
};
struct ext4_completion_result {
diff --git a/fs/ext4/page-io.c b/fs/ext4/page-io.c
index 5765f88..79636e2 100644
--- a/fs/ext4/page-io.c
+++ b/fs/ext4/page-io.c
@@ -84,7 +84,7 @@
/* The bounce data pages are unmapped. */
data_page = page;
ctx = (struct ext4_crypto_ctx *)page_private(data_page);
- page = ctx->control_page;
+ page = ctx->w.control_page;
}
#endif
diff --git a/fs/ext4/readpage.c b/fs/ext4/readpage.c
index 171b9ac..ec3ef93 100644
--- a/fs/ext4/readpage.c
+++ b/fs/ext4/readpage.c
@@ -54,8 +54,8 @@
{
#ifdef CONFIG_EXT4_FS_ENCRYPTION
struct ext4_crypto_ctx *ctx =
- container_of(work, struct ext4_crypto_ctx, work);
- struct bio *bio = ctx->bio;
+ container_of(work, struct ext4_crypto_ctx, r.work);
+ struct bio *bio = ctx->r.bio;
struct bio_vec *bv;
int i;
@@ -109,9 +109,9 @@
if (err) {
ext4_release_crypto_ctx(ctx);
} else {
- INIT_WORK(&ctx->work, completion_pages);
- ctx->bio = bio;
- queue_work(ext4_read_workqueue, &ctx->work);
+ INIT_WORK(&ctx->r.work, completion_pages);
+ ctx->r.bio = bio;
+ queue_work(ext4_read_workqueue, &ctx->r.work);
return;
}
}