author     Theodore Ts'o <tytso@mit.edu>    2016-03-26 16:14:34 -0400
committer  Theodore Ts'o <tytso@mit.edu>    2016-03-26 16:14:34 -0400
commit     c9af28fdd44922a6c10c9f8315718408af98e315 (patch)
tree       b5bc491b5d141ba710bc6da44b00125b8d5d589c /fs/ext4/crypto.c
parent     9e92f48c34eb2b9af9d12f892e2fe1fce5e8ce35 (diff)
download   linux-c9af28fdd44922a6c10c9f8315718408af98e315.tar.gz
ext4 crypto: don't let data integrity writebacks fail with ENOMEM
We don't want the writeback triggered from the journal commit (in
data=writeback mode) to cause the journal to abort due to
generic_writepages() returning an ENOMEM error.  In addition, if
fsync() fails with ENOMEM, most applications will probably not do the
right thing.

So if we are doing a data integrity sync, and ext4_encrypt() returns
ENOMEM, we will submit any queued I/O to date, and then retry the
allocation using GFP_NOFAIL.

Google-Bug-Id: 27641567

Signed-off-by: Theodore Ts'o <tytso@mit.edu>
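[Editor's note: the retry loop described above lives in the writeback
path (fs/ext4/page-io.c in the same commit), so it does not appear in
the crypto.c diff below. A minimal sketch of the pattern, assuming
ext4's writeback locals (io, wbc, data_page, ret) for illustration
rather than quoting the commit verbatim:

	gfp_t gfp_flags = GFP_NOFS;

retry_encrypt:
	/* Encrypt the page into a bounce page before submitting I/O. */
	data_page = ext4_encrypt(inode, page, gfp_flags);
	if (IS_ERR(data_page)) {
		ret = PTR_ERR(data_page);
		if (ret == -ENOMEM && wbc->sync_mode == WB_SYNC_ALL) {
			/*
			 * Data integrity sync: submit any I/O queued so
			 * far to release bounce pages back to the pool,
			 * then retry with __GFP_NOFAIL so the allocation
			 * cannot fail and abort the journal.
			 */
			if (io->io_bio)
				ext4_io_submit(io);
			gfp_flags |= __GFP_NOFAIL;
			goto retry_encrypt;
		}
		goto out;
	}

Non-integrity writeback keeps plain GFP_NOFS and is allowed to fail
with ENOMEM; only WB_SYNC_ALL escalates to __GFP_NOFAIL.]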
Diffstat (limited to 'fs/ext4/crypto.c')
-rw-r--r--  fs/ext4/crypto.c | 37
1 file changed, 21 insertions(+), 16 deletions(-)
diff --git a/fs/ext4/crypto.c b/fs/ext4/crypto.c
index edc053a819144..012fd3285147d 100644
--- a/fs/ext4/crypto.c
+++ b/fs/ext4/crypto.c
@@ -91,7 +91,8 @@ void ext4_release_crypto_ctx(struct ext4_crypto_ctx *ctx)
* Return: An allocated and initialized encryption context on success; error
* value or NULL otherwise.
*/
-struct ext4_crypto_ctx *ext4_get_crypto_ctx(struct inode *inode)
+struct ext4_crypto_ctx *ext4_get_crypto_ctx(struct inode *inode,
+ gfp_t gfp_flags)
{
struct ext4_crypto_ctx *ctx = NULL;
int res = 0;
@@ -118,7 +119,7 @@ struct ext4_crypto_ctx *ext4_get_crypto_ctx(struct inode *inode)
list_del(&ctx->free_list);
spin_unlock_irqrestore(&ext4_crypto_ctx_lock, flags);
if (!ctx) {
- ctx = kmem_cache_zalloc(ext4_crypto_ctx_cachep, GFP_NOFS);
+ ctx = kmem_cache_zalloc(ext4_crypto_ctx_cachep, gfp_flags);
if (!ctx) {
res = -ENOMEM;
goto out;
@@ -255,7 +256,8 @@ static int ext4_page_crypto(struct inode *inode,
ext4_direction_t rw,
pgoff_t index,
struct page *src_page,
- struct page *dest_page)
+ struct page *dest_page,
+ gfp_t gfp_flags)
{
u8 xts_tweak[EXT4_XTS_TWEAK_SIZE];
@@ -266,7 +268,7 @@ static int ext4_page_crypto(struct inode *inode,
struct crypto_skcipher *tfm = ci->ci_ctfm;
int res = 0;
- req = skcipher_request_alloc(tfm, GFP_NOFS);
+ req = skcipher_request_alloc(tfm, gfp_flags);
if (!req) {
printk_ratelimited(KERN_ERR
"%s: crypto_request_alloc() failed\n",
@@ -307,9 +309,10 @@ static int ext4_page_crypto(struct inode *inode,
return 0;
}
-static struct page *alloc_bounce_page(struct ext4_crypto_ctx *ctx)
+static struct page *alloc_bounce_page(struct ext4_crypto_ctx *ctx,
+ gfp_t gfp_flags)
{
- ctx->w.bounce_page = mempool_alloc(ext4_bounce_page_pool, GFP_NOWAIT);
+ ctx->w.bounce_page = mempool_alloc(ext4_bounce_page_pool, gfp_flags);
if (ctx->w.bounce_page == NULL)
return ERR_PTR(-ENOMEM);
ctx->flags |= EXT4_WRITE_PATH_FL;
@@ -332,7 +335,8 @@ static struct page *alloc_bounce_page(struct ext4_crypto_ctx *ctx)
* error value or NULL.
*/
struct page *ext4_encrypt(struct inode *inode,
- struct page *plaintext_page)
+ struct page *plaintext_page,
+ gfp_t gfp_flags)
{
struct ext4_crypto_ctx *ctx;
struct page *ciphertext_page = NULL;
@@ -340,17 +344,17 @@ struct page *ext4_encrypt(struct inode *inode,
BUG_ON(!PageLocked(plaintext_page));
- ctx = ext4_get_crypto_ctx(inode);
+ ctx = ext4_get_crypto_ctx(inode, gfp_flags);
if (IS_ERR(ctx))
return (struct page *) ctx;
/* The encryption operation will require a bounce page. */
- ciphertext_page = alloc_bounce_page(ctx);
+ ciphertext_page = alloc_bounce_page(ctx, gfp_flags);
if (IS_ERR(ciphertext_page))
goto errout;
ctx->w.control_page = plaintext_page;
err = ext4_page_crypto(inode, EXT4_ENCRYPT, plaintext_page->index,
- plaintext_page, ciphertext_page);
+ plaintext_page, ciphertext_page, gfp_flags);
if (err) {
ciphertext_page = ERR_PTR(err);
errout:
@@ -378,8 +382,8 @@ int ext4_decrypt(struct page *page)
{
BUG_ON(!PageLocked(page));
- return ext4_page_crypto(page->mapping->host,
- EXT4_DECRYPT, page->index, page, page);
+ return ext4_page_crypto(page->mapping->host, EXT4_DECRYPT,
+ page->index, page, page, GFP_NOFS);
}
int ext4_encrypted_zeroout(struct inode *inode, ext4_lblk_t lblk,
@@ -398,11 +402,11 @@ int ext4_encrypted_zeroout(struct inode *inode, ext4_lblk_t lblk,
BUG_ON(inode->i_sb->s_blocksize != PAGE_CACHE_SIZE);
- ctx = ext4_get_crypto_ctx(inode);
+ ctx = ext4_get_crypto_ctx(inode, GFP_NOFS);
if (IS_ERR(ctx))
return PTR_ERR(ctx);
- ciphertext_page = alloc_bounce_page(ctx);
+ ciphertext_page = alloc_bounce_page(ctx, GFP_NOWAIT);
if (IS_ERR(ciphertext_page)) {
err = PTR_ERR(ciphertext_page);
goto errout;
@@ -410,11 +414,12 @@ int ext4_encrypted_zeroout(struct inode *inode, ext4_lblk_t lblk,
while (len--) {
err = ext4_page_crypto(inode, EXT4_ENCRYPT, lblk,
- ZERO_PAGE(0), ciphertext_page);
+ ZERO_PAGE(0), ciphertext_page,
+ GFP_NOFS);
if (err)
goto errout;
- bio = bio_alloc(GFP_KERNEL, 1);
+ bio = bio_alloc(GFP_NOWAIT, 1);
if (!bio) {
err = -ENOMEM;
goto errout;