fscrypt updates for v5.3
- Preparations for supporting encryption on ext4 filesystems where the
  filesystem block size is smaller than PAGE_SIZE.

- Don't allow setting encryption policies on dead directories.

- Various cleanups.

-----BEGIN PGP SIGNATURE-----

iIoEABYIADIWIQSacvsUNc7UX4ntmEPzXCl4vpKOKwUCXSNh5xQcZWJpZ2dlcnNA
Z29vZ2xlLmNvbQAKCRDzXCl4vpKOK2GPAQDIGnAJ557jCpI23QCWLbCuF1cfEk8J
i+sGdLjx/7SVewD9F1AbNPkm1u6sF0XN7NpGXMk6nU4HDYUXxAYr2RGDYQQ=
=2vas
-----END PGP SIGNATURE-----

Merge tag 'fscrypt-for-linus' of git://git.kernel.org/pub/scm/fs/fscrypt/fscrypt

Pull fscrypt updates from Eric Biggers:

 - Preparations for supporting encryption on ext4 filesystems where the
   filesystem block size is smaller than PAGE_SIZE.

 - Don't allow setting encryption policies on dead directories.

 - Various cleanups.

* tag 'fscrypt-for-linus' of git://git.kernel.org/pub/scm/fs/fscrypt/fscrypt:
  fscrypt: document testing with xfstests
  fscrypt: remove selection of CONFIG_CRYPTO_SHA256
  fscrypt: remove unnecessary includes of ratelimit.h
  fscrypt: don't set policy for a dead directory
  ext4: encrypt only up to last block in ext4_bio_write_page()
  ext4: decrypt only the needed block in __ext4_block_zero_page_range()
  ext4: decrypt only the needed blocks in ext4_block_write_begin()
  ext4: clear BH_Uptodate flag on decryption error
  fscrypt: decrypt only the needed blocks in __fscrypt_decrypt_bio()
  fscrypt: support decrypting multiple filesystem blocks per page
  fscrypt: introduce fscrypt_decrypt_block_inplace()
  fscrypt: handle blocksize < PAGE_SIZE in fscrypt_zeroout_range()
  fscrypt: support encrypting multiple filesystem blocks per page
  fscrypt: introduce fscrypt_encrypt_block_inplace()
  fscrypt: clean up some BUG_ON()s in block encryption/decryption
  fscrypt: rename fscrypt_do_page_crypto() to fscrypt_crypt_block()
  fscrypt: remove the "write" part of struct fscrypt_ctx
  fscrypt: simplify bounce page handling
commit 25cd6f355d
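The core of the series replaces fscrypt_encrypt_page()/fscrypt_decrypt_page() and the fscrypt_ctx bounce-page bookkeeping with block-granular helpers. The following is a minimal sketch of how a filesystem's write path might use the new interface; example_prepare_page_for_write() is a hypothetical name, while fscrypt_encrypt_pagecache_blocks() and the related bounce-page helpers come from this series.

/*
 * Hypothetical helper (not part of this series): prepare one dirty
 * pagecache page of a possibly-encrypted regular file for writeback.
 * On success the returned page is what should be attached to the bio:
 * either the original pagecache page, or a bounce page holding the
 * ciphertext (callers must check for an ERR_PTR).
 */
static struct page *example_prepare_page_for_write(struct page *page)
{
	struct inode *inode = page->mapping->host;

	if (!IS_ENCRYPTED(inode) || !S_ISREG(inode->i_mode))
		return page;

	/*
	 * Encrypt the whole page into a newly allocated bounce page.
	 * With blocksize < PAGE_SIZE, len/offs could instead select
	 * individual blocks within the page.
	 */
	return fscrypt_encrypt_pagecache_blocks(page, PAGE_SIZE, 0, GFP_NOFS);
}

Once writeback completes (or fails), the ciphertext page is released with fscrypt_free_bounce_page(); fscrypt_is_bounce_page() and fscrypt_pagecache_page() map the bounce page back to the pagecache page, as the ext4 and f2fs hunks below illustrate.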
@@ -191,7 +191,9 @@ Currently, the following pairs of encryption modes are supported:
 If unsure, you should use the (AES-256-XTS, AES-256-CTS-CBC) pair.
 
 AES-128-CBC was added only for low-powered embedded devices with
-crypto accelerators such as CAAM or CESA that do not support XTS.
+crypto accelerators such as CAAM or CESA that do not support XTS. To
+use AES-128-CBC, CONFIG_CRYPTO_SHA256 (or another SHA-256
+implementation) must be enabled so that ESSIV can be used.
 
 Adiantum is a (primarily) stream cipher-based mode that is fast even
 on CPUs without dedicated crypto instructions. It's also a true
@@ -647,3 +649,42 @@ Note that the precise way that filenames are presented to userspace
 without the key is subject to change in the future. It is only meant
 as a way to temporarily present valid filenames so that commands like
 ``rm -r`` work as expected on encrypted directories.
+
+Tests
+=====
+
+To test fscrypt, use xfstests, which is Linux's de facto standard
+filesystem test suite. First, run all the tests in the "encrypt"
+group on the relevant filesystem(s). For example, to test ext4 and
+f2fs encryption using `kvm-xfstests
+<https://github.com/tytso/xfstests-bld/blob/master/Documentation/kvm-quickstart.md>`_::
+
+    kvm-xfstests -c ext4,f2fs -g encrypt
+
+UBIFS encryption can also be tested this way, but it should be done in
+a separate command, and it takes some time for kvm-xfstests to set up
+emulated UBI volumes::
+
+    kvm-xfstests -c ubifs -g encrypt
+
+No tests should fail. However, tests that use non-default encryption
+modes (e.g. generic/549 and generic/550) will be skipped if the needed
+algorithms were not built into the kernel's crypto API. Also, tests
+that access the raw block device (e.g. generic/399, generic/548,
+generic/549, generic/550) will be skipped on UBIFS.
+
+Besides running the "encrypt" group tests, for ext4 and f2fs it's also
+possible to run most xfstests with the "test_dummy_encryption" mount
+option. This option causes all new files to be automatically
+encrypted with a dummy key, without having to make any API calls.
+This tests the encrypted I/O paths more thoroughly. To do this with
+kvm-xfstests, use the "encrypt" filesystem configuration::
+
+    kvm-xfstests -c ext4/encrypt,f2fs/encrypt -g auto
+
+Because this runs many more tests than "-g encrypt" does, it takes
+much longer to run; so also consider using `gce-xfstests
+<https://github.com/tytso/xfstests-bld/blob/master/Documentation/gce-xfstests.md>`_
+instead of kvm-xfstests::
+
+    gce-xfstests -c ext4/encrypt,f2fs/encrypt -g auto
@@ -7,7 +7,6 @@ config FS_ENCRYPTION
 	select CRYPTO_ECB
 	select CRYPTO_XTS
 	select CRYPTO_CTS
-	select CRYPTO_SHA256
 	select KEYS
 	help
 	  Enable encryption of files and directories. This
@@ -33,9 +33,8 @@ static void __fscrypt_decrypt_bio(struct bio *bio, bool done)
 
 	bio_for_each_segment_all(bv, bio, iter_all) {
 		struct page *page = bv->bv_page;
-		int ret = fscrypt_decrypt_page(page->mapping->host, page,
-				PAGE_SIZE, 0, page->index);
-
+		int ret = fscrypt_decrypt_pagecache_blocks(page, bv->bv_len,
+							   bv->bv_offset);
 		if (ret)
 			SetPageError(page);
 		else if (done)
@@ -53,9 +52,8 @@ EXPORT_SYMBOL(fscrypt_decrypt_bio);
 
 static void completion_pages(struct work_struct *work)
 {
-	struct fscrypt_ctx *ctx =
-		container_of(work, struct fscrypt_ctx, r.work);
-	struct bio *bio = ctx->r.bio;
+	struct fscrypt_ctx *ctx = container_of(work, struct fscrypt_ctx, work);
+	struct bio *bio = ctx->bio;
 
 	__fscrypt_decrypt_bio(bio, true);
 	fscrypt_release_ctx(ctx);
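The per-segment decryption above is what enables blocksize < PAGE_SIZE: a caller can now decrypt only the blocks that were actually read. A hedged sketch of that case follows; example_decrypt_one_block() is a hypothetical wrapper, while fscrypt_decrypt_pagecache_blocks(), i_blocksize() and bh_offset() are existing kernel interfaces.

/*
 * Hypothetical read-completion step, assuming blocksize <= PAGE_SIZE:
 * decrypt only the single filesystem block that was just read into
 * this buffer_head, rather than the whole page.
 */
static int example_decrypt_one_block(struct page *page,
				     struct buffer_head *bh)
{
	const struct inode *inode = page->mapping->host;
	unsigned int blocksize = i_blocksize(inode);

	/* len and offs must both be multiples of the fs block size. */
	return fscrypt_decrypt_pagecache_blocks(page, blocksize,
						bh_offset(bh));
}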
@ -64,57 +62,29 @@ static void completion_pages(struct work_struct *work)
|
|||
|
||||
void fscrypt_enqueue_decrypt_bio(struct fscrypt_ctx *ctx, struct bio *bio)
|
||||
{
|
||||
INIT_WORK(&ctx->r.work, completion_pages);
|
||||
ctx->r.bio = bio;
|
||||
fscrypt_enqueue_decrypt_work(&ctx->r.work);
|
||||
INIT_WORK(&ctx->work, completion_pages);
|
||||
ctx->bio = bio;
|
||||
fscrypt_enqueue_decrypt_work(&ctx->work);
|
||||
}
|
||||
EXPORT_SYMBOL(fscrypt_enqueue_decrypt_bio);
|
||||
|
||||
void fscrypt_pullback_bio_page(struct page **page, bool restore)
|
||||
{
|
||||
struct fscrypt_ctx *ctx;
|
||||
struct page *bounce_page;
|
||||
|
||||
/* The bounce data pages are unmapped. */
|
||||
if ((*page)->mapping)
|
||||
return;
|
||||
|
||||
/* The bounce data page is unmapped. */
|
||||
bounce_page = *page;
|
||||
ctx = (struct fscrypt_ctx *)page_private(bounce_page);
|
||||
|
||||
/* restore control page */
|
||||
*page = ctx->w.control_page;
|
||||
|
||||
if (restore)
|
||||
fscrypt_restore_control_page(bounce_page);
|
||||
}
|
||||
EXPORT_SYMBOL(fscrypt_pullback_bio_page);
|
||||
|
||||
int fscrypt_zeroout_range(const struct inode *inode, pgoff_t lblk,
|
||||
sector_t pblk, unsigned int len)
|
||||
{
|
||||
struct fscrypt_ctx *ctx;
|
||||
struct page *ciphertext_page = NULL;
|
||||
const unsigned int blockbits = inode->i_blkbits;
|
||||
const unsigned int blocksize = 1 << blockbits;
|
||||
struct page *ciphertext_page;
|
||||
struct bio *bio;
|
||||
int ret, err = 0;
|
||||
|
||||
BUG_ON(inode->i_sb->s_blocksize != PAGE_SIZE);
|
||||
|
||||
ctx = fscrypt_get_ctx(GFP_NOFS);
|
||||
if (IS_ERR(ctx))
|
||||
return PTR_ERR(ctx);
|
||||
|
||||
ciphertext_page = fscrypt_alloc_bounce_page(ctx, GFP_NOWAIT);
|
||||
if (IS_ERR(ciphertext_page)) {
|
||||
err = PTR_ERR(ciphertext_page);
|
||||
goto errout;
|
||||
}
|
||||
ciphertext_page = fscrypt_alloc_bounce_page(GFP_NOWAIT);
|
||||
if (!ciphertext_page)
|
||||
return -ENOMEM;
|
||||
|
||||
while (len--) {
|
||||
err = fscrypt_do_page_crypto(inode, FS_ENCRYPT, lblk,
|
||||
ZERO_PAGE(0), ciphertext_page,
|
||||
PAGE_SIZE, 0, GFP_NOFS);
|
||||
err = fscrypt_crypt_block(inode, FS_ENCRYPT, lblk,
|
||||
ZERO_PAGE(0), ciphertext_page,
|
||||
blocksize, 0, GFP_NOFS);
|
||||
if (err)
|
||||
goto errout;
|
||||
|
||||
|
@ -124,14 +94,11 @@ int fscrypt_zeroout_range(const struct inode *inode, pgoff_t lblk,
|
|||
goto errout;
|
||||
}
|
||||
bio_set_dev(bio, inode->i_sb->s_bdev);
|
||||
bio->bi_iter.bi_sector =
|
||||
pblk << (inode->i_sb->s_blocksize_bits - 9);
|
||||
bio->bi_iter.bi_sector = pblk << (blockbits - 9);
|
||||
bio_set_op_attrs(bio, REQ_OP_WRITE, 0);
|
||||
ret = bio_add_page(bio, ciphertext_page,
|
||||
inode->i_sb->s_blocksize, 0);
|
||||
if (ret != inode->i_sb->s_blocksize) {
|
||||
ret = bio_add_page(bio, ciphertext_page, blocksize, 0);
|
||||
if (WARN_ON(ret != blocksize)) {
|
||||
/* should never happen! */
|
||||
WARN_ON(1);
|
||||
bio_put(bio);
|
||||
err = -EIO;
|
||||
goto errout;
|
||||
|
@ -147,7 +114,7 @@ int fscrypt_zeroout_range(const struct inode *inode, pgoff_t lblk,
|
|||
}
|
||||
err = 0;
|
||||
errout:
|
||||
fscrypt_release_ctx(ctx);
|
||||
fscrypt_free_bounce_page(ciphertext_page);
|
||||
return err;
|
||||
}
|
||||
EXPORT_SYMBOL(fscrypt_zeroout_range);
|
||||
|
|
|
@ -59,23 +59,16 @@ void fscrypt_enqueue_decrypt_work(struct work_struct *work)
|
|||
EXPORT_SYMBOL(fscrypt_enqueue_decrypt_work);
|
||||
|
||||
/**
|
||||
* fscrypt_release_ctx() - Releases an encryption context
|
||||
* @ctx: The encryption context to release.
|
||||
* fscrypt_release_ctx() - Release a decryption context
|
||||
* @ctx: The decryption context to release.
|
||||
*
|
||||
* If the encryption context was allocated from the pre-allocated pool, returns
|
||||
* it to that pool. Else, frees it.
|
||||
*
|
||||
* If there's a bounce page in the context, this frees that.
|
||||
* If the decryption context was allocated from the pre-allocated pool, return
|
||||
* it to that pool. Else, free it.
|
||||
*/
|
||||
void fscrypt_release_ctx(struct fscrypt_ctx *ctx)
|
||||
{
|
||||
unsigned long flags;
|
||||
|
||||
if (ctx->flags & FS_CTX_HAS_BOUNCE_BUFFER_FL && ctx->w.bounce_page) {
|
||||
mempool_free(ctx->w.bounce_page, fscrypt_bounce_page_pool);
|
||||
ctx->w.bounce_page = NULL;
|
||||
}
|
||||
ctx->w.control_page = NULL;
|
||||
if (ctx->flags & FS_CTX_REQUIRES_FREE_ENCRYPT_FL) {
|
||||
kmem_cache_free(fscrypt_ctx_cachep, ctx);
|
||||
} else {
|
||||
|
@ -87,12 +80,12 @@ void fscrypt_release_ctx(struct fscrypt_ctx *ctx)
|
|||
EXPORT_SYMBOL(fscrypt_release_ctx);
|
||||
|
||||
/**
|
||||
* fscrypt_get_ctx() - Gets an encryption context
|
||||
* fscrypt_get_ctx() - Get a decryption context
|
||||
* @gfp_flags: The gfp flag for memory allocation
|
||||
*
|
||||
* Allocates and initializes an encryption context.
|
||||
* Allocate and initialize a decryption context.
|
||||
*
|
||||
* Return: A new encryption context on success; an ERR_PTR() otherwise.
|
||||
* Return: A new decryption context on success; an ERR_PTR() otherwise.
|
||||
*/
|
||||
struct fscrypt_ctx *fscrypt_get_ctx(gfp_t gfp_flags)
|
||||
{
|
||||
|
@ -100,14 +93,8 @@ struct fscrypt_ctx *fscrypt_get_ctx(gfp_t gfp_flags)
|
|||
unsigned long flags;
|
||||
|
||||
/*
|
||||
* We first try getting the ctx from a free list because in
|
||||
* the common case the ctx will have an allocated and
|
||||
* initialized crypto tfm, so it's probably a worthwhile
|
||||
* optimization. For the bounce page, we first try getting it
|
||||
* from the kernel allocator because that's just about as fast
|
||||
* as getting it from a list and because a cache of free pages
|
||||
* should generally be a "last resort" option for a filesystem
|
||||
* to be able to do its job.
|
||||
* First try getting a ctx from the free list so that we don't have to
|
||||
* call into the slab allocator.
|
||||
*/
|
||||
spin_lock_irqsave(&fscrypt_ctx_lock, flags);
|
||||
ctx = list_first_entry_or_null(&fscrypt_free_ctxs,
|
||||
|
@ -123,11 +110,31 @@ struct fscrypt_ctx *fscrypt_get_ctx(gfp_t gfp_flags)
|
|||
} else {
|
||||
ctx->flags &= ~FS_CTX_REQUIRES_FREE_ENCRYPT_FL;
|
||||
}
|
||||
ctx->flags &= ~FS_CTX_HAS_BOUNCE_BUFFER_FL;
|
||||
return ctx;
|
||||
}
|
||||
EXPORT_SYMBOL(fscrypt_get_ctx);
|
||||
|
||||
struct page *fscrypt_alloc_bounce_page(gfp_t gfp_flags)
|
||||
{
|
||||
return mempool_alloc(fscrypt_bounce_page_pool, gfp_flags);
|
||||
}
|
||||
|
||||
/**
|
||||
* fscrypt_free_bounce_page() - free a ciphertext bounce page
|
||||
*
|
||||
* Free a bounce page that was allocated by fscrypt_encrypt_pagecache_blocks(),
|
||||
* or by fscrypt_alloc_bounce_page() directly.
|
||||
*/
|
||||
void fscrypt_free_bounce_page(struct page *bounce_page)
|
||||
{
|
||||
if (!bounce_page)
|
||||
return;
|
||||
set_page_private(bounce_page, (unsigned long)NULL);
|
||||
ClearPagePrivate(bounce_page);
|
||||
mempool_free(bounce_page, fscrypt_bounce_page_pool);
|
||||
}
|
||||
EXPORT_SYMBOL(fscrypt_free_bounce_page);
|
||||
|
||||
void fscrypt_generate_iv(union fscrypt_iv *iv, u64 lblk_num,
|
||||
const struct fscrypt_info *ci)
|
||||
{
|
||||
|
@ -141,10 +148,11 @@ void fscrypt_generate_iv(union fscrypt_iv *iv, u64 lblk_num,
|
|||
crypto_cipher_encrypt_one(ci->ci_essiv_tfm, iv->raw, iv->raw);
|
||||
}
|
||||
|
||||
int fscrypt_do_page_crypto(const struct inode *inode, fscrypt_direction_t rw,
|
||||
u64 lblk_num, struct page *src_page,
|
||||
struct page *dest_page, unsigned int len,
|
||||
unsigned int offs, gfp_t gfp_flags)
|
||||
/* Encrypt or decrypt a single filesystem block of file contents */
|
||||
int fscrypt_crypt_block(const struct inode *inode, fscrypt_direction_t rw,
|
||||
u64 lblk_num, struct page *src_page,
|
||||
struct page *dest_page, unsigned int len,
|
||||
unsigned int offs, gfp_t gfp_flags)
|
||||
{
|
||||
union fscrypt_iv iv;
|
||||
struct skcipher_request *req = NULL;
|
||||
|
@ -154,7 +162,10 @@ int fscrypt_do_page_crypto(const struct inode *inode, fscrypt_direction_t rw,
|
|||
struct crypto_skcipher *tfm = ci->ci_ctfm;
|
||||
int res = 0;
|
||||
|
||||
BUG_ON(len == 0);
|
||||
if (WARN_ON_ONCE(len <= 0))
|
||||
return -EINVAL;
|
||||
if (WARN_ON_ONCE(len % FS_CRYPTO_BLOCK_SIZE != 0))
|
||||
return -EINVAL;
|
||||
|
||||
fscrypt_generate_iv(&iv, lblk_num, ci);
|
||||
|
||||
|
@ -186,126 +197,158 @@ int fscrypt_do_page_crypto(const struct inode *inode, fscrypt_direction_t rw,
|
|||
return 0;
|
||||
}
|
||||
|
||||
struct page *fscrypt_alloc_bounce_page(struct fscrypt_ctx *ctx,
|
||||
gfp_t gfp_flags)
|
||||
{
|
||||
ctx->w.bounce_page = mempool_alloc(fscrypt_bounce_page_pool, gfp_flags);
|
||||
if (ctx->w.bounce_page == NULL)
|
||||
return ERR_PTR(-ENOMEM);
|
||||
ctx->flags |= FS_CTX_HAS_BOUNCE_BUFFER_FL;
|
||||
return ctx->w.bounce_page;
|
||||
}
|
||||
|
||||
/**
|
||||
* fscypt_encrypt_page() - Encrypts a page
|
||||
* @inode: The inode for which the encryption should take place
|
||||
* @page: The page to encrypt. Must be locked for bounce-page
|
||||
* encryption.
|
||||
* @len: Length of data to encrypt in @page and encrypted
|
||||
* data in returned page.
|
||||
* @offs: Offset of data within @page and returned
|
||||
* page holding encrypted data.
|
||||
* @lblk_num: Logical block number. This must be unique for multiple
|
||||
* calls with same inode, except when overwriting
|
||||
* previously written data.
|
||||
* @gfp_flags: The gfp flag for memory allocation
|
||||
* fscrypt_encrypt_pagecache_blocks() - Encrypt filesystem blocks from a pagecache page
|
||||
* @page: The locked pagecache page containing the block(s) to encrypt
|
||||
* @len: Total size of the block(s) to encrypt. Must be a nonzero
|
||||
* multiple of the filesystem's block size.
|
||||
* @offs: Byte offset within @page of the first block to encrypt. Must be
|
||||
* a multiple of the filesystem's block size.
|
||||
* @gfp_flags: Memory allocation flags
|
||||
*
|
||||
* Encrypts @page using the ctx encryption context. Performs encryption
|
||||
* either in-place or into a newly allocated bounce page.
|
||||
* Called on the page write path.
|
||||
* A new bounce page is allocated, and the specified block(s) are encrypted into
|
||||
* it. In the bounce page, the ciphertext block(s) will be located at the same
|
||||
* offsets at which the plaintext block(s) were located in the source page; any
|
||||
* other parts of the bounce page will be left uninitialized. However, normally
|
||||
* blocksize == PAGE_SIZE and the whole page is encrypted at once.
|
||||
*
|
||||
* Bounce page allocation is the default.
|
||||
* In this case, the contents of @page are encrypted and stored in an
|
||||
* allocated bounce page. @page has to be locked and the caller must call
|
||||
* fscrypt_restore_control_page() on the returned ciphertext page to
|
||||
* release the bounce buffer and the encryption context.
|
||||
* This is for use by the filesystem's ->writepages() method.
|
||||
*
|
||||
* In-place encryption is used by setting the FS_CFLG_OWN_PAGES flag in
|
||||
* fscrypt_operations. Here, the input-page is returned with its content
|
||||
* encrypted.
|
||||
*
|
||||
* Return: A page with the encrypted content on success. Else, an
|
||||
* error value or NULL.
|
||||
* Return: the new encrypted bounce page on success; an ERR_PTR() on failure
|
||||
*/
|
||||
struct page *fscrypt_encrypt_page(const struct inode *inode,
|
||||
struct page *page,
|
||||
unsigned int len,
|
||||
unsigned int offs,
|
||||
u64 lblk_num, gfp_t gfp_flags)
|
||||
struct page *fscrypt_encrypt_pagecache_blocks(struct page *page,
|
||||
unsigned int len,
|
||||
unsigned int offs,
|
||||
gfp_t gfp_flags)
|
||||
|
||||
{
|
||||
struct fscrypt_ctx *ctx;
|
||||
struct page *ciphertext_page = page;
|
||||
const struct inode *inode = page->mapping->host;
|
||||
const unsigned int blockbits = inode->i_blkbits;
|
||||
const unsigned int blocksize = 1 << blockbits;
|
||||
struct page *ciphertext_page;
|
||||
u64 lblk_num = ((u64)page->index << (PAGE_SHIFT - blockbits)) +
|
||||
(offs >> blockbits);
|
||||
unsigned int i;
|
||||
int err;
|
||||
|
||||
BUG_ON(len % FS_CRYPTO_BLOCK_SIZE != 0);
|
||||
if (WARN_ON_ONCE(!PageLocked(page)))
|
||||
return ERR_PTR(-EINVAL);
|
||||
|
||||
if (inode->i_sb->s_cop->flags & FS_CFLG_OWN_PAGES) {
|
||||
/* with inplace-encryption we just encrypt the page */
|
||||
err = fscrypt_do_page_crypto(inode, FS_ENCRYPT, lblk_num, page,
|
||||
ciphertext_page, len, offs,
|
||||
gfp_flags);
|
||||
if (err)
|
||||
if (WARN_ON_ONCE(len <= 0 || !IS_ALIGNED(len | offs, blocksize)))
|
||||
return ERR_PTR(-EINVAL);
|
||||
|
||||
ciphertext_page = fscrypt_alloc_bounce_page(gfp_flags);
|
||||
if (!ciphertext_page)
|
||||
return ERR_PTR(-ENOMEM);
|
||||
|
||||
for (i = offs; i < offs + len; i += blocksize, lblk_num++) {
|
||||
err = fscrypt_crypt_block(inode, FS_ENCRYPT, lblk_num,
|
||||
page, ciphertext_page,
|
||||
blocksize, i, gfp_flags);
|
||||
if (err) {
|
||||
fscrypt_free_bounce_page(ciphertext_page);
|
||||
return ERR_PTR(err);
|
||||
|
||||
return ciphertext_page;
|
||||
}
|
||||
|
||||
BUG_ON(!PageLocked(page));
|
||||
|
||||
ctx = fscrypt_get_ctx(gfp_flags);
|
||||
if (IS_ERR(ctx))
|
||||
return ERR_CAST(ctx);
|
||||
|
||||
/* The encryption operation will require a bounce page. */
|
||||
ciphertext_page = fscrypt_alloc_bounce_page(ctx, gfp_flags);
|
||||
if (IS_ERR(ciphertext_page))
|
||||
goto errout;
|
||||
|
||||
ctx->w.control_page = page;
|
||||
err = fscrypt_do_page_crypto(inode, FS_ENCRYPT, lblk_num,
|
||||
page, ciphertext_page, len, offs,
|
||||
gfp_flags);
|
||||
if (err) {
|
||||
ciphertext_page = ERR_PTR(err);
|
||||
goto errout;
|
||||
}
|
||||
}
|
||||
SetPagePrivate(ciphertext_page);
|
||||
set_page_private(ciphertext_page, (unsigned long)ctx);
|
||||
lock_page(ciphertext_page);
|
||||
return ciphertext_page;
|
||||
|
||||
errout:
|
||||
fscrypt_release_ctx(ctx);
|
||||
set_page_private(ciphertext_page, (unsigned long)page);
|
||||
return ciphertext_page;
|
||||
}
|
||||
EXPORT_SYMBOL(fscrypt_encrypt_page);
|
||||
EXPORT_SYMBOL(fscrypt_encrypt_pagecache_blocks);
|
||||
|
||||
/**
|
||||
* fscrypt_decrypt_page() - Decrypts a page in-place
|
||||
* @inode: The corresponding inode for the page to decrypt.
|
||||
* @page: The page to decrypt. Must be locked in case
|
||||
* it is a writeback page (FS_CFLG_OWN_PAGES unset).
|
||||
* @len: Number of bytes in @page to be decrypted.
|
||||
* @offs: Start of data in @page.
|
||||
* @lblk_num: Logical block number.
|
||||
* fscrypt_encrypt_block_inplace() - Encrypt a filesystem block in-place
|
||||
* @inode: The inode to which this block belongs
|
||||
* @page: The page containing the block to encrypt
|
||||
* @len: Size of block to encrypt. Doesn't need to be a multiple of the
|
||||
* fs block size, but must be a multiple of FS_CRYPTO_BLOCK_SIZE.
|
||||
* @offs: Byte offset within @page at which the block to encrypt begins
|
||||
* @lblk_num: Filesystem logical block number of the block, i.e. the 0-based
|
||||
* number of the block within the file
|
||||
* @gfp_flags: Memory allocation flags
|
||||
*
|
||||
* Decrypts page in-place using the ctx encryption context.
|
||||
* Encrypt a possibly-compressed filesystem block that is located in an
|
||||
* arbitrary page, not necessarily in the original pagecache page. The @inode
|
||||
* and @lblk_num must be specified, as they can't be determined from @page.
|
||||
*
|
||||
* Called from the read completion callback.
|
||||
*
|
||||
* Return: Zero on success, non-zero otherwise.
|
||||
* Return: 0 on success; -errno on failure
|
||||
*/
|
||||
int fscrypt_decrypt_page(const struct inode *inode, struct page *page,
|
||||
unsigned int len, unsigned int offs, u64 lblk_num)
|
||||
int fscrypt_encrypt_block_inplace(const struct inode *inode, struct page *page,
|
||||
unsigned int len, unsigned int offs,
|
||||
u64 lblk_num, gfp_t gfp_flags)
|
||||
{
|
||||
if (!(inode->i_sb->s_cop->flags & FS_CFLG_OWN_PAGES))
|
||||
BUG_ON(!PageLocked(page));
|
||||
|
||||
return fscrypt_do_page_crypto(inode, FS_DECRYPT, lblk_num, page, page,
|
||||
len, offs, GFP_NOFS);
|
||||
return fscrypt_crypt_block(inode, FS_ENCRYPT, lblk_num, page, page,
|
||||
len, offs, gfp_flags);
|
||||
}
|
||||
EXPORT_SYMBOL(fscrypt_decrypt_page);
|
||||
EXPORT_SYMBOL(fscrypt_encrypt_block_inplace);
|
||||
|
||||
/**
|
||||
* fscrypt_decrypt_pagecache_blocks() - Decrypt filesystem blocks in a pagecache page
|
||||
* @page: The locked pagecache page containing the block(s) to decrypt
|
||||
* @len: Total size of the block(s) to decrypt. Must be a nonzero
|
||||
* multiple of the filesystem's block size.
|
||||
* @offs: Byte offset within @page of the first block to decrypt. Must be
|
||||
* a multiple of the filesystem's block size.
|
||||
*
|
||||
* The specified block(s) are decrypted in-place within the pagecache page,
|
||||
* which must still be locked and not uptodate. Normally, blocksize ==
|
||||
* PAGE_SIZE and the whole page is decrypted at once.
|
||||
*
|
||||
* This is for use by the filesystem's ->readpages() method.
|
||||
*
|
||||
* Return: 0 on success; -errno on failure
|
||||
*/
|
||||
int fscrypt_decrypt_pagecache_blocks(struct page *page, unsigned int len,
|
||||
unsigned int offs)
|
||||
{
|
||||
const struct inode *inode = page->mapping->host;
|
||||
const unsigned int blockbits = inode->i_blkbits;
|
||||
const unsigned int blocksize = 1 << blockbits;
|
||||
u64 lblk_num = ((u64)page->index << (PAGE_SHIFT - blockbits)) +
|
||||
(offs >> blockbits);
|
||||
unsigned int i;
|
||||
int err;
|
||||
|
||||
if (WARN_ON_ONCE(!PageLocked(page)))
|
||||
return -EINVAL;
|
||||
|
||||
if (WARN_ON_ONCE(len <= 0 || !IS_ALIGNED(len | offs, blocksize)))
|
||||
return -EINVAL;
|
||||
|
||||
for (i = offs; i < offs + len; i += blocksize, lblk_num++) {
|
||||
err = fscrypt_crypt_block(inode, FS_DECRYPT, lblk_num, page,
|
||||
page, blocksize, i, GFP_NOFS);
|
||||
if (err)
|
||||
return err;
|
||||
}
|
||||
return 0;
|
||||
}
|
||||
EXPORT_SYMBOL(fscrypt_decrypt_pagecache_blocks);
|
||||
|
||||
/**
|
||||
* fscrypt_decrypt_block_inplace() - Decrypt a filesystem block in-place
|
||||
* @inode: The inode to which this block belongs
|
||||
* @page: The page containing the block to decrypt
|
||||
* @len: Size of block to decrypt. Doesn't need to be a multiple of the
|
||||
* fs block size, but must be a multiple of FS_CRYPTO_BLOCK_SIZE.
|
||||
* @offs: Byte offset within @page at which the block to decrypt begins
|
||||
* @lblk_num: Filesystem logical block number of the block, i.e. the 0-based
|
||||
* number of the block within the file
|
||||
*
|
||||
* Decrypt a possibly-compressed filesystem block that is located in an
|
||||
* arbitrary page, not necessarily in the original pagecache page. The @inode
|
||||
* and @lblk_num must be specified, as they can't be determined from @page.
|
||||
*
|
||||
* Return: 0 on success; -errno on failure
|
||||
*/
|
||||
int fscrypt_decrypt_block_inplace(const struct inode *inode, struct page *page,
|
||||
unsigned int len, unsigned int offs,
|
||||
u64 lblk_num)
|
||||
{
|
||||
return fscrypt_crypt_block(inode, FS_DECRYPT, lblk_num, page, page,
|
||||
len, offs, GFP_NOFS);
|
||||
}
|
||||
EXPORT_SYMBOL(fscrypt_decrypt_block_inplace);
|
||||
|
||||
/*
|
||||
* Validate dentries in encrypted directories to make sure we aren't potentially
|
||||
|
@ -355,18 +398,6 @@ const struct dentry_operations fscrypt_d_ops = {
|
|||
.d_revalidate = fscrypt_d_revalidate,
|
||||
};
|
||||
|
||||
void fscrypt_restore_control_page(struct page *page)
|
||||
{
|
||||
struct fscrypt_ctx *ctx;
|
||||
|
||||
ctx = (struct fscrypt_ctx *)page_private(page);
|
||||
set_page_private(page, (unsigned long)NULL);
|
||||
ClearPagePrivate(page);
|
||||
unlock_page(page);
|
||||
fscrypt_release_ctx(ctx);
|
||||
}
|
||||
EXPORT_SYMBOL(fscrypt_restore_control_page);
|
||||
|
||||
static void fscrypt_destroy(void)
|
||||
{
|
||||
struct fscrypt_ctx *pos, *n;
|
||||
|
|
|
@@ -12,7 +12,6 @@
  */
 
 #include <linux/scatterlist.h>
-#include <linux/ratelimit.h>
 #include <crypto/skcipher.h>
 #include "fscrypt_private.h"
 
@@ -94,7 +94,6 @@ typedef enum {
 } fscrypt_direction_t;
 
 #define FS_CTX_REQUIRES_FREE_ENCRYPT_FL		0x00000001
-#define FS_CTX_HAS_BOUNCE_BUFFER_FL		0x00000002
 
 static inline bool fscrypt_valid_enc_modes(u32 contents_mode,
 					   u32 filenames_mode)
@@ -117,14 +116,12 @@ static inline bool fscrypt_valid_enc_modes(u32 contents_mode,
 /* crypto.c */
 extern struct kmem_cache *fscrypt_info_cachep;
 extern int fscrypt_initialize(unsigned int cop_flags);
-extern int fscrypt_do_page_crypto(const struct inode *inode,
-				  fscrypt_direction_t rw, u64 lblk_num,
-				  struct page *src_page,
-				  struct page *dest_page,
-				  unsigned int len, unsigned int offs,
-				  gfp_t gfp_flags);
-extern struct page *fscrypt_alloc_bounce_page(struct fscrypt_ctx *ctx,
-					      gfp_t gfp_flags);
+extern int fscrypt_crypt_block(const struct inode *inode,
+			       fscrypt_direction_t rw, u64 lblk_num,
+			       struct page *src_page, struct page *dest_page,
+			       unsigned int len, unsigned int offs,
+			       gfp_t gfp_flags);
+extern struct page *fscrypt_alloc_bounce_page(gfp_t gfp_flags);
 extern const struct dentry_operations fscrypt_d_ops;
 
 extern void __printf(3, 4) __cold
@@ -5,7 +5,6 @@
  * Encryption hooks for higher-level filesystem operations.
  */
 
-#include <linux/ratelimit.h>
 #include "fscrypt_private.h"
 
 /**
@@ -12,7 +12,6 @@
 #include <keys/user-type.h>
 #include <linux/hashtable.h>
 #include <linux/scatterlist.h>
-#include <linux/ratelimit.h>
 #include <crypto/aes.h>
 #include <crypto/algapi.h>
 #include <crypto/sha.h>
@@ -81,6 +81,8 @@ int fscrypt_ioctl_set_policy(struct file *filp, const void __user *arg)
 	if (ret == -ENODATA) {
 		if (!S_ISDIR(inode->i_mode))
 			ret = -ENOTDIR;
+		else if (IS_DEADDIR(inode))
+			ret = -ENOENT;
 		else if (!inode->i_sb->s_cop->empty_dir(inode))
 			ret = -ENOTEMPTY;
 		else
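With this check in place, FS_IOC_SET_ENCRYPTION_POLICY on a directory that was removed while still held open now fails with ENOENT instead of proceeding on a dead inode. A hedged userspace sketch, illustrative only: it assumes struct fscrypt_policy and the ioctl are exposed through <linux/fs.h> on kernels of this era, and error handling is abbreviated.

#define _GNU_SOURCE
#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <sys/stat.h>
#include <unistd.h>
#include <linux/fs.h>

int main(void)
{
	struct fscrypt_policy policy = {
		.version = 0,
		.contents_encryption_mode = FS_ENCRYPTION_MODE_AES_256_XTS,
		.filenames_encryption_mode = FS_ENCRYPTION_MODE_AES_256_CTS,
		.flags = FS_POLICY_FLAGS_PAD_16,
		/* .master_key_descriptor would identify a key in the keyring */
	};
	int fd;

	mkdir("dir", 0700);
	fd = open("dir", O_RDONLY | O_DIRECTORY);
	rmdir("dir");		/* the still-open directory is now "dead" */

	if (ioctl(fd, FS_IOC_SET_ENCRYPTION_POLICY, &policy) != 0)
		perror("FS_IOC_SET_ENCRYPTION_POLICY");	/* expected: ENOENT */
	close(fd);
	return 0;
}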
@ -1164,8 +1164,9 @@ static int ext4_block_write_begin(struct page *page, loff_t pos, unsigned len,
|
|||
int err = 0;
|
||||
unsigned blocksize = inode->i_sb->s_blocksize;
|
||||
unsigned bbits;
|
||||
struct buffer_head *bh, *head, *wait[2], **wait_bh = wait;
|
||||
bool decrypt = false;
|
||||
struct buffer_head *bh, *head, *wait[2];
|
||||
int nr_wait = 0;
|
||||
int i;
|
||||
|
||||
BUG_ON(!PageLocked(page));
|
||||
BUG_ON(from > PAGE_SIZE);
|
||||
|
@ -1217,23 +1218,32 @@ static int ext4_block_write_begin(struct page *page, loff_t pos, unsigned len,
|
|||
!buffer_unwritten(bh) &&
|
||||
(block_start < from || block_end > to)) {
|
||||
ll_rw_block(REQ_OP_READ, 0, 1, &bh);
|
||||
*wait_bh++ = bh;
|
||||
decrypt = IS_ENCRYPTED(inode) && S_ISREG(inode->i_mode);
|
||||
wait[nr_wait++] = bh;
|
||||
}
|
||||
}
|
||||
/*
|
||||
* If we issued read requests, let them complete.
|
||||
*/
|
||||
while (wait_bh > wait) {
|
||||
wait_on_buffer(*--wait_bh);
|
||||
if (!buffer_uptodate(*wait_bh))
|
||||
for (i = 0; i < nr_wait; i++) {
|
||||
wait_on_buffer(wait[i]);
|
||||
if (!buffer_uptodate(wait[i]))
|
||||
err = -EIO;
|
||||
}
|
||||
if (unlikely(err))
|
||||
if (unlikely(err)) {
|
||||
page_zero_new_buffers(page, from, to);
|
||||
else if (decrypt)
|
||||
err = fscrypt_decrypt_page(page->mapping->host, page,
|
||||
PAGE_SIZE, 0, page->index);
|
||||
} else if (IS_ENCRYPTED(inode) && S_ISREG(inode->i_mode)) {
|
||||
for (i = 0; i < nr_wait; i++) {
|
||||
int err2;
|
||||
|
||||
err2 = fscrypt_decrypt_pagecache_blocks(page, blocksize,
|
||||
bh_offset(wait[i]));
|
||||
if (err2) {
|
||||
clear_buffer_uptodate(wait[i]);
|
||||
err = err2;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return err;
|
||||
}
|
||||
#endif
|
||||
|
@ -4065,9 +4075,8 @@ static int __ext4_block_zero_page_range(handle_t *handle,
|
|||
if (S_ISREG(inode->i_mode) && IS_ENCRYPTED(inode)) {
|
||||
/* We expect the key to be set. */
|
||||
BUG_ON(!fscrypt_has_encryption_key(inode));
|
||||
BUG_ON(blocksize != PAGE_SIZE);
|
||||
WARN_ON_ONCE(fscrypt_decrypt_page(page->mapping->host,
|
||||
page, PAGE_SIZE, 0, page->index));
|
||||
WARN_ON_ONCE(fscrypt_decrypt_pagecache_blocks(
|
||||
page, blocksize, bh_offset(bh)));
|
||||
}
|
||||
}
|
||||
if (ext4_should_journal_data(inode)) {
|
||||
|
|
|
@ -66,9 +66,7 @@ static void ext4_finish_bio(struct bio *bio)
|
|||
|
||||
bio_for_each_segment_all(bvec, bio, iter_all) {
|
||||
struct page *page = bvec->bv_page;
|
||||
#ifdef CONFIG_FS_ENCRYPTION
|
||||
struct page *data_page = NULL;
|
||||
#endif
|
||||
struct page *bounce_page = NULL;
|
||||
struct buffer_head *bh, *head;
|
||||
unsigned bio_start = bvec->bv_offset;
|
||||
unsigned bio_end = bio_start + bvec->bv_len;
|
||||
|
@ -78,13 +76,10 @@ static void ext4_finish_bio(struct bio *bio)
|
|||
if (!page)
|
||||
continue;
|
||||
|
||||
#ifdef CONFIG_FS_ENCRYPTION
|
||||
if (!page->mapping) {
|
||||
/* The bounce data pages are unmapped. */
|
||||
data_page = page;
|
||||
fscrypt_pullback_bio_page(&page, false);
|
||||
if (fscrypt_is_bounce_page(page)) {
|
||||
bounce_page = page;
|
||||
page = fscrypt_pagecache_page(bounce_page);
|
||||
}
|
||||
#endif
|
||||
|
||||
if (bio->bi_status) {
|
||||
SetPageError(page);
|
||||
|
@ -111,10 +106,7 @@ static void ext4_finish_bio(struct bio *bio)
|
|||
bit_spin_unlock(BH_Uptodate_Lock, &head->b_state);
|
||||
local_irq_restore(flags);
|
||||
if (!under_io) {
|
||||
#ifdef CONFIG_FS_ENCRYPTION
|
||||
if (data_page)
|
||||
fscrypt_restore_control_page(data_page);
|
||||
#endif
|
||||
fscrypt_free_bounce_page(bounce_page);
|
||||
end_page_writeback(page);
|
||||
}
|
||||
}
|
||||
|
@ -415,7 +407,7 @@ int ext4_bio_write_page(struct ext4_io_submit *io,
|
|||
struct writeback_control *wbc,
|
||||
bool keep_towrite)
|
||||
{
|
||||
struct page *data_page = NULL;
|
||||
struct page *bounce_page = NULL;
|
||||
struct inode *inode = page->mapping->host;
|
||||
unsigned block_start;
|
||||
struct buffer_head *bh, *head;
|
||||
|
@ -475,14 +467,22 @@ int ext4_bio_write_page(struct ext4_io_submit *io,
|
|||
|
||||
bh = head = page_buffers(page);
|
||||
|
||||
/*
|
||||
* If any blocks are being written to an encrypted file, encrypt them
|
||||
* into a bounce page. For simplicity, just encrypt until the last
|
||||
* block which might be needed. This may cause some unneeded blocks
|
||||
* (e.g. holes) to be unnecessarily encrypted, but this is rare and
|
||||
* can't happen in the common case of blocksize == PAGE_SIZE.
|
||||
*/
|
||||
if (IS_ENCRYPTED(inode) && S_ISREG(inode->i_mode) && nr_to_submit) {
|
||||
gfp_t gfp_flags = GFP_NOFS;
|
||||
unsigned int enc_bytes = round_up(len, i_blocksize(inode));
|
||||
|
||||
retry_encrypt:
|
||||
data_page = fscrypt_encrypt_page(inode, page, PAGE_SIZE, 0,
|
||||
page->index, gfp_flags);
|
||||
if (IS_ERR(data_page)) {
|
||||
ret = PTR_ERR(data_page);
|
||||
bounce_page = fscrypt_encrypt_pagecache_blocks(page, enc_bytes,
|
||||
0, gfp_flags);
|
||||
if (IS_ERR(bounce_page)) {
|
||||
ret = PTR_ERR(bounce_page);
|
||||
if (ret == -ENOMEM && wbc->sync_mode == WB_SYNC_ALL) {
|
||||
if (io->io_bio) {
|
||||
ext4_io_submit(io);
|
||||
|
@ -491,7 +491,7 @@ int ext4_bio_write_page(struct ext4_io_submit *io,
|
|||
gfp_flags |= __GFP_NOFAIL;
|
||||
goto retry_encrypt;
|
||||
}
|
||||
data_page = NULL;
|
||||
bounce_page = NULL;
|
||||
goto out;
|
||||
}
|
||||
}
|
||||
|
@ -500,8 +500,7 @@ int ext4_bio_write_page(struct ext4_io_submit *io,
|
|||
do {
|
||||
if (!buffer_async_write(bh))
|
||||
continue;
|
||||
ret = io_submit_add_bh(io, inode,
|
||||
data_page ? data_page : page, bh);
|
||||
ret = io_submit_add_bh(io, inode, bounce_page ?: page, bh);
|
||||
if (ret) {
|
||||
/*
|
||||
* We only get here on ENOMEM. Not much else
|
||||
|
@ -517,8 +516,7 @@ int ext4_bio_write_page(struct ext4_io_submit *io,
|
|||
/* Error stopped previous loop? Clean up buffers... */
|
||||
if (ret) {
|
||||
out:
|
||||
if (data_page)
|
||||
fscrypt_restore_control_page(data_page);
|
||||
fscrypt_free_bounce_page(bounce_page);
|
||||
printk_ratelimited(KERN_ERR "%s: ret = %d\n", __func__, ret);
|
||||
redirty_page_for_writepage(wbc, page);
|
||||
do {
|
||||
|
|
|
@@ -185,7 +185,7 @@ static void f2fs_write_end_io(struct bio *bio)
 			continue;
 		}
 
-		fscrypt_pullback_bio_page(&page, true);
+		fscrypt_finalize_bounce_page(&page);
 
 		if (unlikely(bio->bi_status)) {
 			mapping_set_error(page->mapping, -EIO);
@@ -362,10 +362,9 @@ static bool __has_merged_page(struct f2fs_bio_info *io, struct inode *inode,
 
 	bio_for_each_segment_all(bvec, io->bio, iter_all) {
 
-		if (bvec->bv_page->mapping)
-			target = bvec->bv_page;
-		else
-			target = fscrypt_control_page(bvec->bv_page);
+		target = bvec->bv_page;
+		if (fscrypt_is_bounce_page(target))
+			target = fscrypt_pagecache_page(target);
 
 		if (inode && inode == target->mapping->host)
 			return true;
@@ -1727,8 +1726,9 @@ static int encrypt_one_page(struct f2fs_io_info *fio)
 	f2fs_wait_on_block_writeback(inode, fio->old_blkaddr);
 
 retry_encrypt:
-	fio->encrypted_page = fscrypt_encrypt_page(inode, fio->page,
-			PAGE_SIZE, 0, fio->page->index, gfp_flags);
+	fio->encrypted_page = fscrypt_encrypt_pagecache_blocks(fio->page,
+								PAGE_SIZE, 0,
+								gfp_flags);
 	if (IS_ERR(fio->encrypted_page)) {
 		/* flush pending IOs and wait for a while in the ENOMEM case */
 		if (PTR_ERR(fio->encrypted_page) == -ENOMEM) {
@@ -1900,8 +1900,7 @@ int f2fs_do_write_data_page(struct f2fs_io_info *fio)
 		err = f2fs_inplace_write_data(fio);
 		if (err) {
-			if (f2fs_encrypted_file(inode))
-				fscrypt_pullback_bio_page(&fio->encrypted_page,
-									true);
+			fscrypt_finalize_bounce_page(&fio->encrypted_page);
 			if (PageWriteback(page))
 				end_page_writeback(page);
 		} else {
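On the write-completion side, the new helpers let a filesystem recognize a bounce page and map it back to its pagecache page without the old fscrypt_ctx bookkeeping. A rough sketch follows, assuming the helper names introduced by this series; example_write_end_io() itself is a hypothetical function, and real filesystems such as f2fs use the fscrypt_finalize_bounce_page() wrapper shown later in the fscrypt.h hunk instead.

/*
 * Sketch of a write-completion handler under the new bounce-page scheme.
 */
static void example_write_end_io(struct bio *bio)
{
	struct bio_vec *bvec;
	struct bvec_iter_all iter_all;

	bio_for_each_segment_all(bvec, bio, iter_all) {
		struct page *page = bvec->bv_page;

		if (fscrypt_is_bounce_page(page)) {
			struct page *pagecache_page =
				fscrypt_pagecache_page(page);

			/* Free the ciphertext page, then finish writeback
			 * on the original pagecache page. */
			fscrypt_free_bounce_page(page);
			page = pagecache_page;
		}

		if (bio->bi_status)
			mapping_set_error(page->mapping, -EIO);
		end_page_writeback(page);
	}
	bio_put(bio);
}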
@@ -29,8 +29,8 @@ int ubifs_encrypt(const struct inode *inode, struct ubifs_data_node *dn,
 {
 	struct ubifs_info *c = inode->i_sb->s_fs_info;
 	void *p = &dn->data;
-	struct page *ret;
 	unsigned int pad_len = round_up(in_len, UBIFS_CIPHER_BLOCK_SIZE);
+	int err;
 
 	ubifs_assert(c, pad_len <= *out_len);
 	dn->compr_size = cpu_to_le16(in_len);
@@ -39,11 +39,11 @@ int ubifs_encrypt(const struct inode *inode, struct ubifs_data_node *dn,
 	if (pad_len != in_len)
 		memset(p + in_len, 0, pad_len - in_len);
 
-	ret = fscrypt_encrypt_page(inode, virt_to_page(&dn->data), pad_len,
-				   offset_in_page(&dn->data), block, GFP_NOFS);
-	if (IS_ERR(ret)) {
-		ubifs_err(c, "fscrypt_encrypt_page failed: %ld", PTR_ERR(ret));
-		return PTR_ERR(ret);
+	err = fscrypt_encrypt_block_inplace(inode, virt_to_page(p), pad_len,
+					    offset_in_page(p), block, GFP_NOFS);
+	if (err) {
+		ubifs_err(c, "fscrypt_encrypt_block_inplace() failed: %d", err);
+		return err;
 	}
 	*out_len = pad_len;
 
@@ -64,10 +64,11 @@ int ubifs_decrypt(const struct inode *inode, struct ubifs_data_node *dn,
 	}
 
 	ubifs_assert(c, dlen <= UBIFS_BLOCK_SIZE);
-	err = fscrypt_decrypt_page(inode, virt_to_page(&dn->data), dlen,
-				   offset_in_page(&dn->data), block);
+	err = fscrypt_decrypt_block_inplace(inode, virt_to_page(&dn->data),
+					    dlen, offset_in_page(&dn->data),
+					    block);
 	if (err) {
-		ubifs_err(c, "fscrypt_decrypt_page failed: %i", err);
+		ubifs_err(c, "fscrypt_decrypt_block_inplace() failed: %d", err);
 		return err;
 	}
 	*out_len = clen;
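For filesystems such as UBIFS that keep file data in their own buffers rather than in the pagecache, the in-place helpers take the inode and logical block number explicitly, since neither can be derived from the page. A hedged sketch of a caller; example_encrypt_private_buffer() is a hypothetical name, and len must be a multiple of FS_CRYPTO_BLOCK_SIZE.

/*
 * Encrypt one logical block that lives in a filesystem-private buffer,
 * UBIFS-style, using the in-place helper from this series. The buffer
 * is addressed by its backing page and offset within that page.
 */
static int example_encrypt_private_buffer(const struct inode *inode,
					  void *buf, unsigned int len,
					  u64 lblk_num)
{
	return fscrypt_encrypt_block_inplace(inode, virt_to_page(buf), len,
					     offset_in_page(buf), lblk_num,
					     GFP_NOFS);
}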
@ -63,16 +63,13 @@ struct fscrypt_operations {
|
|||
unsigned int max_namelen;
|
||||
};
|
||||
|
||||
/* Decryption work */
|
||||
struct fscrypt_ctx {
|
||||
union {
|
||||
struct {
|
||||
struct page *bounce_page; /* Ciphertext page */
|
||||
struct page *control_page; /* Original page */
|
||||
} w;
|
||||
struct {
|
||||
struct bio *bio;
|
||||
struct work_struct work;
|
||||
} r;
|
||||
};
|
||||
struct list_head free_list; /* Free list */
|
||||
};
|
||||
u8 flags; /* Flags */
|
||||
|
@ -106,18 +103,33 @@ static inline void fscrypt_handle_d_move(struct dentry *dentry)
|
|||
extern void fscrypt_enqueue_decrypt_work(struct work_struct *);
|
||||
extern struct fscrypt_ctx *fscrypt_get_ctx(gfp_t);
|
||||
extern void fscrypt_release_ctx(struct fscrypt_ctx *);
|
||||
extern struct page *fscrypt_encrypt_page(const struct inode *, struct page *,
|
||||
unsigned int, unsigned int,
|
||||
u64, gfp_t);
|
||||
extern int fscrypt_decrypt_page(const struct inode *, struct page *, unsigned int,
|
||||
unsigned int, u64);
|
||||
|
||||
static inline struct page *fscrypt_control_page(struct page *page)
|
||||
extern struct page *fscrypt_encrypt_pagecache_blocks(struct page *page,
|
||||
unsigned int len,
|
||||
unsigned int offs,
|
||||
gfp_t gfp_flags);
|
||||
extern int fscrypt_encrypt_block_inplace(const struct inode *inode,
|
||||
struct page *page, unsigned int len,
|
||||
unsigned int offs, u64 lblk_num,
|
||||
gfp_t gfp_flags);
|
||||
|
||||
extern int fscrypt_decrypt_pagecache_blocks(struct page *page, unsigned int len,
|
||||
unsigned int offs);
|
||||
extern int fscrypt_decrypt_block_inplace(const struct inode *inode,
|
||||
struct page *page, unsigned int len,
|
||||
unsigned int offs, u64 lblk_num);
|
||||
|
||||
static inline bool fscrypt_is_bounce_page(struct page *page)
|
||||
{
|
||||
return ((struct fscrypt_ctx *)page_private(page))->w.control_page;
|
||||
return page->mapping == NULL;
|
||||
}
|
||||
|
||||
extern void fscrypt_restore_control_page(struct page *);
|
||||
static inline struct page *fscrypt_pagecache_page(struct page *bounce_page)
|
||||
{
|
||||
return (struct page *)page_private(bounce_page);
|
||||
}
|
||||
|
||||
extern void fscrypt_free_bounce_page(struct page *bounce_page);
|
||||
|
||||
/* policy.c */
|
||||
extern int fscrypt_ioctl_set_policy(struct file *, const void __user *);
|
||||
|
@ -223,7 +235,6 @@ static inline bool fscrypt_match_name(const struct fscrypt_name *fname,
|
|||
extern void fscrypt_decrypt_bio(struct bio *);
|
||||
extern void fscrypt_enqueue_decrypt_bio(struct fscrypt_ctx *ctx,
|
||||
struct bio *bio);
|
||||
extern void fscrypt_pullback_bio_page(struct page **, bool);
|
||||
extern int fscrypt_zeroout_range(const struct inode *, pgoff_t, sector_t,
|
||||
unsigned int);
|
||||
|
||||
|
@ -283,32 +294,51 @@ static inline void fscrypt_release_ctx(struct fscrypt_ctx *ctx)
|
|||
return;
|
||||
}
|
||||
|
||||
static inline struct page *fscrypt_encrypt_page(const struct inode *inode,
|
||||
struct page *page,
|
||||
unsigned int len,
|
||||
unsigned int offs,
|
||||
u64 lblk_num, gfp_t gfp_flags)
|
||||
static inline struct page *fscrypt_encrypt_pagecache_blocks(struct page *page,
|
||||
unsigned int len,
|
||||
unsigned int offs,
|
||||
gfp_t gfp_flags)
|
||||
{
|
||||
return ERR_PTR(-EOPNOTSUPP);
|
||||
}
|
||||
|
||||
static inline int fscrypt_decrypt_page(const struct inode *inode,
|
||||
struct page *page,
|
||||
unsigned int len, unsigned int offs,
|
||||
u64 lblk_num)
|
||||
static inline int fscrypt_encrypt_block_inplace(const struct inode *inode,
|
||||
struct page *page,
|
||||
unsigned int len,
|
||||
unsigned int offs, u64 lblk_num,
|
||||
gfp_t gfp_flags)
|
||||
{
|
||||
return -EOPNOTSUPP;
|
||||
}
|
||||
|
||||
static inline struct page *fscrypt_control_page(struct page *page)
|
||||
static inline int fscrypt_decrypt_pagecache_blocks(struct page *page,
|
||||
unsigned int len,
|
||||
unsigned int offs)
|
||||
{
|
||||
return -EOPNOTSUPP;
|
||||
}
|
||||
|
||||
static inline int fscrypt_decrypt_block_inplace(const struct inode *inode,
|
||||
struct page *page,
|
||||
unsigned int len,
|
||||
unsigned int offs, u64 lblk_num)
|
||||
{
|
||||
return -EOPNOTSUPP;
|
||||
}
|
||||
|
||||
static inline bool fscrypt_is_bounce_page(struct page *page)
|
||||
{
|
||||
return false;
|
||||
}
|
||||
|
||||
static inline struct page *fscrypt_pagecache_page(struct page *bounce_page)
|
||||
{
|
||||
WARN_ON_ONCE(1);
|
||||
return ERR_PTR(-EINVAL);
|
||||
}
|
||||
|
||||
static inline void fscrypt_restore_control_page(struct page *page)
|
||||
static inline void fscrypt_free_bounce_page(struct page *bounce_page)
|
||||
{
|
||||
return;
|
||||
}
|
||||
|
||||
/* policy.c */
|
||||
|
@ -410,11 +440,6 @@ static inline void fscrypt_enqueue_decrypt_bio(struct fscrypt_ctx *ctx,
|
|||
{
|
||||
}
|
||||
|
||||
static inline void fscrypt_pullback_bio_page(struct page **page, bool restore)
|
||||
{
|
||||
return;
|
||||
}
|
||||
|
||||
static inline int fscrypt_zeroout_range(const struct inode *inode, pgoff_t lblk,
|
||||
sector_t pblk, unsigned int len)
|
||||
{
|
||||
|
@ -692,4 +717,15 @@ static inline int fscrypt_encrypt_symlink(struct inode *inode,
|
|||
return 0;
|
||||
}
|
||||
|
||||
/* If *pagep is a bounce page, free it and set *pagep to the pagecache page */
|
||||
static inline void fscrypt_finalize_bounce_page(struct page **pagep)
|
||||
{
|
||||
struct page *page = *pagep;
|
||||
|
||||
if (fscrypt_is_bounce_page(page)) {
|
||||
*pagep = fscrypt_pagecache_page(page);
|
||||
fscrypt_free_bounce_page(page);
|
||||
}
|
||||
}
|
||||
|
||||
#endif /* _LINUX_FSCRYPT_H */
|
||||
|
|