d3ec10aa95
A lockdep circular locking dependency report was seen when running a keyutils test:

[12537.027242] ======================================================
[12537.059309] WARNING: possible circular locking dependency detected
[12537.088148] 4.18.0-147.7.1.el8_1.x86_64+debug #1 Tainted: G OE --------- - -
[12537.125253] ------------------------------------------------------
[12537.153189] keyctl/25598 is trying to acquire lock:
[12537.175087] 000000007c39f96c (&mm->mmap_sem){++++}, at: __might_fault+0xc4/0x1b0
[12537.208365]
[12537.208365] but task is already holding lock:
[12537.234507] 000000003de5b58d (&type->lock_class){++++}, at: keyctl_read_key+0x15a/0x220
[12537.270476]
[12537.270476] which lock already depends on the new lock.
[12537.270476]
[12537.307209]
[12537.307209] the existing dependency chain (in reverse order) is:
[12537.340754]
[12537.340754] -> #3 (&type->lock_class){++++}:
[12537.367434]        down_write+0x4d/0x110
[12537.385202]        __key_link_begin+0x87/0x280
[12537.405232]        request_key_and_link+0x483/0xf70
[12537.427221]        request_key+0x3c/0x80
[12537.444839]        dns_query+0x1db/0x5a5 [dns_resolver]
[12537.468445]        dns_resolve_server_name_to_ip+0x1e1/0x4d0 [cifs]
[12537.496731]        cifs_reconnect+0xe04/0x2500 [cifs]
[12537.519418]        cifs_readv_from_socket+0x461/0x690 [cifs]
[12537.546263]        cifs_read_from_socket+0xa0/0xe0 [cifs]
[12537.573551]        cifs_demultiplex_thread+0x311/0x2db0 [cifs]
[12537.601045]        kthread+0x30c/0x3d0
[12537.617906]        ret_from_fork+0x3a/0x50
[12537.636225]
[12537.636225] -> #2 (root_key_user.cons_lock){+.+.}:
[12537.664525]        __mutex_lock+0x105/0x11f0
[12537.683734]        request_key_and_link+0x35a/0xf70
[12537.705640]        request_key+0x3c/0x80
[12537.723304]        dns_query+0x1db/0x5a5 [dns_resolver]
[12537.746773]        dns_resolve_server_name_to_ip+0x1e1/0x4d0 [cifs]
[12537.775607]        cifs_reconnect+0xe04/0x2500 [cifs]
[12537.798322]        cifs_readv_from_socket+0x461/0x690 [cifs]
[12537.823369]        cifs_read_from_socket+0xa0/0xe0 [cifs]
[12537.847262]        cifs_demultiplex_thread+0x311/0x2db0 [cifs]
[12537.873477]        kthread+0x30c/0x3d0
[12537.890281]        ret_from_fork+0x3a/0x50
[12537.908649]
[12537.908649] -> #1 (&tcp_ses->srv_mutex){+.+.}:
[12537.935225]        __mutex_lock+0x105/0x11f0
[12537.954450]        cifs_call_async+0x102/0x7f0 [cifs]
[12537.977250]        smb2_async_readv+0x6c3/0xc90 [cifs]
[12538.000659]        cifs_readpages+0x120a/0x1e50 [cifs]
[12538.023920]        read_pages+0xf5/0x560
[12538.041583]        __do_page_cache_readahead+0x41d/0x4b0
[12538.067047]        ondemand_readahead+0x44c/0xc10
[12538.092069]        filemap_fault+0xec1/0x1830
[12538.111637]        __do_fault+0x82/0x260
[12538.129216]        do_fault+0x419/0xfb0
[12538.146390]        __handle_mm_fault+0x862/0xdf0
[12538.167408]        handle_mm_fault+0x154/0x550
[12538.187401]        __do_page_fault+0x42f/0xa60
[12538.207395]        do_page_fault+0x38/0x5e0
[12538.225777]        page_fault+0x1e/0x30
[12538.243010]
[12538.243010] -> #0 (&mm->mmap_sem){++++}:
[12538.267875]        lock_acquire+0x14c/0x420
[12538.286848]        __might_fault+0x119/0x1b0
[12538.306006]        keyring_read_iterator+0x7e/0x170
[12538.327936]        assoc_array_subtree_iterate+0x97/0x280
[12538.352154]        keyring_read+0xe9/0x110
[12538.370558]        keyctl_read_key+0x1b9/0x220
[12538.391470]        do_syscall_64+0xa5/0x4b0
[12538.410511]        entry_SYSCALL_64_after_hwframe+0x6a/0xdf
[12538.435535]
[12538.435535] other info that might help us debug this:
[12538.435535]
[12538.472829] Chain exists of:
[12538.472829]   &mm->mmap_sem --> root_key_user.cons_lock --> &type->lock_class
[12538.472829]
[12538.524820]  Possible unsafe locking scenario:
[12538.524820]
[12538.551431]        CPU0                    CPU1
[12538.572654]        ----                    ----
[12538.595865]   lock(&type->lock_class);
[12538.613737]                                lock(root_key_user.cons_lock);
[12538.644234]                                lock(&type->lock_class);
[12538.672410]   lock(&mm->mmap_sem);
[12538.687758]
[12538.687758]  *** DEADLOCK ***
[12538.687758]
[12538.714455] 1 lock held by keyctl/25598:
[12538.732097]  #0: 000000003de5b58d (&type->lock_class){++++}, at: keyctl_read_key+0x15a/0x220
[12538.770573]
[12538.770573] stack backtrace:
[12538.790136] CPU: 2 PID: 25598 Comm: keyctl Kdump: loaded Tainted: G
[12538.844855] Hardware name: HP ProLiant DL360 Gen9/ProLiant DL360 Gen9, BIOS P89 12/27/2015
[12538.881963] Call Trace:
[12538.892897]  dump_stack+0x9a/0xf0
[12538.907908]  print_circular_bug.isra.25.cold.50+0x1bc/0x279
[12538.932891]  ? save_trace+0xd6/0x250
[12538.948979]  check_prev_add.constprop.32+0xc36/0x14f0
[12538.971643]  ? keyring_compare_object+0x104/0x190
[12538.992738]  ? check_usage+0x550/0x550
[12539.009845]  ? sched_clock+0x5/0x10
[12539.025484]  ? sched_clock_cpu+0x18/0x1e0
[12539.043555]  __lock_acquire+0x1f12/0x38d0
[12539.061551]  ? trace_hardirqs_on+0x10/0x10
[12539.080554]  lock_acquire+0x14c/0x420
[12539.100330]  ? __might_fault+0xc4/0x1b0
[12539.119079]  __might_fault+0x119/0x1b0
[12539.135869]  ? __might_fault+0xc4/0x1b0
[12539.153234]  keyring_read_iterator+0x7e/0x170
[12539.172787]  ? keyring_read+0x110/0x110
[12539.190059]  assoc_array_subtree_iterate+0x97/0x280
[12539.211526]  keyring_read+0xe9/0x110
[12539.227561]  ? keyring_gc_check_iterator+0xc0/0xc0
[12539.249076]  keyctl_read_key+0x1b9/0x220
[12539.266660]  do_syscall_64+0xa5/0x4b0
[12539.283091]  entry_SYSCALL_64_after_hwframe+0x6a/0xdf

One way to prevent this deadlock scenario from happening is to not allow writing to userspace while holding the key semaphore. Instead, an internal buffer is allocated for getting the keys out from the read method first before copying them out to userspace without holding the lock.

That requires taking out the __user modifier from all the relevant read methods as well as additional changes to not use any userspace write helpers. That is:

 1) The put_user() call is replaced by a direct copy.
 2) The copy_to_user() call is replaced by memcpy().
 3) All the fault handling code is removed.

Compiling on an x86-64 system, the size of the rxrpc_read() function is reduced from 3795 bytes to 2384 bytes with this patch.

Fixes: 1da177e4c3f4 ("Linux-2.6.12-rc2")
Reviewed-by: Jarkko Sakkinen <jarkko.sakkinen@linux.intel.com>
Signed-off-by: Waiman Long <longman@redhat.com>
Signed-off-by: David Howells <dhowells@redhat.com>
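In outline, the buffering pattern described above looks roughly like the sketch below. This is illustrative only, not the actual keyctl_read_key() code: the helper name, the temporary buffer name, and the use of kvmalloc() are assumptions made for the example. The point is that the key type's ->read() method now fills a kernel buffer while the key semaphore is held, and the potentially faulting copy_to_user() runs only after the semaphore has been dropped, so mmap_sem is never taken under the key lock.

/* Illustrative sketch, not the upstream keyctl_read_key() implementation. */
static long read_key_to_user(struct key *key, char __user *ubuf, size_t buflen)
{
        char *tmpbuf;   /* temporary kernel buffer (name is illustrative) */
        long ret;

        tmpbuf = kvmalloc(buflen, GFP_KERNEL);
        if (!tmpbuf)
                return -ENOMEM;

        down_read(&key->sem);
        /* ->read() takes a kernel pointer now, so it cannot fault here */
        ret = key->type->read(key, tmpbuf, buflen);
        up_read(&key->sem);

        /* the user copy happens with no key locks held */
        if (ret > 0 &&
            copy_to_user(ubuf, tmpbuf, min_t(size_t, ret, buflen)))
                ret = -EFAULT;

        kvfree(tmpbuf);
        return ret;
}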
450 lines
10 KiB
C
// SPDX-License-Identifier: GPL-2.0-or-later
/* Large capacity key type
 *
 * Copyright (C) 2017 Jason A. Donenfeld <Jason@zx2c4.com>. All Rights Reserved.
 * Copyright (C) 2013 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 */

#define pr_fmt(fmt) "big_key: "fmt
#include <linux/init.h>
#include <linux/seq_file.h>
#include <linux/file.h>
#include <linux/shmem_fs.h>
#include <linux/err.h>
#include <linux/scatterlist.h>
#include <linux/random.h>
#include <linux/vmalloc.h>
#include <keys/user-type.h>
#include <keys/big_key-type.h>
#include <crypto/aead.h>
#include <crypto/gcm.h>

struct big_key_buf {
        unsigned int nr_pages;
        void *virt;
        struct scatterlist *sg;
        struct page *pages[];
};

/*
 * Layout of key payload words.
 */
enum {
        big_key_data,
        big_key_path,
        big_key_path_2nd_part,
        big_key_len,
};

/*
 * Crypto operation with big_key data
 */
enum big_key_op {
        BIG_KEY_ENC,
        BIG_KEY_DEC,
};

/*
 * If the data is under this limit, there's no point creating a shm file to
 * hold it as the permanently resident metadata for the shmem fs will be at
 * least as large as the data.
 */
#define BIG_KEY_FILE_THRESHOLD (sizeof(struct inode) + sizeof(struct dentry))

/*
 * Key size for big_key data encryption
 */
#define ENC_KEY_SIZE 32

/*
 * Authentication tag length
 */
#define ENC_AUTHTAG_SIZE 16

/*
 * big_key defined keys take an arbitrary string as the description and an
 * arbitrary blob of data as the payload
 */
struct key_type key_type_big_key = {
        .name = "big_key",
        .preparse = big_key_preparse,
        .free_preparse = big_key_free_preparse,
        .instantiate = generic_key_instantiate,
        .revoke = big_key_revoke,
        .destroy = big_key_destroy,
        .describe = big_key_describe,
        .read = big_key_read,
        /* no ->update(); don't add it without changing big_key_crypt() nonce */
};

/*
 * Crypto names for big_key data authenticated encryption
 */
static const char big_key_alg_name[] = "gcm(aes)";
#define BIG_KEY_IV_SIZE GCM_AES_IV_SIZE

/*
 * Crypto algorithms for big_key data authenticated encryption
 */
static struct crypto_aead *big_key_aead;

/*
 * Since changing the key affects the entire object, we need a mutex.
 */
static DEFINE_MUTEX(big_key_aead_lock);

/*
 * Encrypt/decrypt big_key data
 */
static int big_key_crypt(enum big_key_op op, struct big_key_buf *buf, size_t datalen, u8 *key)
{
        int ret;
        struct aead_request *aead_req;
        /* We always use a zero nonce. The reason we can get away with this is
         * because we're using a different randomly generated key for every
         * different encryption. Notably, too, key_type_big_key doesn't define
         * an .update function, so there's no chance we'll wind up reusing the
         * key to encrypt updated data. Simply put: one key, one encryption.
         */
        u8 zero_nonce[BIG_KEY_IV_SIZE];

        aead_req = aead_request_alloc(big_key_aead, GFP_KERNEL);
        if (!aead_req)
                return -ENOMEM;

        memset(zero_nonce, 0, sizeof(zero_nonce));
        aead_request_set_crypt(aead_req, buf->sg, buf->sg, datalen, zero_nonce);
        aead_request_set_callback(aead_req, CRYPTO_TFM_REQ_MAY_SLEEP, NULL, NULL);
        aead_request_set_ad(aead_req, 0);

        mutex_lock(&big_key_aead_lock);
        if (crypto_aead_setkey(big_key_aead, key, ENC_KEY_SIZE)) {
                ret = -EAGAIN;
                goto error;
        }
        if (op == BIG_KEY_ENC)
                ret = crypto_aead_encrypt(aead_req);
        else
                ret = crypto_aead_decrypt(aead_req);
error:
        mutex_unlock(&big_key_aead_lock);
        aead_request_free(aead_req);
        return ret;
}

/*
 * Free up the buffer.
 */
static void big_key_free_buffer(struct big_key_buf *buf)
{
        unsigned int i;

        if (buf->virt) {
                memset(buf->virt, 0, buf->nr_pages * PAGE_SIZE);
                vunmap(buf->virt);
        }

        for (i = 0; i < buf->nr_pages; i++)
                if (buf->pages[i])
                        __free_page(buf->pages[i]);

        kfree(buf);
}

/*
 * Allocate a buffer consisting of a set of pages with a virtual mapping
 * applied over them.
 */
static void *big_key_alloc_buffer(size_t len)
{
        struct big_key_buf *buf;
        unsigned int npg = (len + PAGE_SIZE - 1) >> PAGE_SHIFT;
        unsigned int i, l;

        buf = kzalloc(sizeof(struct big_key_buf) +
                      sizeof(struct page) * npg +
                      sizeof(struct scatterlist) * npg,
                      GFP_KERNEL);
        if (!buf)
                return NULL;

        buf->nr_pages = npg;
        buf->sg = (void *)(buf->pages + npg);
        sg_init_table(buf->sg, npg);

        for (i = 0; i < buf->nr_pages; i++) {
                buf->pages[i] = alloc_page(GFP_KERNEL);
                if (!buf->pages[i])
                        goto nomem;

                l = min_t(size_t, len, PAGE_SIZE);
                sg_set_page(&buf->sg[i], buf->pages[i], l, 0);
                len -= l;
        }

        buf->virt = vmap(buf->pages, buf->nr_pages, VM_MAP, PAGE_KERNEL);
        if (!buf->virt)
                goto nomem;

        return buf;

nomem:
        big_key_free_buffer(buf);
        return NULL;
}

/*
 * Preparse a big key
 */
int big_key_preparse(struct key_preparsed_payload *prep)
{
        struct big_key_buf *buf;
        struct path *path = (struct path *)&prep->payload.data[big_key_path];
        struct file *file;
        u8 *enckey;
        ssize_t written;
        size_t datalen = prep->datalen, enclen = datalen + ENC_AUTHTAG_SIZE;
        int ret;

        if (datalen <= 0 || datalen > 1024 * 1024 || !prep->data)
                return -EINVAL;

        /* Set an arbitrary quota */
        prep->quotalen = 16;

        prep->payload.data[big_key_len] = (void *)(unsigned long)datalen;

        if (datalen > BIG_KEY_FILE_THRESHOLD) {
                /* Create a shmem file to store the data in. This will permit the data
                 * to be swapped out if needed.
                 *
                 * File content is stored encrypted with randomly generated key.
                 */
                loff_t pos = 0;

                buf = big_key_alloc_buffer(enclen);
                if (!buf)
                        return -ENOMEM;
                memcpy(buf->virt, prep->data, datalen);

                /* generate random key */
                enckey = kmalloc(ENC_KEY_SIZE, GFP_KERNEL);
                if (!enckey) {
                        ret = -ENOMEM;
                        goto error;
                }
                ret = get_random_bytes_wait(enckey, ENC_KEY_SIZE);
                if (unlikely(ret))
                        goto err_enckey;

                /* encrypt aligned data */
                ret = big_key_crypt(BIG_KEY_ENC, buf, datalen, enckey);
                if (ret)
                        goto err_enckey;

                /* save aligned data to file */
                file = shmem_kernel_file_setup("", enclen, 0);
                if (IS_ERR(file)) {
                        ret = PTR_ERR(file);
                        goto err_enckey;
                }

                written = kernel_write(file, buf->virt, enclen, &pos);
                if (written != enclen) {
                        ret = written;
                        if (written >= 0)
                                ret = -ENOMEM;
                        goto err_fput;
                }

                /* Pin the mount and dentry to the key so that we can open it again
                 * later
                 */
                prep->payload.data[big_key_data] = enckey;
                *path = file->f_path;
                path_get(path);
                fput(file);
                big_key_free_buffer(buf);
        } else {
                /* Just store the data in a buffer */
                void *data = kmalloc(datalen, GFP_KERNEL);

                if (!data)
                        return -ENOMEM;

                prep->payload.data[big_key_data] = data;
                memcpy(data, prep->data, prep->datalen);
        }
        return 0;

err_fput:
        fput(file);
err_enckey:
        kzfree(enckey);
error:
        big_key_free_buffer(buf);
        return ret;
}

/*
 * Clear preparsement.
 */
void big_key_free_preparse(struct key_preparsed_payload *prep)
{
        if (prep->datalen > BIG_KEY_FILE_THRESHOLD) {
                struct path *path = (struct path *)&prep->payload.data[big_key_path];

                path_put(path);
        }
        kzfree(prep->payload.data[big_key_data]);
}

/*
 * dispose of the links from a revoked keyring
 * - called with the key sem write-locked
 */
void big_key_revoke(struct key *key)
{
        struct path *path = (struct path *)&key->payload.data[big_key_path];

        /* clear the quota */
        key_payload_reserve(key, 0);
        if (key_is_positive(key) &&
            (size_t)key->payload.data[big_key_len] > BIG_KEY_FILE_THRESHOLD)
                vfs_truncate(path, 0);
}

/*
 * dispose of the data dangling from the corpse of a big_key key
 */
void big_key_destroy(struct key *key)
{
        size_t datalen = (size_t)key->payload.data[big_key_len];

        if (datalen > BIG_KEY_FILE_THRESHOLD) {
                struct path *path = (struct path *)&key->payload.data[big_key_path];

                path_put(path);
                path->mnt = NULL;
                path->dentry = NULL;
        }
        kzfree(key->payload.data[big_key_data]);
        key->payload.data[big_key_data] = NULL;
}

/*
 * describe the big_key key
 */
void big_key_describe(const struct key *key, struct seq_file *m)
{
        size_t datalen = (size_t)key->payload.data[big_key_len];

        seq_puts(m, key->description);

        if (key_is_positive(key))
                seq_printf(m, ": %zu [%s]",
                           datalen,
                           datalen > BIG_KEY_FILE_THRESHOLD ? "file" : "buff");
}

/*
 * read the key data
 * - the key's semaphore is read-locked
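 * - the buffer is a kernel buffer; the caller copies the data out to
 *   userspace after dropping the semaphore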
 */
long big_key_read(const struct key *key, char *buffer, size_t buflen)
{
        size_t datalen = (size_t)key->payload.data[big_key_len];
        long ret;

        if (!buffer || buflen < datalen)
                return datalen;

        if (datalen > BIG_KEY_FILE_THRESHOLD) {
                struct big_key_buf *buf;
                struct path *path = (struct path *)&key->payload.data[big_key_path];
                struct file *file;
                u8 *enckey = (u8 *)key->payload.data[big_key_data];
                size_t enclen = datalen + ENC_AUTHTAG_SIZE;
                loff_t pos = 0;

                buf = big_key_alloc_buffer(enclen);
                if (!buf)
                        return -ENOMEM;

                file = dentry_open(path, O_RDONLY, current_cred());
                if (IS_ERR(file)) {
                        ret = PTR_ERR(file);
                        goto error;
                }

                /* read file to kernel and decrypt */
                ret = kernel_read(file, buf->virt, enclen, &pos);
                if (ret >= 0 && ret != enclen) {
                        ret = -EIO;
                        goto err_fput;
                }

                ret = big_key_crypt(BIG_KEY_DEC, buf, enclen, enckey);
                if (ret)
                        goto err_fput;

                ret = datalen;

                /* copy out decrypted data */
                memcpy(buffer, buf->virt, datalen);

err_fput:
                fput(file);
error:
                big_key_free_buffer(buf);
        } else {
                ret = datalen;
                memcpy(buffer, key->payload.data[big_key_data], datalen);
        }

        return ret;
}

/*
 * Register key type
 */
static int __init big_key_init(void)
{
        int ret;

        /* init block cipher */
        big_key_aead = crypto_alloc_aead(big_key_alg_name, 0, CRYPTO_ALG_ASYNC);
        if (IS_ERR(big_key_aead)) {
                ret = PTR_ERR(big_key_aead);
                pr_err("Can't alloc crypto: %d\n", ret);
                return ret;
        }

        if (unlikely(crypto_aead_ivsize(big_key_aead) != BIG_KEY_IV_SIZE)) {
                WARN(1, "big key algorithm changed?");
                ret = -EINVAL;
                goto free_aead;
        }

        ret = crypto_aead_setauthsize(big_key_aead, ENC_AUTHTAG_SIZE);
        if (ret < 0) {
                pr_err("Can't set crypto auth tag len: %d\n", ret);
                goto free_aead;
        }

        ret = register_key_type(&key_type_big_key);
        if (ret < 0) {
                pr_err("Can't register type: %d\n", ret);
                goto free_aead;
        }

        return 0;

free_aead:
        crypto_free_aead(big_key_aead);
        return ret;
}

late_initcall(big_key_init);
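For reference, the big_key type registered above can be exercised from userspace through the keyutils library. The sketch below is a hypothetical test program (the description string "test:big" and the 4 KiB payload size are arbitrary choices for the example): it stores a payload with add_key(2) and reads it back through keyctl_read(3), which lands in big_key_read() above.

/* Hypothetical userspace exercise of the big_key type; build with:
 *   gcc big_key_test.c -o big_key_test -lkeyutils
 */
#include <keyutils.h>
#include <stdio.h>
#include <string.h>

int main(void)
{
        char payload[4096], out[4096];
        key_serial_t key;
        long len;

        /* on typical configurations 4 KiB exceeds BIG_KEY_FILE_THRESHOLD,
         * so the shmem/encryption path is used */
        memset(payload, 'A', sizeof(payload));

        key = add_key("big_key", "test:big", payload, sizeof(payload),
                      KEY_SPEC_SESSION_KEYRING);
        if (key < 0) {
                perror("add_key");
                return 1;
        }

        len = keyctl_read(key, out, sizeof(out));
        printf("read back %ld bytes\n", len);
        return len == sizeof(payload) ? 0 : 1;
}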