tmp_suning_uos_patched/fs/kernfs/file.c
Chris Wilson e4234a1fc3 kernfs: Move faulting copy_user operations outside of the mutex
A fault in a user-provided buffer may lead anywhere, and lockdep warns
that we have a potential deadlock between the mm->mmap_sem and the
kernfs file mutex:

[   82.811702] ======================================================
[   82.811705] [ INFO: possible circular locking dependency detected ]
[   82.811709] 4.5.0-rc4-gfxbench+ #1 Not tainted
[   82.811711] -------------------------------------------------------
[   82.811714] kms_setmode/5859 is trying to acquire lock:
[   82.811717]  (&dev->struct_mutex){+.+.+.}, at: [<ffffffff8150d9c1>] drm_gem_mmap+0x1a1/0x270
[   82.811731]
but task is already holding lock:
[   82.811734]  (&mm->mmap_sem){++++++}, at: [<ffffffff8117b364>] vm_mmap_pgoff+0x44/0xa0
[   82.811745]
which lock already depends on the new lock.

[   82.811749]
the existing dependency chain (in reverse order) is:
[   82.811752]
-> #3 (&mm->mmap_sem){++++++}:
[   82.811761]        [<ffffffff810cc883>] lock_acquire+0xc3/0x1d0
[   82.811766]        [<ffffffff8118bc65>] __might_fault+0x75/0xa0
[   82.811771]        [<ffffffff8124da4a>] kernfs_fop_write+0x8a/0x180
[   82.811787]        [<ffffffff811d1023>] __vfs_write+0x23/0xe0
[   82.811792]        [<ffffffff811d1d74>] vfs_write+0xa4/0x190
[   82.811797]        [<ffffffff811d2c14>] SyS_write+0x44/0xb0
[   82.811801]        [<ffffffff817bb81b>] entry_SYSCALL_64_fastpath+0x16/0x73
[   82.811807]
-> #2 (s_active#6){++++.+}:
[   82.811814]        [<ffffffff810cc883>] lock_acquire+0xc3/0x1d0
[   82.811819]        [<ffffffff8124c070>] __kernfs_remove+0x210/0x2f0
[   82.811823]        [<ffffffff8124d040>] kernfs_remove_by_name_ns+0x40/0xa0
[   82.811828]        [<ffffffff8124e9e0>] sysfs_remove_file_ns+0x10/0x20
[   82.811832]        [<ffffffff815318d4>] device_del+0x124/0x250
[   82.811837]        [<ffffffff81531a19>] device_unregister+0x19/0x60
[   82.811841]        [<ffffffff8153c051>] cpu_cache_sysfs_exit+0x51/0xb0
[   82.811846]        [<ffffffff8153c628>] cacheinfo_cpu_callback+0x38/0x70
[   82.811851]        [<ffffffff8109ae89>] notifier_call_chain+0x39/0xa0
[   82.811856]        [<ffffffff8109aef9>] __raw_notifier_call_chain+0x9/0x10
[   82.811860]        [<ffffffff810786de>] cpu_notify+0x1e/0x40
[   82.811865]        [<ffffffff81078779>] cpu_notify_nofail+0x9/0x20
[   82.811869]        [<ffffffff81078ac3>] _cpu_down+0x233/0x340
[   82.811874]        [<ffffffff81079019>] disable_nonboot_cpus+0xc9/0x350
[   82.811878]        [<ffffffff810d2e11>] suspend_devices_and_enter+0x5a1/0xb50
[   82.811883]        [<ffffffff810d3903>] pm_suspend+0x543/0x8d0
[   82.811888]        [<ffffffff810d1b77>] state_store+0x77/0xe0
[   82.811892]        [<ffffffff813fa68f>] kobj_attr_store+0xf/0x20
[   82.811897]        [<ffffffff8124e740>] sysfs_kf_write+0x40/0x50
[   82.811902]        [<ffffffff8124dafc>] kernfs_fop_write+0x13c/0x180
[   82.811906]        [<ffffffff811d1023>] __vfs_write+0x23/0xe0
[   82.811910]        [<ffffffff811d1d74>] vfs_write+0xa4/0x190
[   82.811914]        [<ffffffff811d2c14>] SyS_write+0x44/0xb0
[   82.811918]        [<ffffffff817bb81b>] entry_SYSCALL_64_fastpath+0x16/0x73
[   82.811923]
-> #1 (cpu_hotplug.lock){+.+.+.}:
[   82.811929]        [<ffffffff810cc883>] lock_acquire+0xc3/0x1d0
[   82.811933]        [<ffffffff817b6f72>] mutex_lock_nested+0x62/0x3b0
[   82.811940]        [<ffffffff810784c1>] get_online_cpus+0x61/0x80
[   82.811944]        [<ffffffff811170eb>] stop_machine+0x1b/0xe0
[   82.811949]        [<ffffffffa0178edd>] gen8_ggtt_insert_entries__BKL+0x2d/0x30 [i915]
[   82.812009]        [<ffffffffa017d3a6>] ggtt_bind_vma+0x46/0x70 [i915]
[   82.812045]        [<ffffffffa017eb70>] i915_vma_bind+0x140/0x290 [i915]
[   82.812081]        [<ffffffffa01862b9>] i915_gem_object_do_pin+0x899/0xb00 [i915]
[   82.812117]        [<ffffffffa0186555>] i915_gem_object_pin+0x35/0x40 [i915]
[   82.812154]        [<ffffffffa019a23e>] intel_init_pipe_control+0xbe/0x210 [i915]
[   82.812192]        [<ffffffffa0197312>] intel_logical_rings_init+0xe2/0xde0 [i915]
[   82.812232]        [<ffffffffa0186fe3>] i915_gem_init+0xf3/0x130 [i915]
[   82.812278]        [<ffffffffa02097ed>] i915_driver_load+0xf2d/0x1770 [i915]
[   82.812318]        [<ffffffff81512474>] drm_dev_register+0xa4/0xb0
[   82.812323]        [<ffffffff8151467e>] drm_get_pci_dev+0xce/0x1e0
[   82.812328]        [<ffffffffa01472cf>] i915_pci_probe+0x2f/0x50 [i915]
[   82.812360]        [<ffffffff8143f907>] pci_device_probe+0x87/0xf0
[   82.812366]        [<ffffffff81535f89>] driver_probe_device+0x229/0x450
[   82.812371]        [<ffffffff81536233>] __driver_attach+0x83/0x90
[   82.812375]        [<ffffffff81533c61>] bus_for_each_dev+0x61/0xa0
[   82.812380]        [<ffffffff81535879>] driver_attach+0x19/0x20
[   82.812384]        [<ffffffff8153535f>] bus_add_driver+0x1ef/0x290
[   82.812388]        [<ffffffff81536e9b>] driver_register+0x5b/0xe0
[   82.812393]        [<ffffffff8143e83b>] __pci_register_driver+0x5b/0x60
[   82.812398]        [<ffffffff81514866>] drm_pci_init+0xd6/0x100
[   82.812402]        [<ffffffffa027c094>] 0xffffffffa027c094
[   82.812406]        [<ffffffff810003de>] do_one_initcall+0xae/0x1d0
[   82.812412]        [<ffffffff811595a0>] do_init_module+0x5b/0x1cb
[   82.812417]        [<ffffffff81106160>] load_module+0x1c20/0x2480
[   82.812422]        [<ffffffff81106bae>] SyS_finit_module+0x7e/0xa0
[   82.812428]        [<ffffffff817bb81b>] entry_SYSCALL_64_fastpath+0x16/0x73
[   82.812433]
-> #0 (&dev->struct_mutex){+.+.+.}:
[   82.812439]        [<ffffffff810cbe59>] __lock_acquire+0x1fc9/0x20f0
[   82.812443]        [<ffffffff810cc883>] lock_acquire+0xc3/0x1d0
[   82.812456]        [<ffffffff8150d9e7>] drm_gem_mmap+0x1c7/0x270
[   82.812460]        [<ffffffff81196a14>] mmap_region+0x334/0x580
[   82.812466]        [<ffffffff81196fc4>] do_mmap+0x364/0x410
[   82.812470]        [<ffffffff8117b38d>] vm_mmap_pgoff+0x6d/0xa0
[   82.812474]        [<ffffffff811950f4>] SyS_mmap_pgoff+0x184/0x220
[   82.812479]        [<ffffffff8100a0fd>] SyS_mmap+0x1d/0x20
[   82.812484]        [<ffffffff817bb81b>] entry_SYSCALL_64_fastpath+0x16/0x73
[   82.812489]
other info that might help us debug this:

[   82.812493] Chain exists of:
  &dev->struct_mutex --> s_active#6 --> &mm->mmap_sem

[   82.812502]  Possible unsafe locking scenario:

[   82.812506]        CPU0                    CPU1
[   82.812508]        ----                    ----
[   82.812510]   lock(&mm->mmap_sem);
[   82.812514]                                lock(s_active#6);
[   82.812519]                                lock(&mm->mmap_sem);
[   82.812522]   lock(&dev->struct_mutex);
[   82.812526]
 *** DEADLOCK ***

[   82.812531] 1 lock held by kms_setmode/5859:
[   82.812533]  #0:  (&mm->mmap_sem){++++++}, at: [<ffffffff8117b364>] vm_mmap_pgoff+0x44/0xa0
[   82.812541]
stack backtrace:
[   82.812547] CPU: 0 PID: 5859 Comm: kms_setmode Not tainted 4.5.0-rc4-gfxbench+ #1
[   82.812550] Hardware name:                  /NUC5CPYB, BIOS PYBSWCEL.86A.0040.2015.0814.1353 08/14/2015
[   82.812553]  0000000000000000 ffff880079407bf0 ffffffff813f8505 ffffffff825fb270
[   82.812560]  ffffffff825c4190 ffff880079407c30 ffffffff810c84ac ffff880079407c90
[   82.812566]  ffff8800797ed328 ffff8800797ecb00 0000000000000001 ffff8800797ed350
[   82.812573] Call Trace:
[   82.812578]  [<ffffffff813f8505>] dump_stack+0x67/0x92
[   82.812582]  [<ffffffff810c84ac>] print_circular_bug+0x1fc/0x310
[   82.812586]  [<ffffffff810cbe59>] __lock_acquire+0x1fc9/0x20f0
[   82.812590]  [<ffffffff810cc883>] lock_acquire+0xc3/0x1d0
[   82.812594]  [<ffffffff8150d9c1>] ? drm_gem_mmap+0x1a1/0x270
[   82.812599]  [<ffffffff8150d9e7>] drm_gem_mmap+0x1c7/0x270
[   82.812603]  [<ffffffff8150d9c1>] ? drm_gem_mmap+0x1a1/0x270
[   82.812608]  [<ffffffff81196a14>] mmap_region+0x334/0x580
[   82.812612]  [<ffffffff81196fc4>] do_mmap+0x364/0x410
[   82.812616]  [<ffffffff8117b38d>] vm_mmap_pgoff+0x6d/0xa0
[   82.812629]  [<ffffffff811950f4>] SyS_mmap_pgoff+0x184/0x220
[   82.812633]  [<ffffffff8100a0fd>] SyS_mmap+0x1d/0x20
[   82.812637]  [<ffffffff817bb81b>] entry_SYSCALL_64_fastpath+0x16/0x73

Highly unlikely though this scenario is, we can avoid the issue entirely
by moving the copy operations out from under the kernfs_get_active()
tracking and giving the preallocated buffer its own mutex. The
temporary buffer allocation doesn't require mutex locking, as it is
entirely local.
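
In sketch form (error handling and buffer freeing elided), the write
path below then takes its locks in this order, with the faulting
copy_from_user() running before the s_active reference is taken:

	buf = of->prealloc_buf;
	if (buf)
		mutex_lock(&of->prealloc_mutex);  /* covers only the copy */
	...
	copy_from_user(buf, user_buf, len);  /* may fault; no s_active held */

	mutex_lock(&of->mutex);
	kernfs_get_active(of->kn);           /* s_active taken after the fault */
	ops->write(of, buf, len, *ppos);
	kernfs_put_active(of->kn);
	mutex_unlock(&of->mutex);
	...
	mutex_unlock(&of->prealloc_mutex);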

The locked section had previously been extended to cover the
preallocated buffer, which was added to speed up md user operations in

commit 2b75869bba
Author: NeilBrown <neilb@suse.de>
Date:   Mon Oct 13 16:41:28 2014 +1100

    sysfs/kernfs: allow attributes to request write buffer be pre-allocated.

Reported-by: Ville Syrjälä <ville.syrjala@linux.intel.com>
Bugzilla: https://bugs.freedesktop.org/show_bug.cgi?id=94350
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Reviewed-by: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
Cc: Ville Syrjälä <ville.syrjala@linux.intel.com>
Cc: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
Cc: NeilBrown <neilb@suse.de>
Acked-by: Tejun Heo <tj@kernel.org>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
2016-04-30 10:05:05 -07:00


/*
 * fs/kernfs/file.c - kernfs file implementation
 *
 * Copyright (c) 2001-3 Patrick Mochel
 * Copyright (c) 2007 SUSE Linux Products GmbH
 * Copyright (c) 2007, 2013 Tejun Heo <tj@kernel.org>
 *
 * This file is released under the GPLv2.
 */
#include <linux/fs.h>
#include <linux/seq_file.h>
#include <linux/slab.h>
#include <linux/poll.h>
#include <linux/pagemap.h>
#include <linux/sched.h>
#include <linux/fsnotify.h>
#include "kernfs-internal.h"

/*
 * There's one kernfs_open_file for each open file and one kernfs_open_node
 * for each kernfs_node with one or more open files.
 *
 * kernfs_node->attr.open points to kernfs_open_node. attr.open is
 * protected by kernfs_open_node_lock.
 *
 * filp->private_data points to seq_file whose ->private points to
 * kernfs_open_file. kernfs_open_files are chained at
 * kernfs_open_node->files, which is protected by kernfs_open_file_mutex.
 */
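
/*
 * The chain, roughly (see the comment above):
 *
 *	file->private_data ---------------> seq_file
 *	seq_file->private ----------------> kernfs_open_file
 *	kernfs_open_file->kn->attr.open --> kernfs_open_node
 *	kernfs_open_node->files ----------> all kernfs_open_files of the node
 */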
static DEFINE_SPINLOCK(kernfs_open_node_lock);
static DEFINE_MUTEX(kernfs_open_file_mutex);

struct kernfs_open_node {
	atomic_t		refcnt;
	atomic_t		event;
	wait_queue_head_t	poll;
	struct list_head	files; /* goes through kernfs_open_file.list */
};

/*
 * kernfs_notify() may be called from any context and bounces notifications
 * through a work item. To minimize space overhead in kernfs_node, the
 * pending queue is implemented as a singly linked list of kernfs_nodes.
 * The list is terminated with the self pointer so that whether a
 * kernfs_node is on the list or not can be determined by testing the next
 * pointer for NULL.
 */
#define KERNFS_NOTIFY_EOL ((void *)&kernfs_notify_list)
static DEFINE_SPINLOCK(kernfs_notify_lock);
static struct kernfs_node *kernfs_notify_list = KERNFS_NOTIFY_EOL;
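
/*
 * For example, with @a queued and then @b, the list looks like
 * kernfs_notify_list -> b -> a -> KERNFS_NOTIFY_EOL, while a kernfs_node
 * that is not queued has ->attr.notify_next == NULL.
 */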

static struct kernfs_open_file *kernfs_of(struct file *file)
{
	return ((struct seq_file *)file->private_data)->private;
}

/*
 * Determine the kernfs_ops for the given kernfs_node. This function must
 * be called while holding an active reference.
 */
static const struct kernfs_ops *kernfs_ops(struct kernfs_node *kn)
{
	if (kn->flags & KERNFS_LOCKDEP)
		lockdep_assert_held(kn);
	return kn->attr.ops;
}

/*
 * As kernfs_seq_stop() is also called after kernfs_seq_start() or
 * kernfs_seq_next() failure, it needs to distinguish whether it's stopping
 * a seq_file iteration which is fully initialized with an active reference
 * or an aborted kernfs_seq_start() due to get_active failure. The
 * position pointer is the only context for each seq_file iteration and
 * thus the stop condition should be encoded in it. As the return value is
 * directly visible to userland, ERR_PTR(-ENODEV) is the only acceptable
 * choice to indicate get_active failure.
 *
 * Unfortunately, this is complicated due to the optional custom seq_file
 * operations which may return ERR_PTR(-ENODEV) too. kernfs_seq_stop()
 * can't distinguish whether ERR_PTR(-ENODEV) is from get_active failure or
 * custom seq_file operations and thus can't decide whether put_active
 * should be performed or not only on ERR_PTR(-ENODEV).
 *
 * This is worked around by factoring out the custom seq_stop() and
 * put_active part into kernfs_seq_stop_active(), skipping it from
 * kernfs_seq_stop() if ERR_PTR(-ENODEV) while invoking it directly after
 * custom seq_file operations fail with ERR_PTR(-ENODEV) - this ensures
 * that kernfs_seq_stop_active() is skipped only after get_active failure.
 */
static void kernfs_seq_stop_active(struct seq_file *sf, void *v)
{
	struct kernfs_open_file *of = sf->private;
	const struct kernfs_ops *ops = kernfs_ops(of->kn);

	if (ops->seq_stop)
		ops->seq_stop(sf, v);
	kernfs_put_active(of->kn);
}

static void *kernfs_seq_start(struct seq_file *sf, loff_t *ppos)
{
	struct kernfs_open_file *of = sf->private;
	const struct kernfs_ops *ops;

	/*
	 * @of->mutex nests outside active ref and is primarily to ensure that
	 * the ops aren't called concurrently for the same open file.
	 */
	mutex_lock(&of->mutex);
	if (!kernfs_get_active(of->kn))
		return ERR_PTR(-ENODEV);

	ops = kernfs_ops(of->kn);
	if (ops->seq_start) {
		void *next = ops->seq_start(sf, ppos);
		/* see the comment above kernfs_seq_stop_active() */
		if (next == ERR_PTR(-ENODEV))
			kernfs_seq_stop_active(sf, next);
		return next;
	} else {
		/*
		 * The same behavior and code as single_open(). Returns
		 * !NULL if pos is at the beginning; otherwise, NULL.
		 */
		return NULL + !*ppos;
	}
}

static void *kernfs_seq_next(struct seq_file *sf, void *v, loff_t *ppos)
{
	struct kernfs_open_file *of = sf->private;
	const struct kernfs_ops *ops = kernfs_ops(of->kn);

	if (ops->seq_next) {
		void *next = ops->seq_next(sf, v, ppos);
		/* see the comment above kernfs_seq_stop_active() */
		if (next == ERR_PTR(-ENODEV))
			kernfs_seq_stop_active(sf, next);
		return next;
	} else {
		/*
		 * The same behavior and code as single_open(), always
		 * terminate after the initial read.
		 */
		++*ppos;
		return NULL;
	}
}

static void kernfs_seq_stop(struct seq_file *sf, void *v)
{
	struct kernfs_open_file *of = sf->private;

	if (v != ERR_PTR(-ENODEV))
		kernfs_seq_stop_active(sf, v);
	mutex_unlock(&of->mutex);
}

static int kernfs_seq_show(struct seq_file *sf, void *v)
{
	struct kernfs_open_file *of = sf->private;

	of->event = atomic_read(&of->kn->attr.open->event);

	return of->kn->attr.ops->seq_show(sf, v);
}

static const struct seq_operations kernfs_seq_ops = {
	.start = kernfs_seq_start,
	.next = kernfs_seq_next,
	.stop = kernfs_seq_stop,
	.show = kernfs_seq_show,
};

/*
 * As reading a bin file can have side-effects, the exact offset and bytes
 * specified in read(2) call should be passed to the read callback making
 * it difficult to use seq_file. Implement simplistic custom buffering for
 * bin files.
 */
static ssize_t kernfs_file_direct_read(struct kernfs_open_file *of,
				       char __user *user_buf, size_t count,
				       loff_t *ppos)
{
	ssize_t len = min_t(size_t, count, PAGE_SIZE);
	const struct kernfs_ops *ops;
	char *buf;
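
	/*
	 * The preallocated buffer, if any, is guarded by its own mutex so
	 * that the faulting copy_to_user() below can run without holding
	 * @of->mutex or the active reference (see the commit message above).
	 */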
	buf = of->prealloc_buf;
	if (buf)
		mutex_lock(&of->prealloc_mutex);
	else
		buf = kmalloc(len, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	/*
	 * @of->mutex nests outside active ref and is used to ensure that
	 * the ops aren't called concurrently for the same open file.
	 */
	mutex_lock(&of->mutex);
	if (!kernfs_get_active(of->kn)) {
		len = -ENODEV;
		mutex_unlock(&of->mutex);
		goto out_free;
	}

	of->event = atomic_read(&of->kn->attr.open->event);
	ops = kernfs_ops(of->kn);
	if (ops->read)
		len = ops->read(of, buf, len, *ppos);
	else
		len = -EINVAL;

	kernfs_put_active(of->kn);
	mutex_unlock(&of->mutex);

	if (len < 0)
		goto out_free;

	if (copy_to_user(user_buf, buf, len)) {
		len = -EFAULT;
		goto out_free;
	}

	*ppos += len;

out_free:
	if (buf == of->prealloc_buf)
		mutex_unlock(&of->prealloc_mutex);
	else
		kfree(buf);
	return len;
}

/**
 * kernfs_fop_read - kernfs vfs read callback
 * @file: file pointer
 * @user_buf: buffer to copy the read data into
 * @count: number of bytes
 * @ppos: starting offset
 */
static ssize_t kernfs_fop_read(struct file *file, char __user *user_buf,
			       size_t count, loff_t *ppos)
{
	struct kernfs_open_file *of = kernfs_of(file);

	if (of->kn->flags & KERNFS_HAS_SEQ_SHOW)
		return seq_read(file, user_buf, count, ppos);
	else
		return kernfs_file_direct_read(of, user_buf, count, ppos);
}

/**
 * kernfs_fop_write - kernfs vfs write callback
 * @file: file pointer
 * @user_buf: data to write
 * @count: number of bytes
 * @ppos: starting offset
 *
 * Copy data in from userland and pass it to the matching kernfs write
 * operation.
 *
 * There is no easy way for us to know if userspace is only doing partial
 * writes, so we don't support them. We expect the entire buffer to come
 * on the first write. Hint: if you're writing a value, first read the
 * file, modify only the value you're changing, then write the entire
 * buffer back.
 */
static ssize_t kernfs_fop_write(struct file *file, const char __user *user_buf,
				size_t count, loff_t *ppos)
{
	struct kernfs_open_file *of = kernfs_of(file);
	const struct kernfs_ops *ops;
	size_t len;
	char *buf;
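
	/*
	 * Attributes that request atomic writes must receive the whole
	 * buffer in one chunk; anything larger than atomic_write_len is
	 * rejected. Otherwise, clamp to one page as on the read side.
	 */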
	if (of->atomic_write_len) {
		len = count;
		if (len > of->atomic_write_len)
			return -E2BIG;
	} else {
		len = min_t(size_t, count, PAGE_SIZE);
	}
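
	/*
	 * As on the read side, only @of->prealloc_mutex is held across the
	 * faulting copy_from_user(); @of->mutex and the active reference
	 * are acquired afterwards.
	 */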
	buf = of->prealloc_buf;
	if (buf)
		mutex_lock(&of->prealloc_mutex);
	else
		buf = kmalloc(len + 1, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	if (copy_from_user(buf, user_buf, len)) {
		len = -EFAULT;
		goto out_free;
	}
	buf[len] = '\0'; /* guarantee string termination */

	/*
	 * @of->mutex nests outside active ref and is used to ensure that
	 * the ops aren't called concurrently for the same open file.
	 */
	mutex_lock(&of->mutex);
	if (!kernfs_get_active(of->kn)) {
		mutex_unlock(&of->mutex);
		len = -ENODEV;
		goto out_free;
	}

	ops = kernfs_ops(of->kn);
	if (ops->write)
		len = ops->write(of, buf, len, *ppos);
	else
		len = -EINVAL;

	kernfs_put_active(of->kn);
	mutex_unlock(&of->mutex);

	if (len > 0)
		*ppos += len;

out_free:
	if (buf == of->prealloc_buf)
		mutex_unlock(&of->prealloc_mutex);
	else
		kfree(buf);
	return len;
}

static void kernfs_vma_open(struct vm_area_struct *vma)
{
	struct file *file = vma->vm_file;
	struct kernfs_open_file *of = kernfs_of(file);

	if (!of->vm_ops)
		return;

	if (!kernfs_get_active(of->kn))
		return;

	if (of->vm_ops->open)
		of->vm_ops->open(vma);

	kernfs_put_active(of->kn);
}

static int kernfs_vma_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct file *file = vma->vm_file;
	struct kernfs_open_file *of = kernfs_of(file);
	int ret;

	if (!of->vm_ops)
		return VM_FAULT_SIGBUS;

	if (!kernfs_get_active(of->kn))
		return VM_FAULT_SIGBUS;

	ret = VM_FAULT_SIGBUS;
	if (of->vm_ops->fault)
		ret = of->vm_ops->fault(vma, vmf);

	kernfs_put_active(of->kn);
	return ret;
}

static int kernfs_vma_page_mkwrite(struct vm_area_struct *vma,
				   struct vm_fault *vmf)
{
	struct file *file = vma->vm_file;
	struct kernfs_open_file *of = kernfs_of(file);
	int ret;

	if (!of->vm_ops)
		return VM_FAULT_SIGBUS;

	if (!kernfs_get_active(of->kn))
		return VM_FAULT_SIGBUS;

	ret = 0;
	if (of->vm_ops->page_mkwrite)
		ret = of->vm_ops->page_mkwrite(vma, vmf);
	else
		file_update_time(file);

	kernfs_put_active(of->kn);
	return ret;
}

static int kernfs_vma_access(struct vm_area_struct *vma, unsigned long addr,
			     void *buf, int len, int write)
{
	struct file *file = vma->vm_file;
	struct kernfs_open_file *of = kernfs_of(file);
	int ret;

	if (!of->vm_ops)
		return -EINVAL;

	if (!kernfs_get_active(of->kn))
		return -EINVAL;

	ret = -EINVAL;
	if (of->vm_ops->access)
		ret = of->vm_ops->access(vma, addr, buf, len, write);

	kernfs_put_active(of->kn);
	return ret;
}

#ifdef CONFIG_NUMA
static int kernfs_vma_set_policy(struct vm_area_struct *vma,
				 struct mempolicy *new)
{
	struct file *file = vma->vm_file;
	struct kernfs_open_file *of = kernfs_of(file);
	int ret;

	if (!of->vm_ops)
		return 0;

	if (!kernfs_get_active(of->kn))
		return -EINVAL;

	ret = 0;
	if (of->vm_ops->set_policy)
		ret = of->vm_ops->set_policy(vma, new);

	kernfs_put_active(of->kn);
	return ret;
}

static struct mempolicy *kernfs_vma_get_policy(struct vm_area_struct *vma,
					       unsigned long addr)
{
	struct file *file = vma->vm_file;
	struct kernfs_open_file *of = kernfs_of(file);
	struct mempolicy *pol;

	if (!of->vm_ops)
		return vma->vm_policy;

	if (!kernfs_get_active(of->kn))
		return vma->vm_policy;

	pol = vma->vm_policy;
	if (of->vm_ops->get_policy)
		pol = of->vm_ops->get_policy(vma, addr);

	kernfs_put_active(of->kn);
	return pol;
}
#endif

static const struct vm_operations_struct kernfs_vm_ops = {
	.open = kernfs_vma_open,
	.fault = kernfs_vma_fault,
	.page_mkwrite = kernfs_vma_page_mkwrite,
	.access = kernfs_vma_access,
#ifdef CONFIG_NUMA
	.set_policy = kernfs_vma_set_policy,
	.get_policy = kernfs_vma_get_policy,
#endif
};

static int kernfs_fop_mmap(struct file *file, struct vm_area_struct *vma)
{
	struct kernfs_open_file *of = kernfs_of(file);
	const struct kernfs_ops *ops;
	int rc;

	/*
	 * mmap path and of->mutex are prone to triggering spurious lockdep
	 * warnings and we don't want to add spurious locking dependency
	 * between the two. Check whether mmap is actually implemented
	 * without grabbing @of->mutex by testing HAS_MMAP flag. See the
	 * comment in kernfs_fop_open() for more details.
	 */
	if (!(of->kn->flags & KERNFS_HAS_MMAP))
		return -ENODEV;

	mutex_lock(&of->mutex);

	rc = -ENODEV;
	if (!kernfs_get_active(of->kn))
		goto out_unlock;

	ops = kernfs_ops(of->kn);
	rc = ops->mmap(of, vma);
	if (rc)
		goto out_put;

	/*
	 * PowerPC's pci_mmap of legacy_mem uses shmem_zero_setup()
	 * to satisfy versions of X which crash if the mmap fails: that
	 * substitutes a new vm_file, and we don't then want bin_vm_ops.
	 */
	if (vma->vm_file != file)
		goto out_put;

	rc = -EINVAL;
	if (of->mmapped && of->vm_ops != vma->vm_ops)
		goto out_put;

	/*
	 * It is not possible to successfully wrap close.
	 * So error if someone is trying to use close.
	 */
	rc = -EINVAL;
	if (vma->vm_ops && vma->vm_ops->close)
		goto out_put;

	rc = 0;
	of->mmapped = 1;
	of->vm_ops = vma->vm_ops;
	vma->vm_ops = &kernfs_vm_ops;
out_put:
	kernfs_put_active(of->kn);
out_unlock:
	mutex_unlock(&of->mutex);

	return rc;
}

/**
 * kernfs_get_open_node - get or create kernfs_open_node
 * @kn: target kernfs_node
 * @of: kernfs_open_file for this instance of open
 *
 * If @kn->attr.open exists, increment its reference count; otherwise,
 * create one. @of is chained to the files list.
 *
 * LOCKING:
 * Kernel thread context (may sleep).
 *
 * RETURNS:
 * 0 on success, -errno on failure.
 */
static int kernfs_get_open_node(struct kernfs_node *kn,
				struct kernfs_open_file *of)
{
	struct kernfs_open_node *on, *new_on = NULL;
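
	/*
	 * Look up @kn->attr.open under the locks first; on a miss, drop
	 * the locks, allocate @new_on, and retry. A racing open may have
	 * installed one in the meantime, in which case the spare
	 * allocation is simply freed.
	 */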
retry:
	mutex_lock(&kernfs_open_file_mutex);
	spin_lock_irq(&kernfs_open_node_lock);

	if (!kn->attr.open && new_on) {
		kn->attr.open = new_on;
		new_on = NULL;
	}

	on = kn->attr.open;
	if (on) {
		atomic_inc(&on->refcnt);
		list_add_tail(&of->list, &on->files);
	}

	spin_unlock_irq(&kernfs_open_node_lock);
	mutex_unlock(&kernfs_open_file_mutex);

	if (on) {
		kfree(new_on);
		return 0;
	}

	/* not there, initialize a new one and retry */
	new_on = kmalloc(sizeof(*new_on), GFP_KERNEL);
	if (!new_on)
		return -ENOMEM;

	atomic_set(&new_on->refcnt, 0);
	atomic_set(&new_on->event, 1);
	init_waitqueue_head(&new_on->poll);
	INIT_LIST_HEAD(&new_on->files);
	goto retry;
}

/**
 * kernfs_put_open_node - put kernfs_open_node
 * @kn: target kernfs_node
 * @of: associated kernfs_open_file
 *
 * Put @kn->attr.open and unlink @of from the files list. If
 * reference count reaches zero, disassociate and free it.
 *
 * LOCKING:
 * None.
 */
static void kernfs_put_open_node(struct kernfs_node *kn,
				 struct kernfs_open_file *of)
{
	struct kernfs_open_node *on = kn->attr.open;
	unsigned long flags;

	mutex_lock(&kernfs_open_file_mutex);
	spin_lock_irqsave(&kernfs_open_node_lock, flags);

	if (of)
		list_del(&of->list);
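
	/*
	 * Drop our reference. Only the caller dropping the last one
	 * detaches @on from @kn; everyone else leaves @on in place and
	 * NULLs the local pointer so that the kfree() below is a no-op.
	 */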
	if (atomic_dec_and_test(&on->refcnt))
		kn->attr.open = NULL;
	else
		on = NULL;

	spin_unlock_irqrestore(&kernfs_open_node_lock, flags);
	mutex_unlock(&kernfs_open_file_mutex);

	kfree(on);
}

static int kernfs_fop_open(struct inode *inode, struct file *file)
{
	struct kernfs_node *kn = file->f_path.dentry->d_fsdata;
	struct kernfs_root *root = kernfs_root(kn);
	const struct kernfs_ops *ops;
	struct kernfs_open_file *of;
	bool has_read, has_write, has_mmap;
	int error = -EACCES;

	if (!kernfs_get_active(kn))
		return -ENODEV;

	ops = kernfs_ops(kn);

	has_read = ops->seq_show || ops->read || ops->mmap;
	has_write = ops->write || ops->mmap;
	has_mmap = ops->mmap;

	/* see the flag definition for details */
	if (root->flags & KERNFS_ROOT_EXTRA_OPEN_PERM_CHECK) {
		if ((file->f_mode & FMODE_WRITE) &&
		    (!(inode->i_mode & S_IWUGO) || !has_write))
			goto err_out;

		if ((file->f_mode & FMODE_READ) &&
		    (!(inode->i_mode & S_IRUGO) || !has_read))
			goto err_out;
	}

	/* allocate a kernfs_open_file for the file */
	error = -ENOMEM;
	of = kzalloc(sizeof(struct kernfs_open_file), GFP_KERNEL);
	if (!of)
		goto err_out;

	/*
	 * The following is done to give a different lockdep key to
	 * @of->mutex for files which implement mmap. This is a rather
	 * crude way to avoid false positive lockdep warning around
	 * mm->mmap_sem - mmap nests @of->mutex under mm->mmap_sem and
	 * reading /sys/block/sda/trace/act_mask grabs sr_mutex, under
	 * which mm->mmap_sem nests, while holding @of->mutex. As each
	 * open file has a separate mutex, it's okay as long as those don't
	 * happen on the same file. At this point, we can't easily give
	 * each file a separate locking class. Let's differentiate on
	 * whether the file has mmap or not for now.
	 *
	 * Both paths of the branch look the same. They're supposed to
	 * look that way and give @of->mutex different static lockdep keys.
	 */
	if (has_mmap)
		mutex_init(&of->mutex);
	else
		mutex_init(&of->mutex);

	of->kn = kn;
	of->file = file;

	/*
	 * Write path needs to check atomic_write_len outside active
	 * reference. Cache it in open_file. See kernfs_fop_write() for
	 * details.
	 */
	of->atomic_write_len = ops->atomic_write_len;

	error = -EINVAL;
	/*
	 * ->seq_show is incompatible with ->prealloc,
	 * as seq_read does its own allocation.
	 * ->read must be used instead.
	 */
	if (ops->prealloc && ops->seq_show)
		goto err_free;
	if (ops->prealloc) {
		int len = of->atomic_write_len ?: PAGE_SIZE;
		of->prealloc_buf = kmalloc(len + 1, GFP_KERNEL);
		error = -ENOMEM;
		if (!of->prealloc_buf)
			goto err_free;
		mutex_init(&of->prealloc_mutex);
	}

	/*
	 * Always instantiate seq_file even if read access doesn't use
	 * seq_file or is not requested. This unifies private data access
	 * and readable regular files are the vast majority anyway.
	 */
	if (ops->seq_show)
		error = seq_open(file, &kernfs_seq_ops);
	else
		error = seq_open(file, NULL);
	if (error)
		goto err_free;

	((struct seq_file *)file->private_data)->private = of;

	/* seq_file clears PWRITE unconditionally, restore it if WRITE */
	if (file->f_mode & FMODE_WRITE)
		file->f_mode |= FMODE_PWRITE;

	/* make sure we have open node struct */
	error = kernfs_get_open_node(kn, of);
	if (error)
		goto err_close;

	/* open succeeded, put active references */
	kernfs_put_active(kn);
	return 0;

err_close:
	seq_release(inode, file);
err_free:
	kfree(of->prealloc_buf);
	kfree(of);
err_out:
	kernfs_put_active(kn);
	return error;
}

static int kernfs_fop_release(struct inode *inode, struct file *filp)
{
	struct kernfs_node *kn = filp->f_path.dentry->d_fsdata;
	struct kernfs_open_file *of = kernfs_of(filp);

	kernfs_put_open_node(kn, of);
	seq_release(inode, filp);
	kfree(of->prealloc_buf);
	kfree(of);

	return 0;
}

void kernfs_unmap_bin_file(struct kernfs_node *kn)
{
	struct kernfs_open_node *on;
	struct kernfs_open_file *of;

	if (!(kn->flags & KERNFS_HAS_MMAP))
		return;
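
	/*
	 * Pin @on with an extra reference so that the files list cannot
	 * go away while it is walked below; the reference is dropped via
	 * kernfs_put_open_node() at the end.
	 */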
	spin_lock_irq(&kernfs_open_node_lock);
	on = kn->attr.open;
	if (on)
		atomic_inc(&on->refcnt);
	spin_unlock_irq(&kernfs_open_node_lock);
	if (!on)
		return;

	mutex_lock(&kernfs_open_file_mutex);
	list_for_each_entry(of, &on->files, list) {
		struct inode *inode = file_inode(of->file);
		unmap_mapping_range(inode->i_mapping, 0, 0, 1);
	}
	mutex_unlock(&kernfs_open_file_mutex);

	kernfs_put_open_node(kn, NULL);
}

/*
 * Kernfs attribute files are pollable. The idea is that you read
 * the content and then you use 'poll' or 'select' to wait for
 * the content to change. When the content changes (assuming the
 * manager for the kobject supports notification), poll will
 * return POLLERR|POLLPRI, and select will return the fd whether
 * it is waiting for read, write, or exceptions.
 * Once poll/select indicates that the value has changed, you
 * need to close and re-open the file, or seek to 0 and read again.
 * Reminder: this only works for attributes which actively support
 * it, and it is not possible to test an attribute from userspace
 * to see if it supports poll (neither 'poll' nor 'select' returns
 * an appropriate error code). When in doubt, set a suitable timeout value.
 */
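
/*
 * A minimal userspace consumer, in sketch form (path and buffer sizes
 * are placeholders, error handling elided):
 *
 *	int fd = open("/sys/.../attr", O_RDONLY);
 *	char buf[128];
 *	struct pollfd pfd = { .fd = fd, .events = POLLERR | POLLPRI };
 *
 *	read(fd, buf, sizeof(buf));	// prime the cached event counter
 *	poll(&pfd, 1, -1);		// wait for kernfs_notify()
 *	lseek(fd, 0, SEEK_SET);
 *	read(fd, buf, sizeof(buf));	// fetch the new value
 */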

static unsigned int kernfs_fop_poll(struct file *filp, poll_table *wait)
{
	struct kernfs_open_file *of = kernfs_of(filp);
	struct kernfs_node *kn = filp->f_path.dentry->d_fsdata;
	struct kernfs_open_node *on = kn->attr.open;

	if (!kernfs_get_active(kn))
		goto trigger;

	poll_wait(filp, &on->poll, wait);

	kernfs_put_active(kn);
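
	/*
	 * kernfs_notify() bumps @on->event; if it no longer matches the
	 * value cached by the last read, report the file as changed.
	 */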
	if (of->event != atomic_read(&on->event))
		goto trigger;

	return DEFAULT_POLLMASK;

trigger:
	return DEFAULT_POLLMASK|POLLERR|POLLPRI;
}

static void kernfs_notify_workfn(struct work_struct *work)
{
	struct kernfs_node *kn;
	struct kernfs_open_node *on;
	struct kernfs_super_info *info;
repeat:
	/* pop one off the notify_list */
	spin_lock_irq(&kernfs_notify_lock);
	kn = kernfs_notify_list;
	if (kn == KERNFS_NOTIFY_EOL) {
		spin_unlock_irq(&kernfs_notify_lock);
		return;
	}
	kernfs_notify_list = kn->attr.notify_next;
	kn->attr.notify_next = NULL;
	spin_unlock_irq(&kernfs_notify_lock);

	/* kick poll */
	spin_lock_irq(&kernfs_open_node_lock);

	on = kn->attr.open;
	if (on) {
		atomic_inc(&on->event);
		wake_up_interruptible(&on->poll);
	}

	spin_unlock_irq(&kernfs_open_node_lock);

	/* kick fsnotify */
	mutex_lock(&kernfs_mutex);

	list_for_each_entry(info, &kernfs_root(kn)->supers, node) {
		struct inode *inode;
		struct dentry *dentry;

		inode = ilookup(info->sb, kn->ino);
		if (!inode)
			continue;

		dentry = d_find_any_alias(inode);
		if (dentry) {
			fsnotify_parent(NULL, dentry, FS_MODIFY);
			fsnotify(inode, FS_MODIFY, inode, FSNOTIFY_EVENT_INODE,
				 NULL, 0);
			dput(dentry);
		}

		iput(inode);
	}

	mutex_unlock(&kernfs_mutex);
	kernfs_put(kn);
	goto repeat;
}

/**
 * kernfs_notify - notify a kernfs file
 * @kn: file to notify
 *
 * Notify @kn such that poll(2) on @kn wakes up. May be called from any
 * context.
 */
void kernfs_notify(struct kernfs_node *kn)
{
	static DECLARE_WORK(kernfs_notify_work, kernfs_notify_workfn);
	unsigned long flags;

	if (WARN_ON(kernfs_type(kn) != KERNFS_FILE))
		return;

	spin_lock_irqsave(&kernfs_notify_lock, flags);
	if (!kn->attr.notify_next) {
		kernfs_get(kn);
		kn->attr.notify_next = kernfs_notify_list;
		kernfs_notify_list = kn;
		schedule_work(&kernfs_notify_work);
	}
	spin_unlock_irqrestore(&kernfs_notify_lock, flags);
}
EXPORT_SYMBOL_GPL(kernfs_notify);

const struct file_operations kernfs_file_fops = {
	.read = kernfs_fop_read,
	.write = kernfs_fop_write,
	.llseek = generic_file_llseek,
	.mmap = kernfs_fop_mmap,
	.open = kernfs_fop_open,
	.release = kernfs_fop_release,
	.poll = kernfs_fop_poll,
};

/**
 * __kernfs_create_file - kernfs internal function to create a file
 * @parent: directory to create the file in
 * @name: name of the file
 * @mode: mode of the file
 * @size: size of the file
 * @ops: kernfs operations for the file
 * @priv: private data for the file
 * @ns: optional namespace tag of the file
 * @key: lockdep key for the file's active_ref, %NULL to disable lockdep
 *
 * Returns the created node on success, ERR_PTR() value on error.
 */
struct kernfs_node *__kernfs_create_file(struct kernfs_node *parent,
					 const char *name,
					 umode_t mode, loff_t size,
					 const struct kernfs_ops *ops,
					 void *priv, const void *ns,
					 struct lock_class_key *key)
{
	struct kernfs_node *kn;
	unsigned flags;
	int rc;

	flags = KERNFS_FILE;

	kn = kernfs_new_node(parent, name, (mode & S_IALLUGO) | S_IFREG, flags);
	if (!kn)
		return ERR_PTR(-ENOMEM);

	kn->attr.ops = ops;
	kn->attr.size = size;
	kn->ns = ns;
	kn->priv = priv;

#ifdef CONFIG_DEBUG_LOCK_ALLOC
	if (key) {
		lockdep_init_map(&kn->dep_map, "s_active", key, 0);
		kn->flags |= KERNFS_LOCKDEP;
	}
#endif

	/*
	 * kn->attr.ops is accessible only while holding active ref. We
	 * need to know whether some ops are implemented outside active
	 * ref. Cache their existence in flags.
	 */
	if (ops->seq_show)
		kn->flags |= KERNFS_HAS_SEQ_SHOW;
	if (ops->mmap)
		kn->flags |= KERNFS_HAS_MMAP;

	rc = kernfs_add_one(kn);
	if (rc) {
		kernfs_put(kn);
		return ERR_PTR(rc);
	}
	return kn;
}