kernel_optimize_test/net/sunrpc/debugfs.c
Linus Torvalds 6860c981b9 NFS client updates for Linux 5.3

Merge tag 'nfs-for-5.3-1' of git://git.linux-nfs.org/projects/trondmy/linux-nfs

Pull NFS client updates from Trond Myklebust:
 "Highlights include:

  Stable fixes:

   - SUNRPC: Ensure bvecs are re-synced when we re-encode the RPC
     request

   - Fix an Oops in ff_layout_track_ds_error due to a PTR_ERR()
     dereference

   - Revert buggy NFS readdirplus optimisation

   - NFSv4: Handle the special Linux file open access mode

   - pnfs: Fix a problem where we gratuitously start doing I/O through
     the MDS

  Features:

   - Allow NFS client to set up multiple TCP connections to the server
     using a new 'nconnect=X' mount option. Queue length is used to
     balance load.

   - Enhance statistics reporting to report on all transports when using
     multiple connections.

   - Speed up SUNRPC by removing bh-safe spinlocks

   - Add a mechanism to allow NFSv4 to request that containers set a
     unique per-host identifier for when the hostname is not set.

   - Ensure NFSv4 updates the lease_time after a clientid update

  Bugfixes and cleanup:

   - Fix use-after-free in rpcrdma_post_recvs

   - Fix a memory leak when nfs_match_client() is interrupted

   - Fix buggy file access checking in NFSv4 open for execute

   - disable unsupported client side deduplication

   - Fix spurious client disconnections

   - Fix occasional RDMA transport deadlock

   - Various RDMA cleanups

   - Various tracepoint fixes

   - Fix the TCP callback channel to guarantee the server can actually
     send the number of callback requests that was negotiated at mount
     time"

* tag 'nfs-for-5.3-1' of git://git.linux-nfs.org/projects/trondmy/linux-nfs: (68 commits)
  pnfs/flexfiles: Add tracepoints for detecting pnfs fallback to MDS
  pnfs: Fix a problem where we gratuitously start doing I/O through the MDS
  SUNRPC: Optimise transport balancing code
  SUNRPC: Ensure the bvecs are reset when we re-encode the RPC request
  pnfs/flexfiles: Fix PTR_ERR() dereferences in ff_layout_track_ds_error
  NFSv4: Don't use the zero stateid with layoutget
  SUNRPC: Fix up backchannel slot table accounting
  SUNRPC: Fix initialisation of struct rpc_xprt_switch
  SUNRPC: Skip zero-refcount transports
  SUNRPC: Replace division by multiplication in calculation of queue length
  NFSv4: Validate the stateid before applying it to state recovery
  nfs4.0: Refetch lease_time after clientid update
  nfs4: Rename nfs41_setup_state_renewal
  nfs4: Make nfs4_proc_get_lease_time available for nfs4.0
  nfs: Fix copy-and-paste error in debug message
  NFS: Replace 16 seq_printf() calls by seq_puts()
  NFS: Use seq_putc() in nfs_show_stats()
  Revert "NFS: readdirplus optimization by cache mechanism" (memleak)
  SUNRPC: Fix transport accounting when caller specifies an rpc_xprt
  NFS: Record task, client ID, and XID in xdr_status trace points
  ...
2019-07-18 14:32:33 -07:00


// SPDX-License-Identifier: GPL-2.0
/*
 * debugfs interface for sunrpc
 *
 * (c) 2014 Jeff Layton <jlayton@primarydata.com>
 */

#include <linux/debugfs.h>
#include <linux/sunrpc/sched.h>
#include <linux/sunrpc/clnt.h>

#include "netns.h"

static struct dentry *topdir;
static struct dentry *rpc_clnt_dir;
static struct dentry *rpc_xprt_dir;

unsigned int rpc_inject_disconnect;
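
/*
 * Emit one line per outstanding RPC task: pid, flags, status, client ID,
 * XID, remaining timeout, tk_ops, program name/version, procedure name,
 * next action and the wait queue the task sleeps on ("none" if not queued).
 */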
static int
tasks_show(struct seq_file *f, void *v)
{
	u32 xid = 0;
	struct rpc_task *task = v;
	struct rpc_clnt *clnt = task->tk_client;
	const char *rpc_waitq = "none";

	if (RPC_IS_QUEUED(task))
		rpc_waitq = rpc_qname(task->tk_waitqueue);

	if (task->tk_rqstp)
		xid = be32_to_cpu(task->tk_rqstp->rq_xid);

	seq_printf(f, "%5u %04x %6d 0x%x 0x%x %8ld %ps %sv%u %s a:%ps q:%s\n",
		task->tk_pid, task->tk_flags, task->tk_status,
		clnt->cl_clid, xid, rpc_task_timeout(task), task->tk_ops,
		clnt->cl_program->name, clnt->cl_vers, rpc_proc_name(task),
		task->tk_action, rpc_waitq);
	return 0;
}

static void *
tasks_start(struct seq_file *f, loff_t *ppos)
	__acquires(&clnt->cl_lock)
{
	struct rpc_clnt *clnt = f->private;
	loff_t pos = *ppos;
	struct rpc_task *task;

	spin_lock(&clnt->cl_lock);
	list_for_each_entry(task, &clnt->cl_tasks, tk_task)
		if (pos-- == 0)
			return task;
	return NULL;
}

static void *
tasks_next(struct seq_file *f, void *v, loff_t *pos)
{
	struct rpc_clnt *clnt = f->private;
	struct rpc_task *task = v;
	struct list_head *next = task->tk_task.next;

	++*pos;

	/* If there's another task on list, return it */
	if (next == &clnt->cl_tasks)
		return NULL;
	return list_entry(next, struct rpc_task, tk_task);
}

static void
tasks_stop(struct seq_file *f, void *v)
	__releases(&clnt->cl_lock)
{
	struct rpc_clnt *clnt = f->private;
	spin_unlock(&clnt->cl_lock);
}

static const struct seq_operations tasks_seq_operations = {
	.start	= tasks_start,
	.next	= tasks_next,
	.stop	= tasks_stop,
	.show	= tasks_show,
};

static int tasks_open(struct inode *inode, struct file *filp)
{
	int ret = seq_open(filp, &tasks_seq_operations);
	if (!ret) {
		struct seq_file *seq = filp->private_data;
		struct rpc_clnt *clnt = seq->private = inode->i_private;

		/* Pin the client while the file is open; fail the open if
		 * the client is already being torn down.
		 */
		if (!atomic_inc_not_zero(&clnt->cl_count)) {
			seq_release(inode, filp);
			ret = -EINVAL;
		}
	}

	return ret;
}

static int
tasks_release(struct inode *inode, struct file *filp)
{
	struct seq_file *seq = filp->private_data;
	struct rpc_clnt *clnt = seq->private;

	rpc_release_client(clnt);
	return seq_release(inode, filp);
}

static const struct file_operations tasks_fops = {
	.owner		= THIS_MODULE,
	.open		= tasks_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= tasks_release,
};
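
/*
 * Add a symlink from the client's directory to each of its transports:
 * "xprt" for the first transport, "xprtN" for the rest. The link target
 * is the transport's ../../rpc_xprt/<id> directory.
 */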
static int do_xprt_debugfs(struct rpc_clnt *clnt, struct rpc_xprt *xprt, void *numv)
{
	int len;
	char name[24]; /* enough for "../../rpc_xprt/" + 8 hex digits + NULL */
	char link[9]; /* enough for 8 hex digits + NULL */
	int *nump = numv;

	if (IS_ERR_OR_NULL(xprt->debugfs))
		return 0;
	len = snprintf(name, sizeof(name), "../../rpc_xprt/%s",
		       xprt->debugfs->d_name.name);
	if (len > sizeof(name))
		return -1;
	if (*nump == 0)
		strcpy(link, "xprt");
	else {
		len = snprintf(link, sizeof(link), "xprt%d", *nump);
		if (len > sizeof(link))
			return -1;
	}
	debugfs_create_symlink(link, clnt->cl_debugfs, name);
	(*nump)++;
	return 0;
}
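
/*
 * Create the client's debugfs directory, named after the hex client ID,
 * containing the "tasks" file and a symlink for each of its transports.
 */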
void
rpc_clnt_debugfs_register(struct rpc_clnt *clnt)
{
	int len;
	char name[9]; /* enough for 8 hex digits + NULL */
	int xprtnum = 0;

	len = snprintf(name, sizeof(name), "%x", clnt->cl_clid);
	if (len >= sizeof(name))
		return;

	/* make the per-client dir */
	clnt->cl_debugfs = debugfs_create_dir(name, rpc_clnt_dir);

	/* make tasks file */
	debugfs_create_file("tasks", S_IFREG | 0400, clnt->cl_debugfs, clnt,
			    &tasks_fops);

	rpc_clnt_iterate_for_each_xprt(clnt, do_xprt_debugfs, &xprtnum);
}

void
rpc_clnt_debugfs_unregister(struct rpc_clnt *clnt)
{
	debugfs_remove_recursive(clnt->cl_debugfs);
	clnt->cl_debugfs = NULL;
}
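
/* Contents of a transport's "info" file: netid, address, port and state flags. */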
static int
xprt_info_show(struct seq_file *f, void *v)
{
	struct rpc_xprt *xprt = f->private;

	seq_printf(f, "netid: %s\n", xprt->address_strings[RPC_DISPLAY_NETID]);
	seq_printf(f, "addr:  %s\n", xprt->address_strings[RPC_DISPLAY_ADDR]);
	seq_printf(f, "port:  %s\n", xprt->address_strings[RPC_DISPLAY_PORT]);
	seq_printf(f, "state: 0x%lx\n", xprt->state);
	return 0;
}

static int
xprt_info_open(struct inode *inode, struct file *filp)
{
	int ret;
	struct rpc_xprt *xprt = inode->i_private;

	ret = single_open(filp, xprt_info_show, xprt);

	if (!ret) {
		if (!xprt_get(xprt)) {
			single_release(inode, filp);
			ret = -EINVAL;
		}
	}
	return ret;
}

static int
xprt_info_release(struct inode *inode, struct file *filp)
{
	struct rpc_xprt *xprt = inode->i_private;

	xprt_put(xprt);
	return single_release(inode, filp);
}

static const struct file_operations xprt_info_fops = {
	.owner		= THIS_MODULE,
	.open		= xprt_info_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= xprt_info_release,
};

void
rpc_xprt_debugfs_register(struct rpc_xprt *xprt)
{
	int len, id;
	static atomic_t	cur_id;
	char		name[9]; /* 8 hex digits + NULL term */

	id = (unsigned int)atomic_inc_return(&cur_id);

	len = snprintf(name, sizeof(name), "%x", id);
	if (len >= sizeof(name))
		return;

	/* make the per-xprt dir */
	xprt->debugfs = debugfs_create_dir(name, rpc_xprt_dir);

	/* make info file */
	debugfs_create_file("info", S_IFREG | 0400, xprt->debugfs, xprt,
			    &xprt_info_fops);

	/* seed this transport's disconnect-injection counter */
	atomic_set(&xprt->inject_disconnect, rpc_inject_disconnect);
}

void
rpc_xprt_debugfs_unregister(struct rpc_xprt *xprt)
{
	debugfs_remove_recursive(xprt->debugfs);
	xprt->debugfs = NULL;
}
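
/*
 * "inject_fault/disconnect": reading reports the current value of
 * rpc_inject_disconnect; writing a decimal value updates it. Each transport
 * copies the value into xprt->inject_disconnect when it registers above.
 */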
static int
fault_open(struct inode *inode, struct file *filp)
{
	filp->private_data = kmalloc(128, GFP_KERNEL);
	if (!filp->private_data)
		return -ENOMEM;
	return 0;
}

static int
fault_release(struct inode *inode, struct file *filp)
{
	kfree(filp->private_data);
	return 0;
}

static ssize_t
fault_disconnect_read(struct file *filp, char __user *user_buf,
		      size_t len, loff_t *offset)
{
	char *buffer = (char *)filp->private_data;
	size_t size;

	size = sprintf(buffer, "%u\n", rpc_inject_disconnect);
	return simple_read_from_buffer(user_buf, len, offset, buffer, size);
}

static ssize_t
fault_disconnect_write(struct file *filp, const char __user *user_buf,
		       size_t len, loff_t *offset)
{
	char buffer[16];

	if (len >= sizeof(buffer))
		len = sizeof(buffer) - 1;
	if (copy_from_user(buffer, user_buf, len))
		return -EFAULT;
	buffer[len] = '\0';
	if (kstrtouint(buffer, 10, &rpc_inject_disconnect))
		return -EINVAL;

	return len;
}

static const struct file_operations fault_disconnect_fops = {
	.owner		= THIS_MODULE,
	.open		= fault_open,
	.read		= fault_disconnect_read,
	.write		= fault_disconnect_write,
	.release	= fault_release,
};

void __exit
sunrpc_debugfs_exit(void)
{
	debugfs_remove_recursive(topdir);
	topdir = NULL;
	rpc_clnt_dir = NULL;
	rpc_xprt_dir = NULL;
}
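
/*
 * Build the top-level layout under the debugfs mount (usually
 * /sys/kernel/debug): sunrpc/rpc_clnt/, sunrpc/rpc_xprt/ and
 * sunrpc/inject_fault/disconnect.
 */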
void __init
sunrpc_debugfs_init(void)
{
	struct dentry *rpc_fault_dir;

	topdir = debugfs_create_dir("sunrpc", NULL);

	rpc_clnt_dir = debugfs_create_dir("rpc_clnt", topdir);

	rpc_xprt_dir = debugfs_create_dir("rpc_xprt", topdir);

	rpc_fault_dir = debugfs_create_dir("inject_fault", topdir);

	debugfs_create_file("disconnect", S_IFREG | 0400, rpc_fault_dir, NULL,
			    &fault_disconnect_fops);
}