NFS Client Updates for Linux 5.8

Merge tag 'nfs-for-5.8-1' of git://git.linux-nfs.org/projects/anna/linux-nfs

Pull NFS client updates from Anna Schumaker:

 "New features and improvements:

  - Sunrpc receive buffer sizes only change when establishing GSS credentials

  - Add more sunrpc tracepoints

  - Improve tracepoints to capture internal NFS I/O errors

  Other bugfixes and cleanups:

  - Move a dprintk() to after a call to nfs_alloc_fattr()

  - Fix off-by-one issues in rpc_ntop6

  - Fix a few coccicheck warnings

  - Use the correct SPDX license identifiers

  - Fix rpc_call_done assignment for BIND_CONN_TO_SESSION

  - Replace zero-length array with flexible array

  - Remove duplicate headers

  - Set invalid blocks after NFSv4 writes to update the space_used attribute

  - Fix direct WRITE throughput regression"

* tag 'nfs-for-5.8-1' of git://git.linux-nfs.org/projects/anna/linux-nfs: (27 commits)
  NFS: Fix direct WRITE throughput regression
  SUNRPC: rpc_xprt lifetime events should record xprt->state
  xprtrdma: Make xprt_rdma_slot_table_entries static
  nfs: set invalid blocks after NFSv4 writes
  NFS: remove redundant initialization of variable result
  sunrpc: add missing newline when printing parameter 'auth_hashtable_size' by sysfs
  NFS: Add a tracepoint in nfs_set_pgio_error()
  NFS: Trace short NFS READs
  NFS: nfs_xdr_status should record the procedure name
  SUNRPC: Set SOFTCONN when destroying GSS contexts
  SUNRPC: rpc_call_null_helper() should set RPC_TASK_SOFT
  SUNRPC: rpc_call_null_helper() already sets RPC_TASK_NULLCREDS
  SUNRPC: trace RPC client lifetime events
  SUNRPC: Trace transport lifetime events
  SUNRPC: Split the xdr_buf event class
  SUNRPC: Add tracepoint to rpc_call_rpcerror()
  SUNRPC: Update the RPC_SHOW_SOCKET() macro
  SUNRPC: Update the rpc_show_task_flags() macro
  SUNRPC: Trace GSS context lifetimes
  SUNRPC: receive buffer size estimation values almost never change
  ...
This commit is contained in: a539568299
Summary of changes:

@@ -446,7 +446,7 @@ ssize_t nfs_file_direct_read(struct kiocb *iocb, struct iov_iter *iter)
    -	ssize_t result = -EINVAL, requested;
    +	ssize_t result, requested;
    Drops the redundant initialization of 'result'; the surrounding declarations
    (inode, dreq, l_ctx, count) and the NFSIOS_DIRECTREADBYTES accounting are
    unchanged.

@@ -731,6 +731,8 @@ static void nfs_direct_write_completion(struct nfs_pgio_header *hdr)
    Two lines are added inside the request_commit branch. After the change the
    branch takes a reference with kref_get(&req->wb_kref), copies
    hdr->verf.verifier into req->wb_verf, and then calls
    nfs_mark_request_commit(req, hdr->lseg, &cinfo, hdr->ds_commit_idx).
@@ -39,7 +39,6 @@ ssize_t nfs_dns_resolve_name(struct net *net, char *name, size_t namelen, ...)
    One duplicate #include is removed from the header block (the visible list is
    string.h, kmod.h, slab.h, module.h, socket.h, seq_file.h and inet.h).
@@ -833,6 +833,8 @@ int nfs_getattr(const struct path *path, struct kstat *stat, ...)
    +	if (request_mask & STATX_BLOCKS)
    +		do_update |= cache_validity & NFS_INO_INVALID_BLOCKS;
    Added after the STATX_CTIME|STATX_MTIME check, so a stale block count also
    forces an attribute cache update when the caller asked for STATX_BLOCKS.

@@ -1764,7 +1766,8 @@ int nfs_post_op_update_inode_force_wcc_locked(struct inode *inode, struct nfs_fattr *fattr)
    The nfs_post_op_update_inode_locked() call now also passes
    NFS_INO_INVALID_BLOCKS in addition to NFS_INO_INVALID_CHANGE,
    NFS_INO_INVALID_CTIME and NFS_INO_INVALID_MTIME.

@@ -1871,7 +1874,8 @@ static int nfs_update_inode(struct inode *inode, struct nfs_fattr *fattr)
    NFS_INO_INVALID_BLOCKS joins the set of bits cleared from
    nfsi->cache_validity (alongside NFS_INO_INVALID_ATTR, NFS_INO_INVALID_ATIME,
    NFS_INO_REVAL_FORCED and NFS_INO_REVAL_PAGECACHE) before the atomic weak
    cache consistency updates in nfs_wcc_update_inode().

@@ -2033,8 +2037,12 @@ static int nfs_update_inode(struct inode *inode, struct nfs_fattr *fattr)
    The bare 'else' fallback after the NFS_ATTR_FATTR_BLOCKS_USED case (which
    sets inode->i_blocks from fattr->du.nfs2.blocks or
    nfs_calc_block_size(fattr->du.nfs3.used)) becomes a block that re-arms the
    invalid-blocks state:
    +	else {
    +		nfsi->cache_validity |= save_cache_validity &
    +				(NFS_INO_INVALID_BLOCKS | NFS_INO_REVAL_FORCED);
    +		cache_revalidated = false;
    +	}
@@ -179,11 +179,11 @@ nfs3_proc_lookup(struct inode *dir, struct dentry *dentry, ...)
    The dprintk("NFS call lookup %pd2\n", dentry) is moved from before the
    res.dir_attr = nfs_alloc_fattr() allocation (and its -ENOMEM check) to just
    before nfs_fattr_init(fattr), so the call is only logged once the fattr
    allocation has succeeded.
@@ -7909,7 +7909,7 @@ nfs4_bind_one_conn_to_session_done(struct rpc_task *task, void *calldata)
    In nfs4_bind_one_conn_to_session_ops the .rpc_call_done initializer is
    corrected so that it points at nfs4_bind_one_conn_to_session_done:
    -	.rpc_call_done = &nfs4_bind_one_conn_to_session_done,
    +	.rpc_call_done = nfs4_bind_one_conn_to_session_done,
@@ -961,6 +961,97 @@ TRACE_EVENT(nfs_readpage_done, ...)
    Two new tracepoints are added after nfs_readpage_done:
    - TRACE_EVENT(nfs_readpage_short, TP_PROTO(const struct rpc_task *task,
      const struct nfs_pgio_header *hdr)): records dev, fhandle, fileid, offset,
      arg_count, res_count, eof and the task's tk_status, printed as
      "fileid=%02x:%02x:%llu fhandle=0x%08x offset=%lld count=%u res=%u status=%d%s"
      with an " eof" suffix when hdr->res.eof is set.
    - TRACE_EVENT(nfs_pgio_error, TP_PROTO(const struct nfs_pgio_header *hdr,
      int error, loff_t pos)): records dev, fhandle, fileid, offset, arg_count,
      res_count, pos and the error value, printed as
      "fileid=%02x:%02x:%llu fhandle=0x%08x offset=%lld count=%u res=%u pos=%llu status=%d".
    The TRACE_DEFINE_ENUM(NFS_UNSTABLE / NFS_DATA_SYNC / NFS_FILE_SYNC) block
    that follows is unchanged.

@@ -1312,7 +1403,12 @@ TRACE_EVENT(nfs_xdr_status, ...)
    The entry structure gains:
    +	__field(int, version)
    +	__string(program, xdr->rqst->rq_task->tk_client->cl_program->name)
    +	__string(procedure, xdr->rqst->rq_task->tk_msg.rpc_proc->p_name)

@@ -1322,13 +1418,19 @@ TRACE_EVENT(nfs_xdr_status, ...)
    TP_fast_assign() now also records task->tk_client->cl_vers and assigns the
    program and procedure strings, and the output format changes from
    -	"task:%u@%d xid=0x%08x error=%ld (%s)"
    +	"task:%u@%d xid=0x%08x %sv%d %s error=%ld (%s)"
    so each nfs_xdr_status event names the RPC program, version and procedure.
@@ -24,6 +24,7 @@ (NFS page I/O code, NFSDBG_FACILITY NFSDBG_PAGECACHE)
    +	#include "nfstrace.h"
    Added alongside "internal.h" and "pnfs.h" so the file can emit the new
    tracepoint below.

@@ -64,6 +65,7 @@ void nfs_set_pgio_error(struct nfs_pgio_header *hdr, int error, loff_t pos)
    +	trace_nfs_pgio_error(hdr, error, pos);
    Emitted before the good_bytes/NFS_IOHDR_EOF adjustment.

@@ -264,6 +264,8 @@ static void nfs_readpage_retry(struct rpc_task *task, ...)
    A trace_nfs_readpage_short(task, hdr) call (with a "This is a short read!"
    comment) is added right after the nfs_inc_stats(hdr->inode, NFSIOS_SHORTREAD)
    bump and before the "Has the server at least made some progress?" check,
    which treats resp->count == 0 as -EIO via
    nfs_set_pgio_error(hdr, -EIO, argp->offset).
@@ -1,4 +1,4 @@ (NFS header file, Copyright (c) 2019 Hammerspace Inc)
    -	// SPDX-License-Identifier: GPL-2.0
    +	/* SPDX-License-Identifier: GPL-2.0 */
    Header files use the C-comment form of the SPDX line.
@@ -38,7 +38,7 @@ struct nfs4_ace {
    In struct nfs4_acl the zero-length array becomes a flexible array member:
    -	struct nfs4_ace aces[0];
    +	struct nfs4_ace aces[];
    (NFS4_MAXLABELLEN below is unchanged.)

@@ -295,7 +295,7 @@ static inline bool seqid_mutating_err(u32 err)
    The switch statement loses its stray trailing semicolon:
    -	};
    +	}

@@ -230,6 +230,7 @@ struct nfs4_copy_state ...
    +	#define NFS_INO_INVALID_BLOCKS	BIT(14)	/* cached blocks are invalid */
    Added after NFS_INO_DATA_INVAL_DEFER (BIT(13)); the NFS_INO_INVALID_ATTR
    mask that follows is unchanged.

@@ -1227,7 +1227,7 @@ struct nfs4_secinfo4 ...
    In struct nfs4_secinfo_flavors:
    -	struct nfs4_secinfo4 flavors[0];
    +	struct nfs4_secinfo4 flavors[];
@@ -76,7 +76,7 @@ struct rpc_auth {
    -	unsigned int	au_flags;
    +	unsigned long	au_flags;
    (au_verfsize, au_ralign, au_ops and au_flavor around it are unchanged.)

@@ -89,7 +89,8 @@ struct rpc_auth ...
    The au_flags constants become bit numbers rather than masks, and a new bit
    is added:
    -	#define RPCAUTH_AUTH_DATATOUCH		0x00000002
    +	#define RPCAUTH_AUTH_DATATOUCH		(1)
    +	#define RPCAUTH_AUTH_UPDATE_SLACK	(2)
@@ -17,6 +17,16 @@ (GSS-API related trace events)
    Adds TRACE_DEFINE_ENUM() entries for RPC_GSS_SVC_NONE, RPC_GSS_SVC_INTEGRITY
    and RPC_GSS_SVC_PRIVACY plus a show_gss_service() __print_symbolic() helper
    that maps them to "none", "integrity" and "privacy".

@@ -126,6 +136,40 @@ DEFINE_GSSAPI_EVENT(wrap); / DEFINE_GSSAPI_EVENT(unwrap);
    Adds DECLARE_EVENT_CLASS(rpcgss_ctx_class), which records the gss_cred
    pointer, its gc_service and the gc_principal string and prints
    "cred=%p service=%s principal='%s'", plus a DEFINE_CTX_EVENT() wrapper used
    to define rpcgss_ctx_init and rpcgss_ctx_destroy.

@@ -291,6 +335,40 @@ TRACE_EVENT(rpcgss_need_reencode, ...)
    Adds TRACE_EVENT(rpcgss_update_slack): for a given task and rpc_auth it logs
    the task id, client id, xid, the auth pointer and the current au_rslack,
    au_ralign and au_verfsize values
    ("task:%u@%u xid=0x%08x auth=%p rslack=%u ralign=%u verfsize=%u").

@@ -371,6 +449,7 @@ TRACE_EVENT(rpcgss_upcall_result, ...)
@@ -378,12 +457,13 @@ / @@ -392,13 +472,14 @@ TRACE_EVENT(rpcgss_context, ...)
    TRACE_EVENT(rpcgss_context) gains a new u32 window_size argument: it is
    added to TP_PROTO() and TP_ARGS(), stored in a new __field, assigned in
    TP_fast_assign(), and the output format becomes
    "win_size=%u expiry=%lu now=%lu timeout=%u acceptor=%.*s".
@@ -380,12 +380,8 @@ TRACE_EVENT(xprtrdma_inline_thresh, ...)
    The connection/transport event list here is reorganized (net four lines
    shorter): DEFINE_CONN_EVENT(flush_dct) appears alongside connect and
    disconnect, while some of the DEFINE_RXPRT_EVENT() definitions shown
    (xprtrdma_create, xprtrdma_op_destroy, xprtrdma_op_inject_dsc,
    xprtrdma_op_close, xprtrdma_op_setport) are dropped; the matching call-site
    changes are in the xprtrdma hunks near the end.
@@ -47,14 +47,17 @@ TRACE_DEFINE_ENUM(AF_INET6); ...
@@ -64,6 +67,8 @@ / @@ -72,23 +77,137 @@ DECLARE_EVENT_CLASS(xdr_buf_class, ...)
    The generic xdr_buf event class is split and extended:
    - DECLARE_EVENT_CLASS(xdr_buf_class) becomes
      DECLARE_EVENT_CLASS(rpc_xdr_buf_class), now taking
      (const struct rpc_task *task, const struct xdr_buf *xdr); the entry gains
      task_id and client_id fields and the format becomes
      "task:%u@%u head=[%p,%zu] page=%u tail=[%p,%zu] len=%u".
    - DEFINE_XDRBUF_EVENT() is replaced by DEFINE_RPCXDRBUF_EVENT(), which
      defines rpc_xdr_sendto, rpc_xdr_recvfrom and rpc_xdr_reply_pages; the old
      DEFINE_XDRBUF_EVENT(xprt_sendto / xprt_recvfrom / svc_recvfrom /
      svc_sendto) definitions are removed.
    - A new DECLARE_EVENT_CLASS(rpc_clnt_class) records cl_clid and prints
      "clid=%u"; DEFINE_RPC_CLNT_EVENT() instances are added for free, killall,
      shutdown, release, replace_xprt and replace_xprt_err.
    - New TRACE_EVENT(rpc_clnt_new)
      ("client=%u peer=[%s]:%s program=%s server=%s"),
      TRACE_EVENT(rpc_clnt_new_err) ("program=%s server=%s error=%d") and
      TRACE_EVENT(rpc_clnt_clone_err) ("client=%u error=%d") cover client
      lifetime events.
@@ -175,29 +294,35 @@ TRACE_EVENT(rpc_request, ...)
    TRACE_DEFINE_ENUM(RPC_TASK_CRED_NOREF) is added to the task-flag enum list,
    and rpc_show_task_flags() gains a matching
    { RPC_TASK_CRED_NOREF, "CRED_NOREF" } entry after
    { RPC_TASK_NO_RETRANS_TIMEOUT, "NORTO" }.
@@ -392,6 +517,34 @@ DEFINE_RPC_REPLY_EVENT(stale_creds); ...
    Adds TRACE_EVENT(rpc_call_rpcerror), recording the task id, client id,
    tk_status and rpc_status and printing
    "task:%u@%u tk_status=%d rpc_status=%d".
@@ -559,43 +712,6 @@ TRACE_EVENT(rpc_xdr_alignment, ...)
    TRACE_EVENT(rpc_reply_pages), which logged the task id, client id and the
    head/page/tail geometry of rq_rcv_buf, is removed; its role is taken over by
    the rpc_xdr_reply_pages event defined above.
@@ -608,9 +724,9 @@ (RPC_SHOW_SOCKET macro)
    The stray commas inside two of the state strings are dropped:
    -	EM( SS_CONNECTING, "CONNECTING," )
    -	EM( SS_CONNECTED, "CONNECTED," )
    +	EM( SS_CONNECTING, "CONNECTING" )
    +	EM( SS_CONNECTED, "CONNECTED" )
    (SS_FREE, SS_UNCONNECTED and SS_DISCONNECTING are unchanged, as is
    rpc_show_socket_state().)
@@ -752,6 +868,69 @@ DEFINE_RPC_SOCKET_EVENT(rpc_socket_close); / (rpc_socket_shutdown);
    New transport lifetime tracing:
    - TRACE_DEFINE_ENUM() entries for the XPRT_* state bits (LOCKED, CONNECTED,
      CONNECTING, CLOSE_WAIT, BOUND, BINDING, CLOSING, CONGESTED, CWND_WAIT,
      WRITE_SPACE) and an rpc_show_xprt_state() __print_flags() helper.
    - DECLARE_EVENT_CLASS(rpc_xprt_lifetime_class): records xprt->state plus the
      transport's address and port strings, printing "peer=[%s]:%s state=%s".
    - DEFINE_RPC_XPRT_LIFETIME_EVENT() instances: xprt_create,
      xprt_disconnect_auto, xprt_disconnect_done, xprt_disconnect_force,
      xprt_disconnect_cleanup and xprt_destroy.
    The existing rpc_xprt_event class below is unchanged.
@@ -1023,6 +1202,54 @@ TRACE_EVENT(xs_stream_read_request, ...)
    Adds DECLARE_EVENT_CLASS(svc_xdr_buf_class), the server-side counterpart of
    rpc_xdr_buf_class: it is keyed by the request XID
    (be32_to_cpu(rqst->rq_xid)) and records the xdr_buf head/page/tail geometry,
    printing "xid=0x%08x head=[%p,%zu] page=%u tail=[%p,%zu] len=%u".
    DEFINE_SVCXDRBUF_EVENT() defines svc_xdr_recvfrom and svc_xdr_sendto.
@@ -82,11 +82,11 @@ static size_t rpc_ntop6(const struct sockaddr *sap, ...)
    Two off-by-one checks on the IPv6 scope-id suffix are tightened:
    -	if (unlikely((size_t)rc > sizeof(scopebuf)))
    +	if (unlikely((size_t)rc >= sizeof(scopebuf)))
    after the snprintf(scopebuf, ...) of IPV6_SCOPE_DELIMITER and
    sin6->sin6_scope_id, and, after len += rc:
    -	if (unlikely(len > buflen))
    +	if (unlikely(len >= buflen))
    before the strcat(buf, scopebuf).
@@ -81,7 +81,7 @@ static int param_get_hashtbl_sz(char *buffer, const struct kernel_param *kp)
    -	return sprintf(buffer, "%u", 1U << nbits);
    +	return sprintf(buffer, "%u\n", 1U << nbits);
    so reading the auth_hashtable_size parameter through sysfs ends with a
    newline.
@@ -254,7 +254,7 @@ gss_fill_context(const void *p, const void *end, struct gss_cl_ctx *ctx, ...)
    -	trace_rpcgss_context(ctx->gc_expiry, now, timeout,
    +	trace_rpcgss_context(window_size, ctx->gc_expiry, now, timeout,
    		ctx->gc_acceptor.len, ctx->gc_acceptor.data);

@@ -697,10 +697,12 @@ gss_create_upcall(struct gss_auth *gss_auth, struct gss_cred *gss_cred)
    The gss_msg->ctx branch gains braces and a context-lifetime tracepoint; it
    now reads:
    	if (gss_msg->ctx) {
    		trace_rpcgss_ctx_init(gss_cred);
    		gss_cred_set_ctx(cred, gss_msg->ctx);
    	} else {
    		err = gss_msg->msg.errno;
    	}

@@ -1054,11 +1056,11 @@ gss_create_new(const struct rpc_auth_create_args *args, struct rpc_clnt *clnt)
    -	auth->au_flags = 0;
    +	__set_bit(RPCAUTH_AUTH_UPDATE_SLACK, &auth->au_flags);
    and the data-touch case becomes a bit operation:
    -		auth->au_flags |= RPCAUTH_AUTH_DATATOUCH;
    +		__set_bit(RPCAUTH_AUTH_DATATOUCH, &auth->au_flags);

@@ -1284,8 +1286,9 @@ gss_send_destroy_context(struct rpc_cred *cred)
    +	trace_rpcgss_ctx_destroy(gss_cred);
    is emitted before the rpc_call_null() that sends RPC_GSS_PROC_DESTROY, and
    that call now passes only RPC_TASK_ASYNC:
    -		RPC_TASK_ASYNC|RPC_TASK_SOFT);
    +		RPC_TASK_ASYNC);
    (RPC_TASK_SOFT and RPC_TASK_SOFTCONN are now applied centrally by
    rpc_call_null_helper(); see the sunrpc client changes below.)

@@ -1349,7 +1352,6 @@ gss_destroy_nullcred(struct rpc_cred *cred) ... gss_destroy_cred()
    gss_destroy_cred() is trimmed by one line; the remaining body calls
    gss_send_destroy_context(cred) when RPCAUTH_CRED_UPTODATE was set and then
    gss_destroy_nullcred(cred).

@@ -1613,6 +1615,7 @@ static int gss_renew_cred(struct rpc_task *task)
    One line is added after the gss_lookup_cred(..., RPCAUTH_LOOKUP_NEW) call;
    the visible result assigns the fresh credential to task->tk_rqstp->rq_cred
    before put_rpccred(oldcred).

@@ -1709,7 +1712,8 @@ gss_validate(struct rpc_task *task, struct xdr_stream *xdr)
    The verifier-size update becomes conditional:
    -	cred->cr_auth->au_verfsize = XDR_QUADLEN(len) + 2;
    +	if (test_bit(RPCAUTH_AUTH_UPDATE_SLACK, &cred->cr_auth->au_flags))
    +		cred->cr_auth->au_verfsize = XDR_QUADLEN(len) + 2;

@@ -1927,13 +1931,30 @@ static int gss_wrap_req(struct rpc_task *task, struct xdr_stream *xdr)
    Adds the gss_update_rslack() helper (with kernel-doc describing @task,
    @cred, @before and @after as XDR words needed before and after each RPC
    Reply message). It only updates au_ralign (au_verfsize + before) and
    au_rslack (au_verfsize + after) when it can test_and_clear_bit()
    RPCAUTH_AUTH_UPDATE_SLACK, and it emits trace_rpcgss_update_slack().
    gss_unwrap_resp_auth() now takes the rpc_task and calls
    gss_update_rslack(task, cred, 0, 0).
@@ -1956,7 +1977,6 @@ gss_unwrap_resp_integ(struct rpc_task *task, struct rpc_cred *cred, ...)
    The local "struct rpc_auth *auth = cred->cr_auth;" is dropped.

@@ -2005,8 +2025,7 @@
    -	auth->au_rslack = auth->au_verfsize + 2 + 1 + XDR_QUADLEN(mic.len);
    -	auth->au_ralign = auth->au_verfsize + 2;
    +	gss_update_rslack(task, cred, 2, 2 + 1 + XDR_QUADLEN(mic.len));

@@ -2031,7 +2050,6 @@ gss_unwrap_resp_priv(struct rpc_task *task, struct rpc_cred *cred, ...)
    The local rpc_auth pointer is dropped here as well.

@@ -2058,8 +2076,8 @@
    -	auth->au_rslack = auth->au_verfsize + 2 + ctx->gc_gss_ctx->slack;
    -	auth->au_ralign = auth->au_verfsize + 2 + ctx->gc_gss_ctx->align;
    +	gss_update_rslack(task, cred, 2 + ctx->gc_gss_ctx->align,
    +			  2 + ctx->gc_gss_ctx->slack);

@@ -2130,7 +2148,7 @@ gss_unwrap_resp(struct rpc_task *task, struct xdr_stream *xdr)
    -		status = gss_unwrap_resp_auth(cred);
    +		status = gss_unwrap_resp_auth(task, cred);
@@ -6,6 +6,7 @@ (sunrpc GSS tracepoint definitions, CREATE_TRACE_POINTS for <trace/events/rpcgss.h>)
    Adds #include <linux/sunrpc/auth_gss.h> to the include block (alongside
    clnt.h, sched.h and gss_err.h) so the new rpcgss_ctx_* events can look
    inside struct gss_cred.
@@ -370,10 +370,6 @@ static struct rpc_clnt * rpc_new_client(const struct rpc_create_args *args, ...)
    The "creating %s client for %s (xprt %p)" dprintk (and its "sanity check the
    name before trying to print it" comment) is removed.

@@ -436,6 +432,8 @@
    +	trace_rpc_clnt_new(clnt, xprt, program->name, args->servername);
    added before the successful return.

@@ -450,6 +448,7 @@
    +	trace_rpc_clnt_new_err(program->name, args->servername, err);
    added in the out_no_rpciod error path.

@@ -634,10 +633,8 @@ static struct rpc_clnt *__rpc_clone_client(struct rpc_create_args *args, ...)
    The error bounce through out_err becomes a direct return:
    -	if (IS_ERR(new)) {
    -		err = PTR_ERR(new);
    -		goto out_err;
    -	}
    +	if (IS_ERR(new))
    +		return new;

@@ -650,7 +647,7 @@
    -	dprintk("RPC: %s: returned error %d\n", __func__, err);
    +	trace_rpc_clnt_clone_err(clnt, err);

@@ -723,11 +720,8 @@ int rpc_switch_client_transport(struct rpc_clnt *clnt, ...)
    The dprintk on xprt_create_transport() failure is dropped; the IS_ERR(xprt)
    case now simply returns PTR_ERR(xprt).

@@ -767,7 +761,7 @@
    -	dprintk("RPC: replaced xprt for clnt %p\n", clnt);
    +	trace_rpc_clnt_replace_xprt(clnt);

@@ -777,7 +771,7 @@
    -	dprintk("RPC: failed to switch xprt for clnt %p\n", clnt);
    +	trace_rpc_clnt_replace_xprt_err(clnt);

@@ -844,10 +838,11 @@ void rpc_killall_tasks(struct rpc_clnt *clnt)
    The "killing all tasks for client %p" dprintk is replaced by
    trace_rpc_clnt_killall(clnt) just before cl_lock is taken.

@@ -863,9 +858,7 @@ void rpc_shutdown_client(struct rpc_clnt *clnt)
    The dprintk_rcu "shutting down %s client for %s" is replaced by
    trace_rpc_clnt_shutdown(clnt).

@@ -884,6 +877,8 @@ static void rpc_free_client_work(struct work_struct *work)
    +	trace_rpc_clnt_free(clnt);

@@ -901,9 +896,7 @@ rpc_free_client(struct rpc_clnt *clnt)
    The dprintk_rcu "destroying %s client for %s" is replaced by
    trace_rpc_clnt_release(clnt).

@@ -945,8 +938,6 @@ rpc_free_auth() ... rpc_release_client(struct rpc_clnt *clnt)
    The "rpc_release_client(%p)" dprintk is removed.

@@ -1270,7 +1261,7 @@ void rpc_prepare_reply_pages(struct rpc_rqst *req, struct page **pages, ...)
    -	trace_rpc_reply_pages(req);
    +	trace_rpc_xdr_reply_pages(req->rq_task, &req->rq_rcv_buf);

@@ -1624,6 +1615,7 @@ __rpc_call_rpcerror(struct rpc_task *task, int tk_status, int rpc_status)
    +	trace_rpc_call_rpcerror(task, tk_status, rpc_status);
    emitted before tk_rpc_status is set and rpc_exit() is called.

@@ -2531,7 +2523,7 @@ call_decode(struct rpc_task *task)
    -	trace_xprt_recvfrom(&req->rq_rcv_buf);
    +	trace_rpc_xdr_recvfrom(task, &req->rq_rcv_buf);

@@ -2760,7 +2752,8 @@ struct rpc_task *rpc_call_null_helper(struct rpc_clnt *clnt, ...)
    -	.flags = flags | RPC_TASK_NULLCREDS,
    +	.flags = flags | RPC_TASK_SOFT | RPC_TASK_SOFTCONN | RPC_TASK_NULLCREDS,

@@ -2823,8 +2816,7 @@ int rpc_clnt_test_and_add_xprt(struct rpc_clnt *clnt, ...)
    The explicit flag list at the call site shrinks accordingly:
    -	task = rpc_call_null_helper(clnt, xprt, NULL,
    -		RPC_TASK_SOFT|RPC_TASK_SOFTCONN|RPC_TASK_ASYNC|RPC_TASK_NULLCREDS,
    +	task = rpc_call_null_helper(clnt, xprt, NULL, RPC_TASK_ASYNC,
    		&rpc_cb_add_xprt_call_ops, data);

@@ -2867,9 +2859,7 @@ int rpc_clnt_setup_test_and_add_xprt(struct rpc_clnt *clnt, ...)
    -	task = rpc_call_null_helper(clnt, xprt, NULL,
    -		RPC_TASK_SOFT | RPC_TASK_SOFTCONN | RPC_TASK_NULLCREDS,
    -		NULL, NULL);
    +	task = rpc_call_null_helper(clnt, xprt, NULL, 0, NULL, NULL);
@@ -814,7 +814,7 @@ static int svc_handle_xprt(struct svc_rqst *rqstp, struct svc_xprt *xprt)
    -	trace_svc_recvfrom(&rqstp->rq_arg);
    +	trace_svc_xdr_recvfrom(rqstp, &rqstp->rq_arg);

@@ -906,7 +906,7 @@ int svc_send(struct svc_rqst *rqstp)
    -	trace_svc_sendto(xb);
    +	trace_svc_xdr_sendto(rqstp, xb);
@@ -663,6 +663,7 @@ static void xprt_autoclose(struct work_struct *work)
    +	trace_xprt_disconnect_auto(xprt);
    emitted before XPRT_CLOSE_WAIT is cleared and the transport is closed.

@@ -677,7 +678,7 @@ void xprt_disconnect_done(struct rpc_xprt *xprt)
    -	dprintk("RPC: disconnected transport %p\n", xprt);
    +	trace_xprt_disconnect_done(xprt);

@@ -694,6 +695,8 @@ void xprt_force_disconnect(struct rpc_xprt *xprt)
    +	trace_xprt_disconnect_force(xprt);
    emitted before taking transport_lock and setting XPRT_CLOSE_WAIT.

@@ -832,8 +835,10 @@ void xprt_connect(struct rpc_task *task)
    The XPRT_CLOSE_WAIT cleanup gains braces and a tracepoint:
    	if (test_and_clear_bit(XPRT_CLOSE_WAIT, &xprt->state)) {
    		trace_xprt_disconnect_cleanup(xprt);
    		xprt->ops->close(xprt);
    	}

@@ -1460,7 +1465,7 @@ xprt_request_transmit(struct rpc_rqst *req, struct rpc_task *snd_task)
    -	trace_xprt_sendto(&req->rq_snd_buf);
    +	trace_rpc_xdr_sendto(task, &req->rq_snd_buf);

@@ -1903,11 +1908,8 @@ struct rpc_xprt *xprt_create_transport(struct xprt_create *args)
    The dprintk on t->setup() failure is dropped; IS_ERR(xprt) now just jumps to
    the out label.

@@ -1928,8 +1930,7 @@
    -	dprintk("RPC: created transport %p with %u slots\n", xprt, xprt->max_reqs);
    +	trace_xprt_create(xprt);

@@ -1939,6 +1940,8 @@ static void xprt_destroy_cb(struct work_struct *work)
    +	trace_xprt_destroy(xprt);
    emitted before the debugfs unregistration and wait-queue teardown.

@@ -1963,8 +1966,6 @@ static void xprt_destroy(struct rpc_xprt *xprt)
    The "destroying transport %p" dprintk is removed.
|
@ -892,8 +892,8 @@ rpcrdma_marshal_req(struct rpcrdma_xprt *r_xprt, struct rpc_rqst *rqst)
|
|||
* or privacy, direct data placement of individual data items
|
||||
* is not allowed.
|
||||
*/
|
||||
ddp_allowed = !(rqst->rq_cred->cr_auth->au_flags &
|
||||
RPCAUTH_AUTH_DATATOUCH);
|
||||
ddp_allowed = !test_bit(RPCAUTH_AUTH_DATATOUCH,
|
||||
&rqst->rq_cred->cr_auth->au_flags);
|
||||
|
||||
/*
|
||||
* Chunks needed for results?
|
||||
|
|
|
@@ -68,7 +68,7 @@ (xprtrdma transport tunables)
    -	unsigned int xprt_rdma_slot_table_entries = RPCRDMA_DEF_SLOT_TABLE;
    +	static unsigned int xprt_rdma_slot_table_entries = RPCRDMA_DEF_SLOT_TABLE;

@@ -281,8 +281,6 @@ xprt_rdma_destroy(struct rpc_xprt *xprt)
    The trace_xprtrdma_op_destroy(r_xprt) call is removed; the generic
    xprt_destroy lifetime event now covers this path.

@@ -365,10 +363,6 @@ xprt_setup_rdma(struct xprt_create *args)
    The dprintk("RPC: %s: %s:%s\n", __func__, ...) of the new transport's
    address and port strings is removed; the function goes straight to
    trace_xprtrdma_create(new_xprt) and return xprt.

@@ -385,8 +379,6 @@ void xprt_rdma_close(struct rpc_xprt *xprt)
    The trace_xprtrdma_op_close(r_xprt) call is removed.
@@ -141,7 +141,6 @@ void rpcrdma_flush_disconnect(struct ib_cq *cq, struct ib_wc *wc)
    This hunk nets one line shorter; the lines shown cover the completion-error
    path that, while re_connect_status == 1, sets re_connect_status to
    -ECONNABORTED, emits trace_xprtrdma_flush_dct(r_xprt, wc->status) and calls
    xprt_force_disconnect(xprt).