SUNRPC: Ensure we always bump the backlog queue in xprt_free_slot
Whenever we free a slot, we know that the resulting xprt->num_reqs will be less than xprt->max_reqs, so we know that we can release at least one backlogged rpc_task.

Signed-off-by: Trond Myklebust <Trond.Myklebust@netapp.com>
Cc: stable@vger.kernel.org [>=3.1]
parent 7fdcf13b29
commit c25573b513
@@ -995,13 +995,11 @@ static void xprt_alloc_slot(struct rpc_task *task)
 
 static void xprt_free_slot(struct rpc_xprt *xprt, struct rpc_rqst *req)
 {
-	if (xprt_dynamic_free_slot(xprt, req))
-		return;
-
-	memset(req, 0, sizeof(*req));	/* mark unused */
-
 	spin_lock(&xprt->reserve_lock);
-	list_add(&req->rq_list, &xprt->free);
+	if (!xprt_dynamic_free_slot(xprt, req)) {
+		memset(req, 0, sizeof(*req));	/* mark unused */
+		list_add(&req->rq_list, &xprt->free);
+	}
 	rpc_wake_up_next(&xprt->backlog);
 	spin_unlock(&xprt->reserve_lock);
 }
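
For readers who prefer the result to the diff, here is a minimal sketch of how xprt_free_slot() reads once the hunk above is applied. It is reconstructed from the patch itself; the helpers it calls (xprt_dynamic_free_slot(), rpc_wake_up_next()) and the struct rpc_xprt / struct rpc_rqst layouts are assumed to come from the surrounding SUNRPC sources and are not reproduced here.

/*
 * Post-patch xprt_free_slot(), reconstructed from the hunk above.
 * xprt_dynamic_free_slot(), rpc_wake_up_next(), struct rpc_xprt and
 * struct rpc_rqst are assumed from the surrounding SUNRPC code.
 */
static void xprt_free_slot(struct rpc_xprt *xprt, struct rpc_rqst *req)
{
	spin_lock(&xprt->reserve_lock);
	if (!xprt_dynamic_free_slot(xprt, req)) {
		/* Not a dynamically allocated slot: return it to the free list. */
		memset(req, 0, sizeof(*req));	/* mark unused */
		list_add(&req->rq_list, &xprt->free);
	}
	/*
	 * Wake the backlog unconditionally: whichever branch was taken,
	 * xprt->num_reqs is now below xprt->max_reqs, so at least one
	 * backlogged rpc_task can be given a slot.
	 */
	rpc_wake_up_next(&xprt->backlog);
	spin_unlock(&xprt->reserve_lock);
}

The point of the fix is that rpc_wake_up_next() now sits outside the conditional, still under reserve_lock, so the backlog queue is bumped on every slot free rather than only when the slot is returned to xprt->free.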