Merge branch 'nvme-4.11-rc' of git://git.infradead.org/nvme into for-linus
Sagi writes:

This consists of some fixes for issues reported lately:

- loop and rdma host driver CPU hotplug fixes
- fix a loop use-after-free
- nvmet percpu_ref confirmation fix to reliably fail ongoing requests
- nvmet-rdma: fix a dereference of uninitialized commands
commit a457d08e11
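Below, the nvmet percpu_ref fix switches nvmet_sq_destroy() from percpu_ref_kill() to percpu_ref_kill_and_confirm() plus a confirm_done completion. As a rough sketch of that drain pattern, here is a hypothetical demo module (the demo_* names are illustrative, not part of the patch):

/* Minimal sketch of the kill-and-confirm drain sequence used by
 * nvmet_sq_destroy() in the diff below. All demo_* names are
 * hypothetical. */
#include <linux/module.h>
#include <linux/percpu-refcount.h>
#include <linux/completion.h>

static struct percpu_ref demo_ref;
static struct completion demo_confirm_done;     /* plays sq->confirm_done */
static struct completion demo_free_done;        /* plays sq->free_done */

/* Runs once the ref has switched to atomic mode: from this point on,
 * percpu_ref_tryget_live() fails everywhere, so no new users can enter. */
static void demo_confirm(struct percpu_ref *ref)
{
        complete(&demo_confirm_done);
}

/* Runs when the last reference is dropped: all in-flight users are done. */
static void demo_release(struct percpu_ref *ref)
{
        complete(&demo_free_done);
}

static int __init demo_init(void)
{
        int ret;

        init_completion(&demo_confirm_done);
        init_completion(&demo_free_done);
        ret = percpu_ref_init(&demo_ref, demo_release, 0, GFP_KERNEL);
        if (ret)
                return ret;

        /* Teardown: first fence off new users and wait for the fence to
         * take effect, then wait for existing users to drain. */
        percpu_ref_kill_and_confirm(&demo_ref, demo_confirm);
        wait_for_completion(&demo_confirm_done);
        wait_for_completion(&demo_free_done);
        percpu_ref_exit(&demo_ref);
        return 0;
}
module_init(demo_init);

MODULE_LICENSE("GPL");

percpu_ref_kill() returns before the RCU grace period that makes the kill visible to percpu_ref_tryget_live() callers on other CPUs, so a late request could still take a reference while teardown proceeds; waiting for the confirm callback closes that window.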
--- a/drivers/nvme/host/rdma.c
+++ b/drivers/nvme/host/rdma.c
@@ -343,8 +343,6 @@ static int __nvme_rdma_init_request(struct nvme_rdma_ctrl *ctrl,
 	struct ib_device *ibdev = dev->dev;
 	int ret;
 
-	BUG_ON(queue_idx >= ctrl->queue_count);
-
 	ret = nvme_rdma_alloc_qe(ibdev, &req->sqe, sizeof(struct nvme_command),
 			DMA_TO_DEVICE);
 	if (ret)
@@ -652,8 +650,22 @@ static int nvme_rdma_connect_io_queues(struct nvme_rdma_ctrl *ctrl)
 
 static int nvme_rdma_init_io_queues(struct nvme_rdma_ctrl *ctrl)
 {
+	struct nvmf_ctrl_options *opts = ctrl->ctrl.opts;
+	unsigned int nr_io_queues;
 	int i, ret;
 
+	nr_io_queues = min(opts->nr_io_queues, num_online_cpus());
+	ret = nvme_set_queue_count(&ctrl->ctrl, &nr_io_queues);
+	if (ret)
+		return ret;
+
+	ctrl->queue_count = nr_io_queues + 1;
+	if (ctrl->queue_count < 2)
+		return 0;
+
+	dev_info(ctrl->ctrl.device,
+		"creating %d I/O queues.\n", nr_io_queues);
+
 	for (i = 1; i < ctrl->queue_count; i++) {
 		ret = nvme_rdma_init_queue(ctrl, i,
 				ctrl->ctrl.opts->queue_size);
@@ -1791,20 +1803,8 @@ static const struct nvme_ctrl_ops nvme_rdma_ctrl_ops = {
 
 static int nvme_rdma_create_io_queues(struct nvme_rdma_ctrl *ctrl)
 {
-	struct nvmf_ctrl_options *opts = ctrl->ctrl.opts;
 	int ret;
 
-	ret = nvme_set_queue_count(&ctrl->ctrl, &opts->nr_io_queues);
-	if (ret)
-		return ret;
-
-	ctrl->queue_count = opts->nr_io_queues + 1;
-	if (ctrl->queue_count < 2)
-		return 0;
-
-	dev_info(ctrl->ctrl.device,
-		"creating %d I/O queues.\n", opts->nr_io_queues);
-
 	ret = nvme_rdma_init_io_queues(ctrl);
 	if (ret)
 		return ret;
--- a/drivers/nvme/target/core.c
+++ b/drivers/nvme/target/core.c
@@ -425,6 +425,13 @@ void nvmet_sq_setup(struct nvmet_ctrl *ctrl, struct nvmet_sq *sq,
 	ctrl->sqs[qid] = sq;
 }
 
+static void nvmet_confirm_sq(struct percpu_ref *ref)
+{
+	struct nvmet_sq *sq = container_of(ref, struct nvmet_sq, ref);
+
+	complete(&sq->confirm_done);
+}
+
 void nvmet_sq_destroy(struct nvmet_sq *sq)
 {
 	/*
@@ -433,7 +440,8 @@ void nvmet_sq_destroy(struct nvmet_sq *sq)
 	 */
 	if (sq->ctrl && sq->ctrl->sqs && sq->ctrl->sqs[0] == sq)
 		nvmet_async_events_free(sq->ctrl);
-	percpu_ref_kill(&sq->ref);
+	percpu_ref_kill_and_confirm(&sq->ref, nvmet_confirm_sq);
+	wait_for_completion(&sq->confirm_done);
 	wait_for_completion(&sq->free_done);
 	percpu_ref_exit(&sq->ref);
 
@@ -461,6 +469,7 @@ int nvmet_sq_init(struct nvmet_sq *sq)
 		return ret;
 	}
 	init_completion(&sq->free_done);
+	init_completion(&sq->confirm_done);
 
 	return 0;
 }
--- a/drivers/nvme/target/loop.c
+++ b/drivers/nvme/target/loop.c
@@ -223,8 +223,6 @@ static void nvme_loop_submit_async_event(struct nvme_ctrl *arg, int aer_idx)
 static int nvme_loop_init_iod(struct nvme_loop_ctrl *ctrl,
 		struct nvme_loop_iod *iod, unsigned int queue_idx)
 {
-	BUG_ON(queue_idx >= ctrl->queue_count);
-
 	iod->req.cmd = &iod->cmd;
 	iod->req.rsp = &iod->rsp;
 	iod->queue = &ctrl->queues[queue_idx];
@@ -288,9 +286,9 @@ static struct blk_mq_ops nvme_loop_admin_mq_ops = {
 
 static void nvme_loop_destroy_admin_queue(struct nvme_loop_ctrl *ctrl)
 {
+	nvmet_sq_destroy(&ctrl->queues[0].nvme_sq);
 	blk_cleanup_queue(ctrl->ctrl.admin_q);
 	blk_mq_free_tag_set(&ctrl->admin_tag_set);
-	nvmet_sq_destroy(&ctrl->queues[0].nvme_sq);
 }
 
 static void nvme_loop_free_ctrl(struct nvme_ctrl *nctrl)
@@ -314,6 +312,43 @@ static void nvme_loop_free_ctrl(struct nvme_ctrl *nctrl)
 	kfree(ctrl);
 }
 
+static void nvme_loop_destroy_io_queues(struct nvme_loop_ctrl *ctrl)
+{
+	int i;
+
+	for (i = 1; i < ctrl->queue_count; i++)
+		nvmet_sq_destroy(&ctrl->queues[i].nvme_sq);
+}
+
+static int nvme_loop_init_io_queues(struct nvme_loop_ctrl *ctrl)
+{
+	struct nvmf_ctrl_options *opts = ctrl->ctrl.opts;
+	unsigned int nr_io_queues;
+	int ret, i;
+
+	nr_io_queues = min(opts->nr_io_queues, num_online_cpus());
+	ret = nvme_set_queue_count(&ctrl->ctrl, &nr_io_queues);
+	if (ret || !nr_io_queues)
+		return ret;
+
+	dev_info(ctrl->ctrl.device, "creating %d I/O queues.\n", nr_io_queues);
+
+	for (i = 1; i <= nr_io_queues; i++) {
+		ctrl->queues[i].ctrl = ctrl;
+		ret = nvmet_sq_init(&ctrl->queues[i].nvme_sq);
+		if (ret)
+			goto out_destroy_queues;
+
+		ctrl->queue_count++;
+	}
+
+	return 0;
+
+out_destroy_queues:
+	nvme_loop_destroy_io_queues(ctrl);
+	return ret;
+}
+
 static int nvme_loop_configure_admin_queue(struct nvme_loop_ctrl *ctrl)
 {
 	int error;
@@ -385,17 +420,13 @@ static int nvme_loop_configure_admin_queue(struct nvme_loop_ctrl *ctrl)
 
 static void nvme_loop_shutdown_ctrl(struct nvme_loop_ctrl *ctrl)
 {
-	int i;
-
 	nvme_stop_keep_alive(&ctrl->ctrl);
 
 	if (ctrl->queue_count > 1) {
 		nvme_stop_queues(&ctrl->ctrl);
 		blk_mq_tagset_busy_iter(&ctrl->tag_set,
 					nvme_cancel_request, &ctrl->ctrl);
-
-		for (i = 1; i < ctrl->queue_count; i++)
-			nvmet_sq_destroy(&ctrl->queues[i].nvme_sq);
+		nvme_loop_destroy_io_queues(ctrl);
 	}
 
 	if (ctrl->ctrl.state == NVME_CTRL_LIVE)
@@ -467,19 +498,14 @@ static void nvme_loop_reset_ctrl_work(struct work_struct *work)
 	if (ret)
 		goto out_disable;
 
-	for (i = 1; i <= ctrl->ctrl.opts->nr_io_queues; i++) {
-		ctrl->queues[i].ctrl = ctrl;
-		ret = nvmet_sq_init(&ctrl->queues[i].nvme_sq);
-		if (ret)
-			goto out_free_queues;
+	ret = nvme_loop_init_io_queues(ctrl);
+	if (ret)
+		goto out_destroy_admin;
 
-		ctrl->queue_count++;
-	}
-
-	for (i = 1; i <= ctrl->ctrl.opts->nr_io_queues; i++) {
+	for (i = 1; i < ctrl->queue_count; i++) {
 		ret = nvmf_connect_io_queue(&ctrl->ctrl, i);
 		if (ret)
-			goto out_free_queues;
+			goto out_destroy_io;
 	}
 
 	changed = nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_LIVE);
@@ -492,9 +518,9 @@ static void nvme_loop_reset_ctrl_work(struct work_struct *work)
 
 	return;
 
-out_free_queues:
-	for (i = 1; i < ctrl->queue_count; i++)
-		nvmet_sq_destroy(&ctrl->queues[i].nvme_sq);
+out_destroy_io:
+	nvme_loop_destroy_io_queues(ctrl);
+out_destroy_admin:
 	nvme_loop_destroy_admin_queue(ctrl);
 out_disable:
 	dev_warn(ctrl->ctrl.device, "Removing after reset failure\n");
@@ -533,25 +559,12 @@ static const struct nvme_ctrl_ops nvme_loop_ctrl_ops = {
 
 static int nvme_loop_create_io_queues(struct nvme_loop_ctrl *ctrl)
 {
-	struct nvmf_ctrl_options *opts = ctrl->ctrl.opts;
 	int ret, i;
 
-	ret = nvme_set_queue_count(&ctrl->ctrl, &opts->nr_io_queues);
-	if (ret || !opts->nr_io_queues)
+	ret = nvme_loop_init_io_queues(ctrl);
+	if (ret)
 		return ret;
 
-	dev_info(ctrl->ctrl.device, "creating %d I/O queues.\n",
-		opts->nr_io_queues);
-
-	for (i = 1; i <= opts->nr_io_queues; i++) {
-		ctrl->queues[i].ctrl = ctrl;
-		ret = nvmet_sq_init(&ctrl->queues[i].nvme_sq);
-		if (ret)
-			goto out_destroy_queues;
-
-		ctrl->queue_count++;
-	}
-
 	memset(&ctrl->tag_set, 0, sizeof(ctrl->tag_set));
 	ctrl->tag_set.ops = &nvme_loop_mq_ops;
 	ctrl->tag_set.queue_depth = ctrl->ctrl.opts->queue_size;
@@ -575,7 +588,7 @@ static int nvme_loop_create_io_queues(struct nvme_loop_ctrl *ctrl)
 		goto out_free_tagset;
 	}
 
-	for (i = 1; i <= opts->nr_io_queues; i++) {
+	for (i = 1; i < ctrl->queue_count; i++) {
 		ret = nvmf_connect_io_queue(&ctrl->ctrl, i);
 		if (ret)
 			goto out_cleanup_connect_q;
@@ -588,8 +601,7 @@ static int nvme_loop_create_io_queues(struct nvme_loop_ctrl *ctrl)
 out_free_tagset:
 	blk_mq_free_tag_set(&ctrl->tag_set);
 out_destroy_queues:
-	for (i = 1; i < ctrl->queue_count; i++)
-		nvmet_sq_destroy(&ctrl->queues[i].nvme_sq);
+	nvme_loop_destroy_io_queues(ctrl);
 	return ret;
 }
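One way to read the nvme_loop_destroy_admin_queue() reordering above: nvmet_sq_destroy() drains in-flight target requests, and their completions run back through the host admin queue and tag set, so the sq must be destroyed before blk_cleanup_queue() and blk_mq_free_tag_set(), not after. A minimal userspace analogue of that ordering rule, with hypothetical names throughout:

/* Sketch: stop and drain the completion source before freeing the
 * memory its completions write to. The buggy order freed "tag_set"
 * first. Build with: cc -pthread demo.c */
#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>

static int *tag_set;                    /* stands in for blk-mq tags */
static atomic_int stop;

static void *sq_worker(void *arg)       /* stands in for the nvmet sq */
{
        while (!atomic_load(&stop)) {
                tag_set[0]++;           /* "completes" a request */
                usleep(1000);
        }
        return NULL;
}

int main(void)
{
        pthread_t worker;
        int completions;

        tag_set = calloc(16, sizeof(*tag_set));
        if (!tag_set)
                return 1;
        pthread_create(&worker, NULL, sq_worker, NULL);
        usleep(10000);

        /* Fixed order: quiesce and drain the producer first
         * (nvmet_sq_destroy)... */
        atomic_store(&stop, 1);
        pthread_join(worker, NULL);
        completions = tag_set[0];

        /* ...and only then free what its completions touch
         * (blk_cleanup_queue + blk_mq_free_tag_set). Swapping these two
         * steps is the use-after-free the patch removes. */
        free(tag_set);
        printf("clean shutdown after %d completions\n", completions);
        return 0;
}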
--- a/drivers/nvme/target/nvmet.h
+++ b/drivers/nvme/target/nvmet.h
@@ -73,6 +73,7 @@ struct nvmet_sq {
 	u16			qid;
 	u16			size;
 	struct completion	free_done;
+	struct completion	confirm_done;
 };
 
 /**
--- a/drivers/nvme/target/rdma.c
+++ b/drivers/nvme/target/rdma.c
@@ -703,11 +703,6 @@ static void nvmet_rdma_handle_command(struct nvmet_rdma_queue *queue,
 {
 	u16 status;
 
-	cmd->queue = queue;
-	cmd->n_rdma = 0;
-	cmd->req.port = queue->port;
-
-
 	ib_dma_sync_single_for_cpu(queue->dev->device,
 		cmd->cmd->sge[0].addr, cmd->cmd->sge[0].length,
 		DMA_FROM_DEVICE);
@@ -760,9 +755,12 @@ static void nvmet_rdma_recv_done(struct ib_cq *cq, struct ib_wc *wc)
 
 	cmd->queue = queue;
 	rsp = nvmet_rdma_get_rsp(queue);
+	rsp->queue = queue;
 	rsp->cmd = cmd;
 	rsp->flags = 0;
 	rsp->req.cmd = cmd->nvme_cmd;
+	rsp->req.port = queue->port;
+	rsp->n_rdma = 0;
 
 	if (unlikely(queue->state != NVMET_RDMA_Q_LIVE)) {
 		unsigned long flags;
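The nvmet-rdma hunks above move rsp initialization from nvmet_rdma_handle_command() up into nvmet_rdma_recv_done(), ahead of the queue-state check: a command that arrives before the queue is live gets parked on rsp_wait_list (or put back), and both of those paths can read fields the old code had not yet set. A small self-contained C sketch of that general bug shape, with hypothetical names:

/* Sketch: initialize every field of an object before deferring it to a
 * wait list, because the code that later drains (or releases) the list
 * reads those fields. All names here are hypothetical. */
#include <stdio.h>
#include <stdlib.h>

struct rsp {
        struct rsp *next;
        int n_rdma;             /* read on the release path */
        const char *port;       /* read when the rsp finally executes */
};

static struct rsp *wait_list;   /* plays queue->rsp_wait_list */

static void release_rsp(struct rsp *r)
{
        printf("release: n_rdma=%d port=%s\n", r->n_rdma, r->port);
        free(r);
}

static void recv_one(int queue_live)
{
        struct rsp *r = malloc(sizeof(*r));

        if (!r)
                return;
        /* The fix: set all fields here, before any deferral... */
        r->n_rdma = 0;
        r->port = "port0";
        r->next = NULL;

        if (!queue_live) {
                /* ...so a deferred rsp is safe to execute or release
                 * later. Deferring first and initializing in the
                 * handler, as the old code did, leaves these fields
                 * garbage on this path. */
                r->next = wait_list;
                wait_list = r;
                return;
        }
        release_rsp(r);         /* "handle the command" immediately */
}

int main(void)
{
        recv_one(0);            /* queue not live: rsp gets deferred */
        recv_one(1);            /* queue live: handled right away */

        while (wait_list) {     /* later: the queue goes live and drains */
                struct rsp *r = wait_list;

                wait_list = r->next;
                release_rsp(r);
        }
        return 0;
}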