isci: unify isci_request and scic_sds_request

They are one and the same object, so remove the distinction.  The
near-duplicate fields (owning_controller and isci_host) will be cleaned
up after the scic_sds_controller/isci_host unification.

Reported-by: Christoph Hellwig <hch@lst.de>
Signed-off-by: Dan Williams <dan.j.williams@intel.com>
Dan Williams 2011-06-27 14:57:03 -07:00
parent ba7cb22342
commit 5076a1a97e
13 changed files with 514 additions and 615 deletions
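For readers skimming the hunks below, here is a minimal sketch of the shape of the change; it is not the driver source. Before this commit a single I/O request was represented by two nested objects, and call sites converted between them with container_of() and &ireq->sci; afterwards all of the fields live in one struct isci_request. The struct bodies are elided, and the "_before" suffix is invented here purely so both layouts can be shown side by side.

#include <linux/types.h>	/* u16 */
#include <linux/kernel.h>	/* container_of() */

struct scic_sds_controller;
struct isci_host;

/* Before: the core (SCIC) request state lived in its own struct ... */
struct scic_sds_request {
	struct scic_sds_controller *owning_controller;
	u16 io_tag;
	/* ... remaining core request state ... */
};

/* ... which was embedded in the libsas-facing request object. */
struct isci_request_before {
	struct isci_host *isci_host;
	unsigned long flags;
	/* ... remaining LLDD request state ... */
	struct scic_sds_request sci;	/* embedded core request */
};

/* Call sites converted between the two views with a helper like this: */
#define sci_req_to_ireq(sci_req) \
	container_of(sci_req, struct isci_request_before, sci)

/*
 * After: struct isci_request carries both sets of fields directly, so
 * the "&ireq->sci" and "sci_req_to_ireq(req)" conversions seen in the
 * hunks below simply become "ireq", and the core entry points take
 * struct isci_request * instead of struct scic_sds_request *.
 */

This is why most hunks in the diff are mechanical: a parameter or local of type struct scic_sds_request * becomes struct isci_request *, and the indirections through ->sci drop away.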


@ -258,21 +258,20 @@ static void scic_sds_controller_task_completion(struct scic_sds_controller *scic
u32 index = SCU_GET_COMPLETION_INDEX(completion_entry);
struct isci_host *ihost = scic_to_ihost(scic);
struct isci_request *ireq = ihost->reqs[index];
struct scic_sds_request *sci_req = &ireq->sci;
/* Make sure that we really want to process this IO request */
if (test_bit(IREQ_ACTIVE, &ireq->flags) &&
sci_req->io_tag != SCI_CONTROLLER_INVALID_IO_TAG &&
ISCI_TAG_SEQ(sci_req->io_tag) == scic->io_request_sequence[index])
ireq->io_tag != SCI_CONTROLLER_INVALID_IO_TAG &&
ISCI_TAG_SEQ(ireq->io_tag) == scic->io_request_sequence[index])
/* Yep this is a valid io request pass it along to the io request handler */
scic_sds_io_request_tc_completion(sci_req, completion_entry);
scic_sds_io_request_tc_completion(ireq, completion_entry);
}
static void scic_sds_controller_sdma_completion(struct scic_sds_controller *scic,
u32 completion_entry)
{
u32 index;
struct scic_sds_request *io_request;
struct isci_request *ireq;
struct scic_sds_remote_device *device;
index = SCU_GET_COMPLETION_INDEX(completion_entry);
@ -280,41 +279,27 @@ static void scic_sds_controller_sdma_completion(struct scic_sds_controller *scic
switch (scu_get_command_request_type(completion_entry)) {
case SCU_CONTEXT_COMMAND_REQUEST_TYPE_POST_TC:
case SCU_CONTEXT_COMMAND_REQUEST_TYPE_DUMP_TC:
io_request = &scic_to_ihost(scic)->reqs[index]->sci;
dev_warn(scic_to_dev(scic),
"%s: SCIC SDS Completion type SDMA %x for io request "
"%p\n",
__func__,
completion_entry,
io_request);
ireq = scic_to_ihost(scic)->reqs[index];
dev_warn(scic_to_dev(scic), "%s: %x for io request %p\n",
__func__, completion_entry, ireq);
/* @todo For a post TC operation we need to fail the IO
* request
*/
break;
case SCU_CONTEXT_COMMAND_REQUEST_TYPE_DUMP_RNC:
case SCU_CONTEXT_COMMAND_REQUEST_TYPE_OTHER_RNC:
case SCU_CONTEXT_COMMAND_REQUEST_TYPE_POST_RNC:
device = scic->device_table[index];
dev_warn(scic_to_dev(scic),
"%s: SCIC SDS Completion type SDMA %x for remote "
"device %p\n",
__func__,
completion_entry,
device);
dev_warn(scic_to_dev(scic), "%s: %x for device %p\n",
__func__, completion_entry, device);
/* @todo For a port RNC operation we need to fail the
* device
*/
break;
default:
dev_warn(scic_to_dev(scic),
"%s: SCIC SDS Completion unknown SDMA completion "
"type %x\n",
__func__,
completion_entry);
dev_warn(scic_to_dev(scic), "%s: unknown completion type %x\n",
__func__, completion_entry);
break;
}
}
@ -385,8 +370,8 @@ static void scic_sds_controller_event_completion(struct scic_sds_controller *sci
u32 completion_entry)
{
struct isci_host *ihost = scic_to_ihost(scic);
struct scic_sds_request *io_request;
struct scic_sds_remote_device *device;
struct isci_request *ireq;
struct scic_sds_phy *phy;
u32 index;
@ -418,17 +403,17 @@ static void scic_sds_controller_event_completion(struct scic_sds_controller *sci
break;
case SCU_EVENT_TYPE_TRANSPORT_ERROR:
io_request = &ihost->reqs[index]->sci;
scic_sds_io_request_event_handler(io_request, completion_entry);
ireq = ihost->reqs[index];
scic_sds_io_request_event_handler(ireq, completion_entry);
break;
case SCU_EVENT_TYPE_PTX_SCHEDULE_EVENT:
switch (scu_get_event_specifier(completion_entry)) {
case SCU_EVENT_SPECIFIC_SMP_RESPONSE_NO_PE:
case SCU_EVENT_SPECIFIC_TASK_TIMEOUT:
io_request = &ihost->reqs[index]->sci;
if (io_request != NULL)
scic_sds_io_request_event_handler(io_request, completion_entry);
ireq = ihost->reqs[index];
if (ireq != NULL)
scic_sds_io_request_event_handler(ireq, completion_entry);
else
dev_warn(scic_to_dev(scic),
"%s: SCIC Controller 0x%p received "
@ -1185,7 +1170,7 @@ static void isci_host_completion_routine(unsigned long data)
}
spin_lock_irq(&isci_host->scic_lock);
isci_free_tag(isci_host, request->sci.io_tag);
isci_free_tag(isci_host, request->io_tag);
spin_unlock_irq(&isci_host->scic_lock);
}
list_for_each_entry_safe(request, next_request, &errored_request_list,
@ -1222,7 +1207,7 @@ static void isci_host_completion_routine(unsigned long data)
* of pending requests.
*/
list_del_init(&request->dev_node);
isci_free_tag(isci_host, request->sci.io_tag);
isci_free_tag(isci_host, request->io_tag);
spin_unlock_irq(&isci_host->scic_lock);
}
}
@ -2486,8 +2471,8 @@ int isci_host_init(struct isci_host *isci_host)
if (!ireq)
return -ENOMEM;
ireq->sci.tc = &isci_host->sci.task_context_table[i];
ireq->sci.owning_controller = &isci_host->sci;
ireq->tc = &isci_host->sci.task_context_table[i];
ireq->owning_controller = &isci_host->sci;
spin_lock_init(&ireq->state_lock);
ireq->request_daddr = dma;
ireq->isci_host = isci_host;
@ -2600,7 +2585,7 @@ void scic_sds_controller_post_request(
writel(request, &scic->smu_registers->post_context_port);
}
struct scic_sds_request *scic_request_by_tag(struct scic_sds_controller *scic, u16 io_tag)
struct isci_request *scic_request_by_tag(struct scic_sds_controller *scic, u16 io_tag)
{
u16 task_index;
u16 task_sequence;
@ -2614,7 +2599,7 @@ struct scic_sds_request *scic_request_by_tag(struct scic_sds_controller *scic, u
task_sequence = ISCI_TAG_SEQ(io_tag);
if (task_sequence == scic->io_request_sequence[task_index])
return &ireq->sci;
return ireq;
}
}
@ -2814,7 +2799,7 @@ enum sci_status isci_free_tag(struct isci_host *ihost, u16 io_tag)
*/
enum sci_status scic_controller_start_io(struct scic_sds_controller *scic,
struct scic_sds_remote_device *rdev,
struct scic_sds_request *req)
struct isci_request *ireq)
{
enum sci_status status;
@ -2823,12 +2808,12 @@ enum sci_status scic_controller_start_io(struct scic_sds_controller *scic,
return SCI_FAILURE_INVALID_STATE;
}
status = scic_sds_remote_device_start_io(scic, rdev, req);
status = scic_sds_remote_device_start_io(scic, rdev, ireq);
if (status != SCI_SUCCESS)
return status;
set_bit(IREQ_ACTIVE, &sci_req_to_ireq(req)->flags);
scic_sds_controller_post_request(scic, scic_sds_request_get_post_context(req));
set_bit(IREQ_ACTIVE, &ireq->flags);
scic_sds_controller_post_request(scic, scic_sds_request_get_post_context(ireq));
return SCI_SUCCESS;
}
@ -2851,7 +2836,7 @@ enum sci_status scic_controller_start_io(struct scic_sds_controller *scic,
enum sci_status scic_controller_terminate_request(
struct scic_sds_controller *scic,
struct scic_sds_remote_device *rdev,
struct scic_sds_request *req)
struct isci_request *ireq)
{
enum sci_status status;
@ -2861,7 +2846,7 @@ enum sci_status scic_controller_terminate_request(
return SCI_FAILURE_INVALID_STATE;
}
status = scic_sds_io_request_terminate(req);
status = scic_sds_io_request_terminate(ireq);
if (status != SCI_SUCCESS)
return status;
@ -2870,7 +2855,7 @@ enum sci_status scic_controller_terminate_request(
* request sub-type.
*/
scic_sds_controller_post_request(scic,
scic_sds_request_get_post_context(req) |
scic_sds_request_get_post_context(ireq) |
SCU_CONTEXT_COMMAND_REQUEST_POST_TC_ABORT);
return SCI_SUCCESS;
}
@ -2889,7 +2874,7 @@ enum sci_status scic_controller_terminate_request(
enum sci_status scic_controller_complete_io(
struct scic_sds_controller *scic,
struct scic_sds_remote_device *rdev,
struct scic_sds_request *request)
struct isci_request *ireq)
{
enum sci_status status;
u16 index;
@ -2899,12 +2884,12 @@ enum sci_status scic_controller_complete_io(
/* XXX: Implement this function */
return SCI_FAILURE;
case SCIC_READY:
status = scic_sds_remote_device_complete_io(scic, rdev, request);
status = scic_sds_remote_device_complete_io(scic, rdev, ireq);
if (status != SCI_SUCCESS)
return status;
index = ISCI_TAG_TCI(request->io_tag);
clear_bit(IREQ_ACTIVE, &sci_req_to_ireq(request)->flags);
index = ISCI_TAG_TCI(ireq->io_tag);
clear_bit(IREQ_ACTIVE, &ireq->flags);
return SCI_SUCCESS;
default:
dev_warn(scic_to_dev(scic), "invalid state to complete I/O");
@ -2913,17 +2898,17 @@ enum sci_status scic_controller_complete_io(
}
enum sci_status scic_controller_continue_io(struct scic_sds_request *sci_req)
enum sci_status scic_controller_continue_io(struct isci_request *ireq)
{
struct scic_sds_controller *scic = sci_req->owning_controller;
struct scic_sds_controller *scic = ireq->owning_controller;
if (scic->sm.current_state_id != SCIC_READY) {
dev_warn(scic_to_dev(scic), "invalid state to continue I/O");
return SCI_FAILURE_INVALID_STATE;
}
set_bit(IREQ_ACTIVE, &sci_req_to_ireq(sci_req)->flags);
scic_sds_controller_post_request(scic, scic_sds_request_get_post_context(sci_req));
set_bit(IREQ_ACTIVE, &ireq->flags);
scic_sds_controller_post_request(scic, scic_sds_request_get_post_context(ireq));
return SCI_SUCCESS;
}
@ -2939,9 +2924,8 @@ enum sci_status scic_controller_continue_io(struct scic_sds_request *sci_req)
enum sci_task_status scic_controller_start_task(
struct scic_sds_controller *scic,
struct scic_sds_remote_device *rdev,
struct scic_sds_request *req)
struct isci_request *ireq)
{
struct isci_request *ireq = sci_req_to_ireq(req);
enum sci_status status;
if (scic->sm.current_state_id != SCIC_READY) {
@ -2952,7 +2936,7 @@ enum sci_task_status scic_controller_start_task(
return SCI_TASK_FAILURE_INVALID_STATE;
}
status = scic_sds_remote_device_start_task(scic, rdev, req);
status = scic_sds_remote_device_start_task(scic, rdev, ireq);
switch (status) {
case SCI_FAILURE_RESET_DEVICE_PARTIAL_SUCCESS:
set_bit(IREQ_ACTIVE, &ireq->flags);
@ -2967,7 +2951,7 @@ enum sci_task_status scic_controller_start_task(
set_bit(IREQ_ACTIVE, &ireq->flags);
scic_sds_controller_post_request(scic,
scic_sds_request_get_post_context(req));
scic_sds_request_get_post_context(ireq));
break;
default:
break;


@ -64,7 +64,7 @@
#include "unsolicited_frame_control.h"
#include "probe_roms.h"
struct scic_sds_request;
struct isci_request;
struct scu_task_context;
@ -601,7 +601,7 @@ union scu_remote_node_context *scic_sds_controller_get_remote_node_context_buffe
struct scic_sds_controller *scic,
u16 node_id);
struct scic_sds_request *scic_request_by_tag(struct scic_sds_controller *scic,
struct isci_request *scic_request_by_tag(struct scic_sds_controller *scic,
u16 io_tag);
void scic_sds_controller_power_control_queue_insert(
@ -628,11 +628,11 @@ void scic_sds_controller_remote_device_stopped(
void scic_sds_controller_copy_task_context(
struct scic_sds_controller *scic,
struct scic_sds_request *this_request);
struct isci_request *ireq);
void scic_sds_controller_register_setup(struct scic_sds_controller *scic);
enum sci_status scic_controller_continue_io(struct scic_sds_request *sci_req);
enum sci_status scic_controller_continue_io(struct isci_request *ireq);
int isci_host_scan_finished(struct Scsi_Host *, unsigned long);
void isci_host_scan_start(struct Scsi_Host *);
u16 isci_alloc_tag(struct isci_host *ihost);
@ -665,22 +665,22 @@ void scic_controller_disable_interrupts(
enum sci_status scic_controller_start_io(
struct scic_sds_controller *scic,
struct scic_sds_remote_device *remote_device,
struct scic_sds_request *io_request);
struct isci_request *ireq);
enum sci_task_status scic_controller_start_task(
struct scic_sds_controller *scic,
struct scic_sds_remote_device *remote_device,
struct scic_sds_request *task_request);
struct isci_request *ireq);
enum sci_status scic_controller_terminate_request(
struct scic_sds_controller *scic,
struct scic_sds_remote_device *remote_device,
struct scic_sds_request *request);
struct isci_request *ireq);
enum sci_status scic_controller_complete_io(
struct scic_sds_controller *scic,
struct scic_sds_remote_device *remote_device,
struct scic_sds_request *io_request);
struct isci_request *ireq);
void scic_sds_port_configuration_agent_construct(
struct scic_sds_port_configuration_agent *port_agent);


@ -983,7 +983,7 @@ enum sci_status scic_sds_phy_frame_handler(struct scic_sds_phy *sci_phy,
"%s: in wrong state: %d\n", __func__, state);
return SCI_FAILURE_INVALID_STATE;
}
}
static void scic_sds_phy_starting_initial_substate_enter(struct sci_base_state_machine *sm)


@ -1611,7 +1611,7 @@ enum sci_status scic_sds_port_link_down(struct scic_sds_port *sci_port,
enum sci_status scic_sds_port_start_io(struct scic_sds_port *sci_port,
struct scic_sds_remote_device *sci_dev,
struct scic_sds_request *sci_req)
struct isci_request *ireq)
{
enum scic_sds_port_states state;
@ -1631,7 +1631,7 @@ enum sci_status scic_sds_port_start_io(struct scic_sds_port *sci_port,
enum sci_status scic_sds_port_complete_io(struct scic_sds_port *sci_port,
struct scic_sds_remote_device *sci_dev,
struct scic_sds_request *sci_req)
struct isci_request *ireq)
{
enum scic_sds_port_states state;


@ -354,17 +354,17 @@ enum sci_status scic_sds_port_link_up(struct scic_sds_port *sci_port,
enum sci_status scic_sds_port_link_down(struct scic_sds_port *sci_port,
struct scic_sds_phy *sci_phy);
struct scic_sds_request;
struct isci_request;
struct scic_sds_remote_device;
enum sci_status scic_sds_port_start_io(
struct scic_sds_port *sci_port,
struct scic_sds_remote_device *sci_dev,
struct scic_sds_request *sci_req);
struct isci_request *ireq);
enum sci_status scic_sds_port_complete_io(
struct scic_sds_port *sci_port,
struct scic_sds_remote_device *sci_dev,
struct scic_sds_request *sci_req);
struct isci_request *ireq);
enum sas_linkrate scic_sds_port_get_max_allowed_speed(
struct scic_sds_port *sci_port);


@ -94,7 +94,7 @@ static void isci_remote_device_not_ready(struct isci_host *ihost,
scic_controller_terminate_request(&ihost->sci,
&idev->sci,
&ireq->sci);
ireq);
}
/* Fall through into the default case... */
default:
@ -142,14 +142,13 @@ static enum sci_status scic_sds_remote_device_terminate_requests(struct scic_sds
for (i = 0; i < SCI_MAX_IO_REQUESTS && i < request_count; i++) {
struct isci_request *ireq = ihost->reqs[i];
struct scic_sds_request *sci_req = &ireq->sci;
enum sci_status s;
if (!test_bit(IREQ_ACTIVE, &ireq->flags) ||
sci_req->target_device != sci_dev)
ireq->target_device != sci_dev)
continue;
s = scic_controller_terminate_request(scic, sci_dev, sci_req);
s = scic_controller_terminate_request(scic, sci_dev, ireq);
if (s != SCI_SUCCESS)
status = s;
}
@ -299,7 +298,7 @@ enum sci_status scic_sds_remote_device_frame_handler(struct scic_sds_remote_devi
case SCI_DEV_STOPPING:
case SCI_DEV_FAILED:
case SCI_DEV_RESETTING: {
struct scic_sds_request *sci_req;
struct isci_request *ireq;
struct ssp_frame_hdr hdr;
void *frame_header;
ssize_t word_cnt;
@ -313,10 +312,10 @@ enum sci_status scic_sds_remote_device_frame_handler(struct scic_sds_remote_devi
word_cnt = sizeof(hdr) / sizeof(u32);
sci_swab32_cpy(&hdr, frame_header, word_cnt);
sci_req = scic_request_by_tag(scic, be16_to_cpu(hdr.tag));
if (sci_req && sci_req->target_device == sci_dev) {
ireq = scic_request_by_tag(scic, be16_to_cpu(hdr.tag));
if (ireq && ireq->target_device == sci_dev) {
/* The IO request is now in charge of releasing the frame */
status = scic_sds_io_request_frame_handler(sci_req, frame_index);
status = scic_sds_io_request_frame_handler(ireq, frame_index);
} else {
/* We could not map this tag to a valid IO
* request Just toss the frame and continue
@ -448,14 +447,14 @@ enum sci_status scic_sds_remote_device_event_handler(struct scic_sds_remote_devi
}
static void scic_sds_remote_device_start_request(struct scic_sds_remote_device *sci_dev,
struct scic_sds_request *sci_req,
struct isci_request *ireq,
enum sci_status status)
{
struct scic_sds_port *sci_port = sci_dev->owning_port;
/* cleanup requests that failed after starting on the port */
if (status != SCI_SUCCESS)
scic_sds_port_complete_io(sci_port, sci_dev, sci_req);
scic_sds_port_complete_io(sci_port, sci_dev, ireq);
else {
kref_get(&sci_dev_to_idev(sci_dev)->kref);
scic_sds_remote_device_increment_request_count(sci_dev);
@ -464,12 +463,11 @@ static void scic_sds_remote_device_start_request(struct scic_sds_remote_device *
enum sci_status scic_sds_remote_device_start_io(struct scic_sds_controller *scic,
struct scic_sds_remote_device *sci_dev,
struct scic_sds_request *sci_req)
struct isci_request *ireq)
{
struct sci_base_state_machine *sm = &sci_dev->sm;
enum scic_sds_remote_device_states state = sm->current_state_id;
struct scic_sds_port *sci_port = sci_dev->owning_port;
struct isci_request *ireq = sci_req_to_ireq(sci_req);
enum sci_status status;
switch (state) {
@ -491,15 +489,15 @@ enum sci_status scic_sds_remote_device_start_io(struct scic_sds_controller *scic
* successful it will start the request for the port object then
* increment its own request count.
*/
status = scic_sds_port_start_io(sci_port, sci_dev, sci_req);
status = scic_sds_port_start_io(sci_port, sci_dev, ireq);
if (status != SCI_SUCCESS)
return status;
status = scic_sds_remote_node_context_start_io(&sci_dev->rnc, sci_req);
status = scic_sds_remote_node_context_start_io(&sci_dev->rnc, ireq);
if (status != SCI_SUCCESS)
break;
status = scic_sds_request_start(sci_req);
status = scic_sds_request_start(ireq);
break;
case SCI_STP_DEV_IDLE: {
/* handle the start io operation for a sata device that is in
@ -513,22 +511,22 @@ enum sci_status scic_sds_remote_device_start_io(struct scic_sds_controller *scic
enum scic_sds_remote_device_states new_state;
struct sas_task *task = isci_request_access_task(ireq);
status = scic_sds_port_start_io(sci_port, sci_dev, sci_req);
status = scic_sds_port_start_io(sci_port, sci_dev, ireq);
if (status != SCI_SUCCESS)
return status;
status = scic_sds_remote_node_context_start_io(&sci_dev->rnc, sci_req);
status = scic_sds_remote_node_context_start_io(&sci_dev->rnc, ireq);
if (status != SCI_SUCCESS)
break;
status = scic_sds_request_start(sci_req);
status = scic_sds_request_start(ireq);
if (status != SCI_SUCCESS)
break;
if (task->ata_task.use_ncq)
new_state = SCI_STP_DEV_NCQ;
else {
sci_dev->working_request = sci_req;
sci_dev->working_request = ireq;
new_state = SCI_STP_DEV_CMD;
}
sci_change_state(sm, new_state);
@ -538,15 +536,15 @@ enum sci_status scic_sds_remote_device_start_io(struct scic_sds_controller *scic
struct sas_task *task = isci_request_access_task(ireq);
if (task->ata_task.use_ncq) {
status = scic_sds_port_start_io(sci_port, sci_dev, sci_req);
status = scic_sds_port_start_io(sci_port, sci_dev, ireq);
if (status != SCI_SUCCESS)
return status;
status = scic_sds_remote_node_context_start_io(&sci_dev->rnc, sci_req);
status = scic_sds_remote_node_context_start_io(&sci_dev->rnc, ireq);
if (status != SCI_SUCCESS)
break;
status = scic_sds_request_start(sci_req);
status = scic_sds_request_start(ireq);
} else
return SCI_FAILURE_INVALID_STATE;
break;
@ -554,19 +552,19 @@ enum sci_status scic_sds_remote_device_start_io(struct scic_sds_controller *scic
case SCI_STP_DEV_AWAIT_RESET:
return SCI_FAILURE_REMOTE_DEVICE_RESET_REQUIRED;
case SCI_SMP_DEV_IDLE:
status = scic_sds_port_start_io(sci_port, sci_dev, sci_req);
status = scic_sds_port_start_io(sci_port, sci_dev, ireq);
if (status != SCI_SUCCESS)
return status;
status = scic_sds_remote_node_context_start_io(&sci_dev->rnc, sci_req);
status = scic_sds_remote_node_context_start_io(&sci_dev->rnc, ireq);
if (status != SCI_SUCCESS)
break;
status = scic_sds_request_start(sci_req);
status = scic_sds_request_start(ireq);
if (status != SCI_SUCCESS)
break;
sci_dev->working_request = sci_req;
sci_dev->working_request = ireq;
sci_change_state(&sci_dev->sm, SCI_SMP_DEV_CMD);
break;
case SCI_STP_DEV_CMD:
@ -577,21 +575,21 @@ enum sci_status scic_sds_remote_device_start_io(struct scic_sds_controller *scic
return SCI_FAILURE_INVALID_STATE;
}
scic_sds_remote_device_start_request(sci_dev, sci_req, status);
scic_sds_remote_device_start_request(sci_dev, ireq, status);
return status;
}
static enum sci_status common_complete_io(struct scic_sds_port *sci_port,
struct scic_sds_remote_device *sci_dev,
struct scic_sds_request *sci_req)
struct isci_request *ireq)
{
enum sci_status status;
status = scic_sds_request_complete(sci_req);
status = scic_sds_request_complete(ireq);
if (status != SCI_SUCCESS)
return status;
status = scic_sds_port_complete_io(sci_port, sci_dev, sci_req);
status = scic_sds_port_complete_io(sci_port, sci_dev, ireq);
if (status != SCI_SUCCESS)
return status;
@ -601,7 +599,7 @@ static enum sci_status common_complete_io(struct scic_sds_port *sci_port,
enum sci_status scic_sds_remote_device_complete_io(struct scic_sds_controller *scic,
struct scic_sds_remote_device *sci_dev,
struct scic_sds_request *sci_req)
struct isci_request *ireq)
{
struct sci_base_state_machine *sm = &sci_dev->sm;
enum scic_sds_remote_device_states state = sm->current_state_id;
@ -623,16 +621,16 @@ enum sci_status scic_sds_remote_device_complete_io(struct scic_sds_controller *s
case SCI_DEV_READY:
case SCI_STP_DEV_AWAIT_RESET:
case SCI_DEV_RESETTING:
status = common_complete_io(sci_port, sci_dev, sci_req);
status = common_complete_io(sci_port, sci_dev, ireq);
break;
case SCI_STP_DEV_CMD:
case SCI_STP_DEV_NCQ:
case SCI_STP_DEV_NCQ_ERROR:
status = common_complete_io(sci_port, sci_dev, sci_req);
status = common_complete_io(sci_port, sci_dev, ireq);
if (status != SCI_SUCCESS)
break;
if (sci_req->sci_status == SCI_FAILURE_REMOTE_DEVICE_RESET_REQUIRED) {
if (ireq->sci_status == SCI_FAILURE_REMOTE_DEVICE_RESET_REQUIRED) {
/* This request causes hardware error, device needs to be Lun Reset.
* So here we force the state machine to IDLE state so the rest IOs
* can reach RNC state handler, these IOs will be completed by RNC with
@ -643,13 +641,13 @@ enum sci_status scic_sds_remote_device_complete_io(struct scic_sds_controller *s
sci_change_state(sm, SCI_STP_DEV_IDLE);
break;
case SCI_SMP_DEV_CMD:
status = common_complete_io(sci_port, sci_dev, sci_req);
status = common_complete_io(sci_port, sci_dev, ireq);
if (status != SCI_SUCCESS)
break;
sci_change_state(sm, SCI_SMP_DEV_IDLE);
break;
case SCI_DEV_STOPPING:
status = common_complete_io(sci_port, sci_dev, sci_req);
status = common_complete_io(sci_port, sci_dev, ireq);
if (status != SCI_SUCCESS)
break;
@ -664,7 +662,7 @@ enum sci_status scic_sds_remote_device_complete_io(struct scic_sds_controller *s
dev_err(scirdev_to_dev(sci_dev),
"%s: Port:0x%p Device:0x%p Request:0x%p Status:0x%x "
"could not complete\n", __func__, sci_port,
sci_dev, sci_req, status);
sci_dev, ireq, status);
else
isci_put_device(sci_dev_to_idev(sci_dev));
@ -682,7 +680,7 @@ static void scic_sds_remote_device_continue_request(void *dev)
enum sci_status scic_sds_remote_device_start_task(struct scic_sds_controller *scic,
struct scic_sds_remote_device *sci_dev,
struct scic_sds_request *sci_req)
struct isci_request *ireq)
{
struct sci_base_state_machine *sm = &sci_dev->sm;
enum scic_sds_remote_device_states state = sm->current_state_id;
@ -708,15 +706,15 @@ enum sci_status scic_sds_remote_device_start_task(struct scic_sds_controller *sc
case SCI_STP_DEV_NCQ:
case SCI_STP_DEV_NCQ_ERROR:
case SCI_STP_DEV_AWAIT_RESET:
status = scic_sds_port_start_io(sci_port, sci_dev, sci_req);
status = scic_sds_port_start_io(sci_port, sci_dev, ireq);
if (status != SCI_SUCCESS)
return status;
status = scic_sds_remote_node_context_start_task(&sci_dev->rnc, sci_req);
status = scic_sds_remote_node_context_start_task(&sci_dev->rnc, ireq);
if (status != SCI_SUCCESS)
goto out;
status = scic_sds_request_start(sci_req);
status = scic_sds_request_start(ireq);
if (status != SCI_SUCCESS)
goto out;
@ -724,7 +722,7 @@ enum sci_status scic_sds_remote_device_start_task(struct scic_sds_controller *sc
* replace the request that probably resulted in the task
* management request.
*/
sci_dev->working_request = sci_req;
sci_dev->working_request = ireq;
sci_change_state(sm, SCI_STP_DEV_CMD);
/* The remote node context must cleanup the TCi to NCQ mapping
@ -741,25 +739,25 @@ enum sci_status scic_sds_remote_device_start_task(struct scic_sds_controller *sc
sci_dev);
out:
scic_sds_remote_device_start_request(sci_dev, sci_req, status);
scic_sds_remote_device_start_request(sci_dev, ireq, status);
/* We need to let the controller start request handler know that
* it can't post TC yet. We will provide a callback function to
* post TC when RNC gets resumed.
*/
return SCI_FAILURE_RESET_DEVICE_PARTIAL_SUCCESS;
case SCI_DEV_READY:
status = scic_sds_port_start_io(sci_port, sci_dev, sci_req);
status = scic_sds_port_start_io(sci_port, sci_dev, ireq);
if (status != SCI_SUCCESS)
return status;
status = scic_sds_remote_node_context_start_task(&sci_dev->rnc, sci_req);
status = scic_sds_remote_node_context_start_task(&sci_dev->rnc, ireq);
if (status != SCI_SUCCESS)
break;
status = scic_sds_request_start(sci_req);
status = scic_sds_request_start(ireq);
break;
}
scic_sds_remote_device_start_request(sci_dev, sci_req, status);
scic_sds_remote_device_start_request(sci_dev, ireq, status);
return status;
}


@ -120,7 +120,7 @@ struct scic_sds_remote_device {
* used only for SATA requests since the unsolicited frames we get from the
* hardware have no Tag value to look up the io request object.
*/
struct scic_sds_request *working_request;
struct isci_request *working_request;
/**
* This field contains the reason for the remote device going not_ready. It is
@ -466,17 +466,17 @@ enum sci_status scic_sds_remote_device_event_handler(
enum sci_status scic_sds_remote_device_start_io(
struct scic_sds_controller *controller,
struct scic_sds_remote_device *sci_dev,
struct scic_sds_request *io_request);
struct isci_request *ireq);
enum sci_status scic_sds_remote_device_start_task(
struct scic_sds_controller *controller,
struct scic_sds_remote_device *sci_dev,
struct scic_sds_request *io_request);
struct isci_request *ireq);
enum sci_status scic_sds_remote_device_complete_io(
struct scic_sds_controller *controller,
struct scic_sds_remote_device *sci_dev,
struct scic_sds_request *io_request);
struct isci_request *ireq);
enum sci_status scic_sds_remote_device_suspend(
struct scic_sds_remote_device *sci_dev,


@ -598,7 +598,7 @@ enum sci_status scic_sds_remote_node_context_resume(struct scic_sds_remote_node_
}
enum sci_status scic_sds_remote_node_context_start_io(struct scic_sds_remote_node_context *sci_rnc,
struct scic_sds_request *sci_req)
struct isci_request *ireq)
{
enum scis_sds_remote_node_context_states state;
@ -623,7 +623,7 @@ enum sci_status scic_sds_remote_node_context_start_io(struct scic_sds_remote_nod
}
enum sci_status scic_sds_remote_node_context_start_task(struct scic_sds_remote_node_context *sci_rnc,
struct scic_sds_request *sci_req)
struct isci_request *ireq)
{
enum scis_sds_remote_node_context_states state;


@ -78,7 +78,7 @@
#define SCU_HARDWARE_SUSPENSION (0)
#define SCI_SOFTWARE_SUSPENSION (1)
struct scic_sds_request;
struct isci_request;
struct scic_sds_remote_device;
struct scic_sds_remote_node_context;
@ -220,8 +220,8 @@ enum sci_status scic_sds_remote_node_context_resume(struct scic_sds_remote_node_
scics_sds_remote_node_context_callback cb_fn,
void *cb_p);
enum sci_status scic_sds_remote_node_context_start_task(struct scic_sds_remote_node_context *sci_rnc,
struct scic_sds_request *sci_req);
struct isci_request *ireq);
enum sci_status scic_sds_remote_node_context_start_io(struct scic_sds_remote_node_context *sci_rnc,
struct scic_sds_request *sci_req);
struct isci_request *ireq);
#endif /* _SCIC_SDS_REMOTE_NODE_CONTEXT_H_ */

(File diff suppressed because it is too large.)


@ -93,7 +93,7 @@ enum sci_request_protocol {
* isci_stp_request - extra request infrastructure to handle pio/atapi protocol
* @pio_len - number of bytes requested at PIO setup
* @status - pio setup ending status value to tell us if we need
* to wait for another fis or if the transfer is complete. Upon
* to wait for another fis or if the transfer is complete. Upon
* receipt of a d2h fis this will be the status field of that fis.
* @sgl - track pio transfer progress as we iterate through the sgl
* @device_cdb_len - atapi device advertises it's transfer constraints at setup
@ -110,69 +110,55 @@ struct isci_stp_request {
u32 device_cdb_len;
};
struct scic_sds_request {
/*
* This field contains the information for the base request state
* machine.
struct isci_request {
enum isci_request_status status;
#define IREQ_COMPLETE_IN_TARGET 0
#define IREQ_TERMINATED 1
#define IREQ_TMF 2
#define IREQ_ACTIVE 3
unsigned long flags;
/* XXX kill ttype and ttype_ptr, allocate full sas_task */
enum task_type ttype;
union ttype_ptr_union {
struct sas_task *io_task_ptr; /* When ttype==io_task */
struct isci_tmf *tmf_task_ptr; /* When ttype==tmf_task */
} ttype_ptr;
struct isci_host *isci_host;
/* For use in the requests_to_{complete|abort} lists: */
struct list_head completed_node;
/* For use in the reqs_in_process list: */
struct list_head dev_node;
spinlock_t state_lock;
dma_addr_t request_daddr;
dma_addr_t zero_scatter_daddr;
unsigned int num_sg_entries;
/* Note: "io_request_completion" is completed in two different ways
* depending on whether this is a TMF or regular request.
* - TMF requests are completed in the thread that started them;
* - regular requests are completed in the request completion callback
* function.
* This difference in operation allows the aborter of a TMF request
* to be sure that once the TMF request completes, the I/O that the
* TMF was aborting is guaranteed to have completed.
*
* XXX kill io_request_completion
*/
struct completion *io_request_completion;
struct sci_base_state_machine sm;
/*
* This field simply points to the controller to which this IO request
* is associated.
*/
struct scic_sds_controller *owning_controller;
/*
* This field simply points to the remote device to which this IO
* request is associated.
*/
struct scic_sds_remote_device *target_device;
/*
* This field indicates the IO tag for this request. The IO tag is
* comprised of the task_index and a sequence count. The sequence count
* is utilized to help identify tasks from one life to another.
*/
u16 io_tag;
/*
* This field specifies the protocol being utilized for this
* IO request.
*/
enum sci_request_protocol protocol;
/*
* This field indicates the completion status taken from the SCUs
* completion code. It indicates the completion result for the SCU
* hardware.
*/
u32 scu_status;
/*
* This field indicates the completion status returned to the SCI user.
* It indicates the users view of the io request completion.
*/
u32 sci_status;
/*
* This field contains the value to be utilized when posting
* (e.g. Post_TC, * Post_TC_Abort) this request to the silicon.
*/
u32 scu_status; /* hardware result */
u32 sci_status; /* upper layer disposition */
u32 post_context;
struct scu_task_context *tc;
/* could be larger with sg chaining */
#define SCU_SGL_SIZE ((SCI_MAX_SCATTER_GATHER_ELEMENTS + 1) / 2)
struct scu_sgl_element_pair sg_table[SCU_SGL_SIZE] __attribute__ ((aligned(32)));
/*
* This field is a pointer to the stored rx frame data. It is used in
/* This field is a pointer to the stored rx frame data. It is used in
* STP internal requests and SMP response frames. If this field is
* non-NULL the saved frame must be released on IO request completion.
*
* @todo In the future do we want to keep a list of RX frame buffers?
*/
u32 saved_rx_frame_index;
@ -187,11 +173,9 @@ struct scic_sds_request {
u8 rsp_buf[SSP_RESP_IU_MAX_SIZE];
};
} ssp;
struct {
struct smp_resp rsp;
} smp;
struct {
struct isci_stp_request req;
struct host_to_dev_fis cmd;
@ -200,56 +184,11 @@ struct scic_sds_request {
};
};
static inline struct scic_sds_request *to_sci_req(struct isci_stp_request *stp_req)
static inline struct isci_request *to_ireq(struct isci_stp_request *stp_req)
{
struct scic_sds_request *sci_req;
sci_req = container_of(stp_req, typeof(*sci_req), stp.req);
return sci_req;
}
struct isci_request {
enum isci_request_status status;
enum task_type ttype;
unsigned short io_tag;
#define IREQ_COMPLETE_IN_TARGET 0
#define IREQ_TERMINATED 1
#define IREQ_TMF 2
#define IREQ_ACTIVE 3
unsigned long flags;
union ttype_ptr_union {
struct sas_task *io_task_ptr; /* When ttype==io_task */
struct isci_tmf *tmf_task_ptr; /* When ttype==tmf_task */
} ttype_ptr;
struct isci_host *isci_host;
/* For use in the requests_to_{complete|abort} lists: */
struct list_head completed_node;
/* For use in the reqs_in_process list: */
struct list_head dev_node;
spinlock_t state_lock;
dma_addr_t request_daddr;
dma_addr_t zero_scatter_daddr;
unsigned int num_sg_entries; /* returned by pci_alloc_sg */
/** Note: "io_request_completion" is completed in two different ways
* depending on whether this is a TMF or regular request.
* - TMF requests are completed in the thread that started them;
* - regular requests are completed in the request completion callback
* function.
* This difference in operation allows the aborter of a TMF request
* to be sure that once the TMF request completes, the I/O that the
* TMF was aborting is guaranteed to have completed.
*/
struct completion *io_request_completion;
struct scic_sds_request sci;
};
static inline struct isci_request *sci_req_to_ireq(struct scic_sds_request *sci_req)
{
struct isci_request *ireq = container_of(sci_req, typeof(*ireq), sci);
struct isci_request *ireq;
ireq = container_of(stp_req, typeof(*ireq), stp.req);
return ireq;
}
@ -366,32 +305,32 @@ enum sci_base_request_states {
*
* This macro will return the controller for this io request object
*/
#define scic_sds_request_get_controller(sci_req) \
((sci_req)->owning_controller)
#define scic_sds_request_get_controller(ireq) \
((ireq)->owning_controller)
/**
* scic_sds_request_get_device() -
*
* This macro will return the device for this io request object
*/
#define scic_sds_request_get_device(sci_req) \
((sci_req)->target_device)
#define scic_sds_request_get_device(ireq) \
((ireq)->target_device)
/**
* scic_sds_request_get_port() -
*
* This macro will return the port for this io request object
*/
#define scic_sds_request_get_port(sci_req) \
scic_sds_remote_device_get_port(scic_sds_request_get_device(sci_req))
#define scic_sds_request_get_port(ireq) \
scic_sds_remote_device_get_port(scic_sds_request_get_device(ireq))
/**
* scic_sds_request_get_post_context() -
*
* This macro returns the constructed post context result for the io request.
*/
#define scic_sds_request_get_post_context(sci_req) \
((sci_req)->post_context)
#define scic_sds_request_get_post_context(ireq) \
((ireq)->post_context)
/**
* scic_sds_request_get_task_context() -
@ -413,26 +352,25 @@ enum sci_base_request_states {
(request)->sci_status = (sci_status_code); \
}
enum sci_status scic_sds_request_start(struct scic_sds_request *sci_req);
enum sci_status scic_sds_io_request_terminate(struct scic_sds_request *sci_req);
enum sci_status scic_sds_request_start(struct isci_request *ireq);
enum sci_status scic_sds_io_request_terminate(struct isci_request *ireq);
enum sci_status
scic_sds_io_request_event_handler(struct scic_sds_request *sci_req,
scic_sds_io_request_event_handler(struct isci_request *ireq,
u32 event_code);
enum sci_status
scic_sds_io_request_frame_handler(struct scic_sds_request *sci_req,
scic_sds_io_request_frame_handler(struct isci_request *ireq,
u32 frame_index);
enum sci_status
scic_sds_task_request_terminate(struct scic_sds_request *sci_req);
scic_sds_task_request_terminate(struct isci_request *ireq);
extern enum sci_status
scic_sds_request_complete(struct scic_sds_request *sci_req);
scic_sds_request_complete(struct isci_request *ireq);
extern enum sci_status
scic_sds_io_request_tc_completion(struct scic_sds_request *sci_req, u32 code);
scic_sds_io_request_tc_completion(struct isci_request *ireq, u32 code);
/* XXX open code in caller */
static inline dma_addr_t
scic_io_request_get_dma_addr(struct scic_sds_request *sci_req, void *virt_addr)
scic_io_request_get_dma_addr(struct isci_request *ireq, void *virt_addr)
{
struct isci_request *ireq = sci_req_to_ireq(sci_req);
char *requested_addr = (char *)virt_addr;
char *base_addr = (char *)ireq;
@ -565,14 +503,14 @@ enum sci_status
scic_task_request_construct(struct scic_sds_controller *scic,
struct scic_sds_remote_device *sci_dev,
u16 io_tag,
struct scic_sds_request *sci_req);
struct isci_request *ireq);
enum sci_status
scic_task_request_construct_ssp(struct scic_sds_request *sci_req);
scic_task_request_construct_ssp(struct isci_request *ireq);
enum sci_status
scic_task_request_construct_sata(struct scic_sds_request *sci_req);
scic_task_request_construct_sata(struct isci_request *ireq);
void
scic_stp_io_request_set_ncq_tag(struct scic_sds_request *sci_req, u16 ncq_tag);
void scic_sds_smp_request_copy_response(struct scic_sds_request *sci_req);
scic_stp_io_request_set_ncq_tag(struct isci_request *ireq, u16 ncq_tag);
void scic_sds_smp_request_copy_response(struct isci_request *ireq);
static inline int isci_task_is_ncq_recovery(struct sas_task *task)
{


@ -70,7 +70,7 @@
struct host_to_dev_fis *isci_sata_task_to_fis_copy(struct sas_task *task)
{
struct isci_request *ireq = task->lldd_task;
struct host_to_dev_fis *fis = &ireq->sci.stp.cmd;
struct host_to_dev_fis *fis = &ireq->stp.cmd;
memcpy(fis, &task->ata_task.fis, sizeof(struct host_to_dev_fis));
@ -116,7 +116,7 @@ void isci_sata_set_ncq_tag(
struct isci_request *request = task->lldd_task;
register_fis->sector_count = qc->tag << 3;
scic_stp_io_request_set_ncq_tag(&request->sci, qc->tag);
scic_stp_io_request_set_ncq_tag(request, qc->tag);
}
/**
@ -154,7 +154,6 @@ void isci_request_process_stp_response(struct sas_task *task,
enum sci_status isci_sata_management_task_request_build(struct isci_request *ireq)
{
struct scic_sds_request *sci_req = &ireq->sci;
struct isci_tmf *isci_tmf;
enum sci_status status;
@ -167,7 +166,7 @@ enum sci_status isci_sata_management_task_request_build(struct isci_request *ire
case isci_tmf_sata_srst_high:
case isci_tmf_sata_srst_low: {
struct host_to_dev_fis *fis = &sci_req->stp.cmd;
struct host_to_dev_fis *fis = &ireq->stp.cmd;
memset(fis, 0, sizeof(*fis));
@ -188,7 +187,7 @@ enum sci_status isci_sata_management_task_request_build(struct isci_request *ire
/* core builds the protocol specific request
* based on the h2d fis.
*/
status = scic_task_request_construct_sata(&ireq->sci);
status = scic_task_request_construct_sata(ireq);
return status;
}


@ -258,7 +258,7 @@ static struct isci_request *isci_task_request_build(struct isci_host *ihost,
/* let the core do it's construct. */
status = scic_task_request_construct(&ihost->sci, &idev->sci, tag,
&ireq->sci);
ireq);
if (status != SCI_SUCCESS) {
dev_warn(&ihost->pdev->dev,
@ -272,7 +272,7 @@ static struct isci_request *isci_task_request_build(struct isci_host *ihost,
/* XXX convert to get this from task->tproto like other drivers */
if (dev->dev_type == SAS_END_DEV) {
isci_tmf->proto = SAS_PROTOCOL_SSP;
status = scic_task_request_construct_ssp(&ireq->sci);
status = scic_task_request_construct_ssp(ireq);
if (status != SCI_SUCCESS)
return NULL;
}
@ -337,7 +337,7 @@ int isci_task_execute_tmf(struct isci_host *ihost,
/* start the TMF io. */
status = scic_controller_start_task(&ihost->sci,
sci_device,
&ireq->sci);
ireq);
if (status != SCI_TASK_SUCCESS) {
dev_warn(&ihost->pdev->dev,
@ -371,7 +371,7 @@ int isci_task_execute_tmf(struct isci_host *ihost,
scic_controller_terminate_request(&ihost->sci,
&isci_device->sci,
&ireq->sci);
ireq);
spin_unlock_irqrestore(&ihost->scic_lock, flags);
@ -565,7 +565,7 @@ static void isci_terminate_request_core(
status = scic_controller_terminate_request(
&isci_host->sci,
&isci_device->sci,
&isci_request->sci);
isci_request);
}
spin_unlock_irqrestore(&isci_host->scic_lock, flags);
@ -1235,7 +1235,6 @@ isci_task_request_complete(struct isci_host *ihost,
{
struct isci_tmf *tmf = isci_request_access_tmf(ireq);
struct completion *tmf_complete;
struct scic_sds_request *sci_req = &ireq->sci;
dev_dbg(&ihost->pdev->dev,
"%s: request = %p, status=%d\n",
@ -1248,18 +1247,18 @@ isci_task_request_complete(struct isci_host *ihost,
if (tmf->proto == SAS_PROTOCOL_SSP) {
memcpy(&tmf->resp.resp_iu,
&sci_req->ssp.rsp,
&ireq->ssp.rsp,
SSP_RESP_IU_MAX_SIZE);
} else if (tmf->proto == SAS_PROTOCOL_SATA) {
memcpy(&tmf->resp.d2h_fis,
&sci_req->stp.rsp,
&ireq->stp.rsp,
sizeof(struct dev_to_host_fis));
}
/* PRINT_TMF( ((struct isci_tmf *)request->task)); */
tmf_complete = tmf->complete;
scic_controller_complete_io(&ihost->sci, ireq->sci.target_device, &ireq->sci);
scic_controller_complete_io(&ihost->sci, ireq->target_device, ireq);
/* set the 'terminated' flag handle to make sure it cannot be terminated
* or completed again.
*/