SCSI sg on 20190709
Merge tag 'scsi-sg' of git://git.kernel.org/pub/scm/linux/kernel/git/jejb/scsi

Pull SCSI scatter-gather list updates from James Bottomley:
 "This topic branch covers a fundamental change in how our sg lists are
  allocated, to make mq more efficient by reducing the size of the
  preallocated sg list.

  This necessitates a large number of driver changes because the
  previous guarantee, that a driver specifying SG_ALL as the size of
  its scatter list would get a non-chained list and didn't need to
  bother with scatterlist iterators, is now broken and every driver
  *must* use scatterlist iterators.

  This was broken out as a separate topic because we need to convert
  all the drivers before pulling the trigger, and unconverted drivers
  kept being found, necessitating a rebase"

Signed-off-by: James E.J. Bottomley <jejb@linux.ibm.com>

* tag 'scsi-sg' of git://git.kernel.org/pub/scm/linux/kernel/git/jejb/scsi: (21 commits)
  scsi: core: don't preallocate small SGL in case of NO_SG_CHAIN
  scsi: lib/sg_pool.c: clear 'first_chunk' in case of no preallocation
  scsi: core: avoid preallocating big SGL for data
  scsi: core: avoid preallocating big SGL for protection information
  scsi: lib/sg_pool.c: improve APIs for allocating sg pool
  scsi: esp: use sg helper to iterate over scatterlist
  scsi: NCR5380: use sg helper to iterate over scatterlist
  scsi: wd33c93: use sg helper to iterate over scatterlist
  scsi: ppa: use sg helper to iterate over scatterlist
  scsi: pcmcia: nsp_cs: use sg helper to iterate over scatterlist
  scsi: imm: use sg helper to iterate over scatterlist
  scsi: aha152x: use sg helper to iterate over scatterlist
  scsi: s390: zfcp_fc: use sg helper to iterate over scatterlist
  scsi: staging: unisys: visorhba: use sg helper to iterate over scatterlist
  scsi: usb: image: microtek: use sg helper to iterate over scatterlist
  scsi: pmcraid: use sg helper to iterate over scatterlist
  scsi: ipr: use sg helper to iterate over scatterlist
  scsi: mvumi: use sg helper to iterate over scatterlist
  scsi: lpfc: use sg helper to iterate over scatterlist
  scsi: advansys: use sg helper to iterate over scatterlist
  ...
commit 1f7563f743
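The practical consequence described above is that a driver can no longer treat its scatterlist as a flat array: even a host that advertised SG_ALL may now be handed a chained table. A minimal sketch of the required iteration pattern (hypothetical driver code, not part of this series), using only the generic helpers from <linux/scatterlist.h>:

#include <linux/scatterlist.h>

/*
 * Walk a possibly chained scatterlist. sg_next() (used internally by
 * for_each_sg()) transparently follows chain entries, so this works
 * whether or not the table is chained; indexing the list as sgl[i] or
 * advancing with sg++ does not.
 */
static unsigned int count_sg_bytes(struct scatterlist *sgl, unsigned int nents)
{
    struct scatterlist *sg;
    unsigned int i, total = 0;

    for_each_sg(sgl, sg, nents, i)
        total += sg->length;

    return total;
}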
@@ -2112,7 +2112,8 @@ nvme_fc_map_data(struct nvme_fc_ctrl *ctrl, struct request *rq,
     freq->sg_table.sgl = freq->first_sgl;
     ret = sg_alloc_table_chained(&freq->sg_table,
-            blk_rq_nr_phys_segments(rq), freq->sg_table.sgl);
+            blk_rq_nr_phys_segments(rq), freq->sg_table.sgl,
+            SG_CHUNK_SIZE);
     if (ret)
         return -ENOMEM;

@@ -2122,7 +2123,7 @@ nvme_fc_map_data(struct nvme_fc_ctrl *ctrl, struct request *rq,
     freq->sg_cnt = fc_dma_map_sg(ctrl->lport->dev, freq->sg_table.sgl,
                 op->nents, dir);
     if (unlikely(freq->sg_cnt <= 0)) {
-        sg_free_table_chained(&freq->sg_table, true);
+        sg_free_table_chained(&freq->sg_table, SG_CHUNK_SIZE);
         freq->sg_cnt = 0;
         return -EFAULT;
     }

@@ -2148,7 +2149,7 @@ nvme_fc_unmap_data(struct nvme_fc_ctrl *ctrl, struct request *rq,
     nvme_cleanup_cmd(rq);

-    sg_free_table_chained(&freq->sg_table, true);
+    sg_free_table_chained(&freq->sg_table, SG_CHUNK_SIZE);

     freq->sg_cnt = 0;
 }

@@ -1144,7 +1144,7 @@ static void nvme_rdma_unmap_data(struct nvme_rdma_queue *queue,
             WRITE ? DMA_TO_DEVICE : DMA_FROM_DEVICE);

     nvme_cleanup_cmd(rq);
-    sg_free_table_chained(&req->sg_table, true);
+    sg_free_table_chained(&req->sg_table, SG_CHUNK_SIZE);
 }

 static int nvme_rdma_set_sg_null(struct nvme_command *c)

@@ -1259,7 +1259,8 @@ static int nvme_rdma_map_data(struct nvme_rdma_queue *queue,
     req->sg_table.sgl = req->first_sgl;
     ret = sg_alloc_table_chained(&req->sg_table,
-            blk_rq_nr_phys_segments(rq), req->sg_table.sgl);
+            blk_rq_nr_phys_segments(rq), req->sg_table.sgl,
+            SG_CHUNK_SIZE);
     if (ret)
         return -ENOMEM;

@@ -1299,7 +1300,7 @@ static int nvme_rdma_map_data(struct nvme_rdma_queue *queue,
             req->nents, rq_data_dir(rq) ==
             WRITE ? DMA_TO_DEVICE : DMA_FROM_DEVICE);
 out_free_table:
-    sg_free_table_chained(&req->sg_table, true);
+    sg_free_table_chained(&req->sg_table, SG_CHUNK_SIZE);
     return ret;
 }

@@ -77,7 +77,7 @@ static void nvme_loop_complete_rq(struct request *req)
     struct nvme_loop_iod *iod = blk_mq_rq_to_pdu(req);

     nvme_cleanup_cmd(req);
-    sg_free_table_chained(&iod->sg_table, true);
+    sg_free_table_chained(&iod->sg_table, SG_CHUNK_SIZE);
     nvme_complete_rq(req);
 }

@@ -157,7 +157,7 @@ static blk_status_t nvme_loop_queue_rq(struct blk_mq_hw_ctx *hctx,
         iod->sg_table.sgl = iod->first_sgl;
         if (sg_alloc_table_chained(&iod->sg_table,
                 blk_rq_nr_phys_segments(req),
-                iod->sg_table.sgl))
+                iod->sg_table.sgl, SG_CHUNK_SIZE))
             return BLK_STS_RESOURCE;

         iod->req.sg = iod->sg_table.sgl;

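All three nvme transports follow the same conversion: the inline first chunk embedded in the per-request private data is still passed as before, and the new last argument tells the sg-pool code how many entries that chunk holds (here the full SG_CHUNK_SIZE, preserving the old behaviour). A hedged sketch of the calling convention, with a made-up request structure standing in for the per-driver iod:

#include <linux/scatterlist.h>

#define MY_INLINE_SG_CNT 2  /* hypothetical number of inline entries */

struct my_request {
    struct sg_table sg_table;
    struct scatterlist first_sgl[MY_INLINE_SG_CNT];
};

static int my_map_data(struct my_request *req, int nr_segs)
{
    req->sg_table.sgl = req->first_sgl;
    /* nents_first_chunk must match what is passed at free time */
    if (sg_alloc_table_chained(&req->sg_table, nr_segs,
                   req->sg_table.sgl, MY_INLINE_SG_CNT))
        return -ENOMEM;
    return 0;
}

static void my_unmap_data(struct my_request *req)
{
    sg_free_table_chained(&req->sg_table, MY_INLINE_SG_CNT);
}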
@@ -620,7 +620,7 @@ static void zfcp_fc_sg_free_table(struct scatterlist *sg, int count)
 {
     int i;

-    for (i = 0; i < count; i++, sg++)
+    for (i = 0; i < count; i++, sg = sg_next(sg))
         if (sg)
             free_page((unsigned long) sg_virt(sg));
         else

@@ -641,7 +641,7 @@ static int zfcp_fc_sg_setup_table(struct scatterlist *sg, int count)
     int i;

     sg_init_table(sg, count);
-    for (i = 0; i < count; i++, sg++) {
+    for (i = 0; i < count; i++, sg = sg_next(sg)) {
         addr = (void *) get_zeroed_page(GFP_KERNEL);
         if (!addr) {
             zfcp_fc_sg_free_table(sg, i);

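The zfcp change is representative of the simplest class of conversion in this series: a chained table can contain chain entries in the middle of an allocation, so advancing with pointer arithmetic (sg++) may land on a chain link instead of a data entry, while sg_next() skips over it. A small illustrative sketch (not from any driver here):

#include <linux/scatterlist.h>

static void fill_pages(struct scatterlist *sgl, int count, struct page **pages)
{
    struct scatterlist *sg = sgl;
    int i;

    /* Wrong for chained tables: sg++ may step onto a chain entry. */
    /* for (i = 0; i < count; i++, sg++) ... */

    /* Safe: sg_next() follows chain entries transparently. */
    for (i = 0; i < count; i++, sg = sg_next(sg))
        sg_set_page(sg, pages[i], PAGE_SIZE, 0);
}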
@@ -149,12 +149,10 @@ static inline void initialize_SCp(struct scsi_cmnd *cmd)

     if (scsi_bufflen(cmd)) {
         cmd->SCp.buffer = scsi_sglist(cmd);
-        cmd->SCp.buffers_residual = scsi_sg_count(cmd) - 1;
         cmd->SCp.ptr = sg_virt(cmd->SCp.buffer);
         cmd->SCp.this_residual = cmd->SCp.buffer->length;
     } else {
         cmd->SCp.buffer = NULL;
-        cmd->SCp.buffers_residual = 0;
         cmd->SCp.ptr = NULL;
         cmd->SCp.this_residual = 0;
     }

@@ -163,6 +161,17 @@ static inline void initialize_SCp(struct scsi_cmnd *cmd)
     cmd->SCp.Message = 0;
 }

+static inline void advance_sg_buffer(struct scsi_cmnd *cmd)
+{
+    struct scatterlist *s = cmd->SCp.buffer;
+
+    if (!cmd->SCp.this_residual && s && !sg_is_last(s)) {
+        cmd->SCp.buffer = sg_next(s);
+        cmd->SCp.ptr = sg_virt(cmd->SCp.buffer);
+        cmd->SCp.this_residual = cmd->SCp.buffer->length;
+    }
+}
+
 /**
  * NCR5380_poll_politely2 - wait for two chip register values
  * @hostdata: host private data

@@ -1670,12 +1679,7 @@ static void NCR5380_information_transfer(struct Scsi_Host *instance)
             sun3_dma_setup_done != cmd) {
             int count;

-            if (!cmd->SCp.this_residual && cmd->SCp.buffers_residual) {
-                ++cmd->SCp.buffer;
-                --cmd->SCp.buffers_residual;
-                cmd->SCp.this_residual = cmd->SCp.buffer->length;
-                cmd->SCp.ptr = sg_virt(cmd->SCp.buffer);
-            }
+            advance_sg_buffer(cmd);

             count = sun3scsi_dma_xfer_len(hostdata, cmd);

@@ -1725,15 +1729,11 @@ static void NCR5380_information_transfer(struct Scsi_Host *instance)
              * scatter-gather list, move onto the next one.
              */

-            if (!cmd->SCp.this_residual && cmd->SCp.buffers_residual) {
-                ++cmd->SCp.buffer;
-                --cmd->SCp.buffers_residual;
-                cmd->SCp.this_residual = cmd->SCp.buffer->length;
-                cmd->SCp.ptr = sg_virt(cmd->SCp.buffer);
-                dsprintk(NDEBUG_INFORMATION, instance, "%d bytes and %d buffers left\n",
-                         cmd->SCp.this_residual,
-                         cmd->SCp.buffers_residual);
-            }
+            advance_sg_buffer(cmd);
+            dsprintk(NDEBUG_INFORMATION, instance,
+                     "this residual %d, sg ents %d\n",
+                     cmd->SCp.this_residual,
+                     sg_nents(cmd->SCp.buffer));

             /*
              * The preferred transfer method is going to be

@@ -2126,12 +2126,7 @@ static void NCR5380_reselect(struct Scsi_Host *instance)
     if (sun3_dma_setup_done != tmp) {
         int count;

-        if (!tmp->SCp.this_residual && tmp->SCp.buffers_residual) {
-            ++tmp->SCp.buffer;
-            --tmp->SCp.buffers_residual;
-            tmp->SCp.this_residual = tmp->SCp.buffer->length;
-            tmp->SCp.ptr = sg_virt(tmp->SCp.buffer);
-        }
+        advance_sg_buffer(tmp);

         count = sun3scsi_dma_xfer_len(hostdata, tmp);

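With the buffers_residual counter gone, the "how many entries are left" bookkeeping can be recovered from the list itself: sg_is_last() says whether the current entry is the final one, and sg_nents() counts entries from a given position by walking the chain, which is what the dsprintk above now reports. A tiny illustrative helper (not part of the driver):

#include <linux/scatterlist.h>

/* Number of data entries after the current one, chained lists included. */
static int sg_entries_left(struct scatterlist *cur)
{
    return cur ? sg_nents(cur) - 1 : 0;
}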
@@ -7710,7 +7710,7 @@ adv_get_sglist(struct asc_board *boardp, adv_req_t *reqp,
             sg_block->sg_ptr = 0L; /* Last ADV_SG_BLOCK in list. */
             return ADV_SUCCESS;
         }
-        slp++;
+        slp = sg_next(slp);
     }
     sg_block->sg_cnt = NO_OF_SG_PER_BLOCK;
     prev_sg_block = sg_block;

@@ -937,7 +937,6 @@ static int aha152x_internal_queue(struct scsi_cmnd *SCpnt,
        SCp.ptr              : buffer pointer
        SCp.this_residual    : buffer length
        SCp.buffer           : next buffer
-       SCp.buffers_residual : left buffers in list
        SCp.phase            : current state of the command */

     if ((phase & resetting) || !scsi_sglist(SCpnt)) {

@@ -945,13 +944,11 @@ static int aha152x_internal_queue(struct scsi_cmnd *SCpnt,
         SCpnt->SCp.this_residual = 0;
         scsi_set_resid(SCpnt, 0);
         SCpnt->SCp.buffer = NULL;
-        SCpnt->SCp.buffers_residual = 0;
     } else {
         scsi_set_resid(SCpnt, scsi_bufflen(SCpnt));
         SCpnt->SCp.buffer = scsi_sglist(SCpnt);
         SCpnt->SCp.ptr = SG_ADDRESS(SCpnt->SCp.buffer);
         SCpnt->SCp.this_residual = SCpnt->SCp.buffer->length;
-        SCpnt->SCp.buffers_residual = scsi_sg_count(SCpnt) - 1;
     }

     DO_LOCK(flags);

@@ -2019,10 +2016,9 @@ static void datai_run(struct Scsi_Host *shpnt)
         }

         if (CURRENT_SC->SCp.this_residual == 0 &&
-            CURRENT_SC->SCp.buffers_residual > 0) {
+            !sg_is_last(CURRENT_SC->SCp.buffer)) {
             /* advance to next buffer */
-            CURRENT_SC->SCp.buffers_residual--;
-            CURRENT_SC->SCp.buffer++;
+            CURRENT_SC->SCp.buffer = sg_next(CURRENT_SC->SCp.buffer);
             CURRENT_SC->SCp.ptr           = SG_ADDRESS(CURRENT_SC->SCp.buffer);
             CURRENT_SC->SCp.this_residual = CURRENT_SC->SCp.buffer->length;
         }

@@ -2125,10 +2121,10 @@ static void datao_run(struct Scsi_Host *shpnt)
             CMD_INC_RESID(CURRENT_SC, -2 * data_count);
         }

-        if(CURRENT_SC->SCp.this_residual==0 && CURRENT_SC->SCp.buffers_residual>0) {
+        if (CURRENT_SC->SCp.this_residual == 0 &&
+            !sg_is_last(CURRENT_SC->SCp.buffer)) {
             /* advance to next buffer */
-            CURRENT_SC->SCp.buffers_residual--;
-            CURRENT_SC->SCp.buffer++;
+            CURRENT_SC->SCp.buffer = sg_next(CURRENT_SC->SCp.buffer);
             CURRENT_SC->SCp.ptr           = SG_ADDRESS(CURRENT_SC->SCp.buffer);
             CURRENT_SC->SCp.this_residual = CURRENT_SC->SCp.buffer->length;
         }

@@ -2147,22 +2143,26 @@ static void datao_run(struct Scsi_Host *shpnt)
 static void datao_end(struct Scsi_Host *shpnt)
 {
     if(TESTLO(DMASTAT, DFIFOEMP)) {
-        int data_count = (DATA_LEN - scsi_get_resid(CURRENT_SC)) -
-            GETSTCNT();
+        u32 datao_cnt = GETSTCNT();
+        int datao_out = DATA_LEN - scsi_get_resid(CURRENT_SC);
+        int done;
+        struct scatterlist *sg = scsi_sglist(CURRENT_SC);

-        CMD_INC_RESID(CURRENT_SC, data_count);
+        CMD_INC_RESID(CURRENT_SC, datao_out - datao_cnt);

-        data_count -= CURRENT_SC->SCp.ptr -
-            SG_ADDRESS(CURRENT_SC->SCp.buffer);
-        while(data_count>0) {
-            CURRENT_SC->SCp.buffer--;
-            CURRENT_SC->SCp.buffers_residual++;
-            data_count -= CURRENT_SC->SCp.buffer->length;
+        done = scsi_bufflen(CURRENT_SC) - scsi_get_resid(CURRENT_SC);
+        /* Locate the first SG entry not yet sent */
+        while (done > 0 && !sg_is_last(sg)) {
+            if (done < sg->length)
+                break;
+            done -= sg->length;
+            sg = sg_next(sg);
         }
-        CURRENT_SC->SCp.ptr = SG_ADDRESS(CURRENT_SC->SCp.buffer) -
-            data_count;
-        CURRENT_SC->SCp.this_residual = CURRENT_SC->SCp.buffer->length +
-            data_count;
+
+        CURRENT_SC->SCp.buffer = sg;
+        CURRENT_SC->SCp.ptr = SG_ADDRESS(CURRENT_SC->SCp.buffer) + done;
+        CURRENT_SC->SCp.this_residual = CURRENT_SC->SCp.buffer->length -
+            done;
     }

     SETPORT(SXFRCTL0, CH1|CLRCH1|CLRSTCNT);

@@ -2490,7 +2490,7 @@ static void get_command(struct seq_file *m, struct scsi_cmnd * ptr)

     seq_printf(m, "); resid=%d; residual=%d; buffers=%d; phase |",
            scsi_get_resid(ptr), ptr->SCp.this_residual,
-           ptr->SCp.buffers_residual);
+           sg_nents(ptr->SCp.buffer) - 1);

     if (ptr->SCp.phase & not_issued)
         seq_puts(m, "not issued|");

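The reworked datao_end() no longer walks backwards with buffer-- to rewind after a FIFO underrun; it recomputes the position from the number of bytes already sent, scanning forward from the head of the list, which is the only direction a chained list can be traversed. The same forward scan, isolated as a hypothetical helper mirroring the loop above:

#include <linux/scatterlist.h>

/*
 * Find the entry containing byte offset *done and reduce *done to the
 * offset within that entry, walking forward so chained lists work.
 */
static struct scatterlist *sg_seek(struct scatterlist *sg, int *done)
{
    while (*done > 0 && !sg_is_last(sg)) {
        if (*done < sg->length)
            break;
        *done -= sg->length;
        sg = sg_next(sg);
    }
    return sg;
}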
@@ -371,6 +371,7 @@ static void esp_map_dma(struct esp *esp, struct scsi_cmnd *cmd)
     struct esp_cmd_priv *spriv = ESP_CMD_PRIV(cmd);
     struct scatterlist *sg = scsi_sglist(cmd);
     int total = 0, i;
+    struct scatterlist *s;

     if (cmd->sc_data_direction == DMA_NONE)
         return;

@@ -381,16 +382,18 @@ static void esp_map_dma(struct esp *esp, struct scsi_cmnd *cmd)
          * a dma address, so perform an identity mapping.
          */
         spriv->num_sg = scsi_sg_count(cmd);
-        for (i = 0; i < spriv->num_sg; i++) {
-            sg[i].dma_address = (uintptr_t)sg_virt(&sg[i]);
-            total += sg_dma_len(&sg[i]);
+
+        scsi_for_each_sg(cmd, s, spriv->num_sg, i) {
+            s->dma_address = (uintptr_t)sg_virt(s);
+            total += sg_dma_len(s);
         }
     } else {
         spriv->num_sg = scsi_dma_map(cmd);
-        for (i = 0; i < spriv->num_sg; i++)
-            total += sg_dma_len(&sg[i]);
+        scsi_for_each_sg(cmd, s, spriv->num_sg, i)
+            total += sg_dma_len(s);
     }
     spriv->cur_residue = sg_dma_len(sg);
+    spriv->prv_sg = NULL;
     spriv->cur_sg = sg;
     spriv->tot_residue = total;
 }

@@ -444,7 +447,8 @@ static void esp_advance_dma(struct esp *esp, struct esp_cmd_entry *ent,
         p->tot_residue = 0;
     }
     if (!p->cur_residue && p->tot_residue) {
-        p->cur_sg++;
+        p->prv_sg = p->cur_sg;
+        p->cur_sg = sg_next(p->cur_sg);
         p->cur_residue = sg_dma_len(p->cur_sg);
     }
 }

@@ -465,6 +469,7 @@ static void esp_save_pointers(struct esp *esp, struct esp_cmd_entry *ent)
         return;
     }
     ent->saved_cur_residue = spriv->cur_residue;
+    ent->saved_prv_sg = spriv->prv_sg;
     ent->saved_cur_sg = spriv->cur_sg;
     ent->saved_tot_residue = spriv->tot_residue;
 }

@@ -479,6 +484,7 @@ static void esp_restore_pointers(struct esp *esp, struct esp_cmd_entry *ent)
         return;
     }
     spriv->cur_residue = ent->saved_cur_residue;
+    spriv->prv_sg = ent->saved_prv_sg;
     spriv->cur_sg = ent->saved_cur_sg;
     spriv->tot_residue = ent->saved_tot_residue;
 }

@@ -1647,7 +1653,7 @@ static int esp_msgin_process(struct esp *esp)
         spriv = ESP_CMD_PRIV(ent->cmd);

         if (spriv->cur_residue == sg_dma_len(spriv->cur_sg)) {
-            spriv->cur_sg--;
+            spriv->cur_sg = spriv->prv_sg;
             spriv->cur_residue = 1;
         } else
             spriv->cur_residue++;

@@ -251,6 +251,7 @@
 struct esp_cmd_priv {
     int num_sg;
     int cur_residue;
+    struct scatterlist *prv_sg;
     struct scatterlist *cur_sg;
     int tot_residue;
 };

@@ -273,6 +274,7 @@ struct esp_cmd_entry {
     struct scsi_cmnd *cmd;

     unsigned int saved_cur_residue;
+    struct scatterlist *saved_prv_sg;
     struct scatterlist *saved_cur_sg;
     unsigned int saved_tot_residue;

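Because stepping backwards with cur_sg-- is meaningless on a chained list, the esp driver now remembers the previous entry explicitly while iterating forward, and the save/restore-pointers paths carry that extra field along. The same idea isolated as a sketch (the structure and field names here are illustrative, modelled on the diff above, not an existing API):

#include <linux/scatterlist.h>

struct sg_cursor {
    struct scatterlist *prv_sg;    /* entry we came from, or NULL */
    struct scatterlist *cur_sg;    /* current entry */
    unsigned int cur_residue;      /* DMA bytes left in cur_sg */
};

static void sg_cursor_advance(struct sg_cursor *c)
{
    if (!c->cur_residue) {
        c->prv_sg = c->cur_sg;
        c->cur_sg = sg_next(c->cur_sg);
        c->cur_residue = sg_dma_len(c->cur_sg);
    }
}

static void sg_cursor_back_one(struct sg_cursor *c)
{
    /* One step back is possible; a full reverse walk is not. */
    c->cur_sg = c->prv_sg;
}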
@@ -687,7 +687,7 @@ static int imm_completion(struct scsi_cmnd *cmd)
         if (cmd->SCp.buffer && !cmd->SCp.this_residual) {
             /* if scatter/gather, advance to the next segment */
             if (cmd->SCp.buffers_residual--) {
-                cmd->SCp.buffer++;
+                cmd->SCp.buffer = sg_next(cmd->SCp.buffer);
                 cmd->SCp.this_residual =
                     cmd->SCp.buffer->length;
                 cmd->SCp.ptr = sg_virt(cmd->SCp.buffer);

@@ -3901,22 +3901,23 @@ static int ipr_copy_ucode_buffer(struct ipr_sglist *sglist,
                  u8 *buffer, u32 len)
 {
     int bsize_elem, i, result = 0;
-    struct scatterlist *scatterlist;
+    struct scatterlist *sg;
     void *kaddr;

     /* Determine the actual number of bytes per element */
     bsize_elem = PAGE_SIZE * (1 << sglist->order);

-    scatterlist = sglist->scatterlist;
+    sg = sglist->scatterlist;

-    for (i = 0; i < (len / bsize_elem); i++, buffer += bsize_elem) {
-        struct page *page = sg_page(&scatterlist[i]);
+    for (i = 0; i < (len / bsize_elem); i++, sg = sg_next(sg),
+            buffer += bsize_elem) {
+        struct page *page = sg_page(sg);

         kaddr = kmap(page);
         memcpy(kaddr, buffer, bsize_elem);
         kunmap(page);

-        scatterlist[i].length = bsize_elem;
+        sg->length = bsize_elem;

         if (result != 0) {
             ipr_trace;

@@ -3925,13 +3926,13 @@ static int ipr_copy_ucode_buffer(struct ipr_sglist *sglist,
     }

     if (len % bsize_elem) {
-        struct page *page = sg_page(&scatterlist[i]);
+        struct page *page = sg_page(sg);

         kaddr = kmap(page);
         memcpy(kaddr, buffer, len % bsize_elem);
         kunmap(page);

-        scatterlist[i].length = len % bsize_elem;
+        sg->length = len % bsize_elem;
     }

     sglist->buffer_len = len;

@@ -3952,6 +3953,7 @@ static void ipr_build_ucode_ioadl64(struct ipr_cmnd *ipr_cmd,
     struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
     struct ipr_ioadl64_desc *ioadl64 = ipr_cmd->i.ioadl64;
     struct scatterlist *scatterlist = sglist->scatterlist;
+    struct scatterlist *sg;
     int i;

     ipr_cmd->dma_use_sg = sglist->num_dma_sg;

@@ -3960,10 +3962,10 @@ static void ipr_build_ucode_ioadl64(struct ipr_cmnd *ipr_cmd,
     ioarcb->ioadl_len =
         cpu_to_be32(sizeof(struct ipr_ioadl64_desc) * ipr_cmd->dma_use_sg);
-    for (i = 0; i < ipr_cmd->dma_use_sg; i++) {
+    for_each_sg(scatterlist, sg, ipr_cmd->dma_use_sg, i) {
         ioadl64[i].flags = cpu_to_be32(IPR_IOADL_FLAGS_WRITE);
-        ioadl64[i].data_len = cpu_to_be32(sg_dma_len(&scatterlist[i]));
-        ioadl64[i].address = cpu_to_be64(sg_dma_address(&scatterlist[i]));
+        ioadl64[i].data_len = cpu_to_be32(sg_dma_len(sg));
+        ioadl64[i].address = cpu_to_be64(sg_dma_address(sg));
     }

     ioadl64[i-1].flags |= cpu_to_be32(IPR_IOADL_FLAGS_LAST);

@@ -3983,6 +3985,7 @@ static void ipr_build_ucode_ioadl(struct ipr_cmnd *ipr_cmd,
     struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
     struct ipr_ioadl_desc *ioadl = ipr_cmd->i.ioadl;
     struct scatterlist *scatterlist = sglist->scatterlist;
+    struct scatterlist *sg;
     int i;

     ipr_cmd->dma_use_sg = sglist->num_dma_sg;

@@ -3992,11 +3995,11 @@ static void ipr_build_ucode_ioadl(struct ipr_cmnd *ipr_cmd,
     ioarcb->ioadl_len =
         cpu_to_be32(sizeof(struct ipr_ioadl_desc) * ipr_cmd->dma_use_sg);

-    for (i = 0; i < ipr_cmd->dma_use_sg; i++) {
+    for_each_sg(scatterlist, sg, ipr_cmd->dma_use_sg, i) {
         ioadl[i].flags_and_data_len =
-            cpu_to_be32(IPR_IOADL_FLAGS_WRITE | sg_dma_len(&scatterlist[i]));
+            cpu_to_be32(IPR_IOADL_FLAGS_WRITE | sg_dma_len(sg));
         ioadl[i].address =
-            cpu_to_be32(sg_dma_address(&scatterlist[i]));
+            cpu_to_be32(sg_dma_address(sg));
     }

     ioadl[i-1].flags_and_data_len |=

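The ipr conversion shows the two common idioms side by side: sg_next() in a plain for loop when the index is still needed for something else (here the ioadl descriptor array), and for_each_sg() when the loop only needs the entry plus a counter. An isolated sketch of the descriptor-building idiom (the descriptor structure is invented for illustration):

#include <linux/types.h>
#include <asm/byteorder.h>
#include <linux/scatterlist.h>

struct my_desc {
    __be32 len;
    __be64 addr;
};

static void build_descs(struct scatterlist *sgl, int nents, struct my_desc *d)
{
    struct scatterlist *sg;
    int i;

    /* i indexes the descriptor array, sg walks the (possibly chained) list */
    for_each_sg(sgl, sg, nents, i) {
        d[i].len  = cpu_to_be32(sg_dma_len(sg));
        d[i].addr = cpu_to_be64(sg_dma_address(sg));
    }
}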
@@ -2904,8 +2904,7 @@ lpfc_nvmet_prep_fcp_wqe(struct lpfc_hba *phba,
     nvmewqe->drvrTimeout = (phba->fc_ratov * 3) + LPFC_DRVR_TIMEOUT;
     nvmewqe->context1 = ndlp;

-    for (i = 0; i < rsp->sg_cnt; i++) {
-        sgel = &rsp->sg[i];
+    for_each_sg(rsp->sg, sgel, rsp->sg_cnt, i) {
         physaddr = sg_dma_address(sgel);
         cnt = sg_dma_len(sgel);
         sgl->addr_hi = putPaddrHigh(physaddr);

@@ -195,23 +195,22 @@ static int mvumi_make_sgl(struct mvumi_hba *mhba, struct scsi_cmnd *scmd,
     unsigned int sgnum = scsi_sg_count(scmd);
     dma_addr_t busaddr;

-    sg = scsi_sglist(scmd);
-    *sg_count = dma_map_sg(&mhba->pdev->dev, sg, sgnum,
+    *sg_count = dma_map_sg(&mhba->pdev->dev, scsi_sglist(scmd), sgnum,
                    scmd->sc_data_direction);
     if (*sg_count > mhba->max_sge) {
         dev_err(&mhba->pdev->dev,
             "sg count[0x%x] is bigger than max sg[0x%x].\n",
             *sg_count, mhba->max_sge);
-        dma_unmap_sg(&mhba->pdev->dev, sg, sgnum,
+        dma_unmap_sg(&mhba->pdev->dev, scsi_sglist(scmd), sgnum,
                  scmd->sc_data_direction);
         return -1;
     }
-    for (i = 0; i < *sg_count; i++) {
-        busaddr = sg_dma_address(&sg[i]);
+    scsi_for_each_sg(scmd, sg, *sg_count, i) {
+        busaddr = sg_dma_address(sg);
         m_sg->baseaddr_l = cpu_to_le32(lower_32_bits(busaddr));
         m_sg->baseaddr_h = cpu_to_le32(upper_32_bits(busaddr));
         m_sg->flags = 0;
-        sgd_setsz(mhba, m_sg, cpu_to_le32(sg_dma_len(&sg[i])));
+        sgd_setsz(mhba, m_sg, cpu_to_le32(sg_dma_len(sg)));
         if ((i + 1) == *sg_count)
             m_sg->flags |= 1U << mhba->eot_flag;

@@ -789,7 +789,7 @@ static void nsp_pio_read(struct scsi_cmnd *SCpnt)
             SCpnt->SCp.buffers_residual != 0 ) {
             //nsp_dbg(NSP_DEBUG_DATA_IO, "scatterlist next timeout=%d", time_out);
             SCpnt->SCp.buffers_residual--;
-            SCpnt->SCp.buffer++;
+            SCpnt->SCp.buffer = sg_next(SCpnt->SCp.buffer);
             SCpnt->SCp.ptr = BUFFER_ADDR;
             SCpnt->SCp.this_residual = SCpnt->SCp.buffer->length;
             time_out = 1000;

@@ -887,7 +887,7 @@ static void nsp_pio_write(struct scsi_cmnd *SCpnt)
             SCpnt->SCp.buffers_residual != 0 ) {
             //nsp_dbg(NSP_DEBUG_DATA_IO, "scatterlist next");
             SCpnt->SCp.buffers_residual--;
-            SCpnt->SCp.buffer++;
+            SCpnt->SCp.buffer = sg_next(SCpnt->SCp.buffer);
             SCpnt->SCp.ptr = BUFFER_ADDR;
             SCpnt->SCp.this_residual = SCpnt->SCp.buffer->length;
             time_out = 1000;

@@ -3255,7 +3255,7 @@ static int pmcraid_copy_sglist(
     int direction
 )
 {
-    struct scatterlist *scatterlist;
+    struct scatterlist *sg;
     void *kaddr;
     int bsize_elem;
     int i;

@@ -3264,10 +3264,10 @@ static int pmcraid_copy_sglist(
     /* Determine the actual number of bytes per element */
     bsize_elem = PAGE_SIZE * (1 << sglist->order);

-    scatterlist = sglist->scatterlist;
+    sg = sglist->scatterlist;

-    for (i = 0; i < (len / bsize_elem); i++, buffer += bsize_elem) {
-        struct page *page = sg_page(&scatterlist[i]);
+    for (i = 0; i < (len / bsize_elem); i++, sg = sg_next(sg), buffer += bsize_elem) {
+        struct page *page = sg_page(sg);

         kaddr = kmap(page);
         if (direction == DMA_TO_DEVICE)

@@ -3282,11 +3282,11 @@ static int pmcraid_copy_sglist(
             return -EFAULT;
         }

-        scatterlist[i].length = bsize_elem;
+        sg->length = bsize_elem;
     }

     if (len % bsize_elem) {
-        struct page *page = sg_page(&scatterlist[i]);
+        struct page *page = sg_page(sg);

         kaddr = kmap(page);

@@ -3297,7 +3297,7 @@ static int pmcraid_copy_sglist(

         kunmap(page);

-        scatterlist[i].length = len % bsize_elem;
+        sg->length = len % bsize_elem;
     }

     if (rc) {

@@ -590,7 +590,7 @@ static int ppa_completion(struct scsi_cmnd *cmd)
         if (cmd->SCp.buffer && !cmd->SCp.this_residual) {
             /* if scatter/gather, advance to the next segment */
             if (cmd->SCp.buffers_residual--) {
-                cmd->SCp.buffer++;
+                cmd->SCp.buffer = sg_next(cmd->SCp.buffer);
                 cmd->SCp.this_residual =
                     cmd->SCp.buffer->length;
                 cmd->SCp.ptr = sg_virt(cmd->SCp.buffer);

@@ -40,6 +40,18 @@
 #include "scsi_priv.h"
 #include "scsi_logging.h"

+/*
+ * Size of integrity metadata is usually small, 1 inline sg should
+ * cover normal cases.
+ */
+#ifdef CONFIG_ARCH_NO_SG_CHAIN
+#define  SCSI_INLINE_PROT_SG_CNT  0
+#define  SCSI_INLINE_SG_CNT  0
+#else
+#define  SCSI_INLINE_PROT_SG_CNT  1
+#define  SCSI_INLINE_SG_CNT  2
+#endif
+
 static struct kmem_cache *scsi_sdb_cache;
 static struct kmem_cache *scsi_sense_cache;
 static struct kmem_cache *scsi_sense_isadma_cache;

@@ -542,9 +554,11 @@ static void scsi_uninit_cmd(struct scsi_cmnd *cmd)
 static void scsi_mq_free_sgtables(struct scsi_cmnd *cmd)
 {
     if (cmd->sdb.table.nents)
-        sg_free_table_chained(&cmd->sdb.table, true);
+        sg_free_table_chained(&cmd->sdb.table,
+                SCSI_INLINE_SG_CNT);
     if (scsi_prot_sg_count(cmd))
-        sg_free_table_chained(&cmd->prot_sdb->table, true);
+        sg_free_table_chained(&cmd->prot_sdb->table,
+                SCSI_INLINE_PROT_SG_CNT);
 }

 static void scsi_mq_uninit_cmd(struct scsi_cmnd *cmd)

@@ -977,7 +991,8 @@ static blk_status_t scsi_init_sgtable(struct request *req,
      * If sg table allocation fails, requeue request later.
      */
     if (unlikely(sg_alloc_table_chained(&sdb->table,
-            blk_rq_nr_phys_segments(req), sdb->table.sgl)))
+            blk_rq_nr_phys_segments(req), sdb->table.sgl,
+            SCSI_INLINE_SG_CNT)))
         return BLK_STS_RESOURCE;

     /*

@@ -1031,7 +1046,8 @@ blk_status_t scsi_init_io(struct scsi_cmnd *cmd)
         ivecs = blk_rq_count_integrity_sg(rq->q, rq->bio);

         if (sg_alloc_table_chained(&prot_sdb->table, ivecs,
-                prot_sdb->table.sgl)) {
+                prot_sdb->table.sgl,
+                SCSI_INLINE_PROT_SG_CNT)) {
             ret = BLK_STS_RESOURCE;
             goto out_free_sgtables;
         }

@@ -1542,9 +1558,9 @@ static int scsi_dispatch_cmd(struct scsi_cmnd *cmd)
 }

 /* Size in bytes of the sg-list stored in the scsi-mq command-private data. */
-static unsigned int scsi_mq_sgl_size(struct Scsi_Host *shost)
+static unsigned int scsi_mq_inline_sgl_size(struct Scsi_Host *shost)
 {
-    return min_t(unsigned int, shost->sg_tablesize, SG_CHUNK_SIZE) *
+    return min_t(unsigned int, shost->sg_tablesize, SCSI_INLINE_SG_CNT) *
         sizeof(struct scatterlist);
 }

@@ -1726,7 +1742,7 @@ static int scsi_mq_init_request(struct blk_mq_tag_set *set, struct request *rq,
     if (scsi_host_get_prot(shost)) {
         sg = (void *)cmd + sizeof(struct scsi_cmnd) +
             shost->hostt->cmd_size;
-        cmd->prot_sdb = (void *)sg + scsi_mq_sgl_size(shost);
+        cmd->prot_sdb = (void *)sg + scsi_mq_inline_sgl_size(shost);
     }

     return 0;

@@ -1820,10 +1836,11 @@ int scsi_mq_setup_tags(struct Scsi_Host *shost)
 {
     unsigned int cmd_size, sgl_size;

-    sgl_size = scsi_mq_sgl_size(shost);
+    sgl_size = scsi_mq_inline_sgl_size(shost);
     cmd_size = sizeof(struct scsi_cmnd) + shost->hostt->cmd_size + sgl_size;
     if (scsi_host_get_prot(shost))
-        cmd_size += sizeof(struct scsi_data_buffer) + sgl_size;
+        cmd_size += sizeof(struct scsi_data_buffer) +
+            sizeof(struct scatterlist) * SCSI_INLINE_PROT_SG_CNT;

     memset(&shost->tag_set, 0, sizeof(shost->tag_set));
     shost->tag_set.ops = &scsi_mq_ops;

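The effect on the scsi-mq command footprint is plain arithmetic: instead of reserving min(sg_tablesize, SG_CHUNK_SIZE) scatterlist entries inline in every pre-allocated command (up to 128 entries, roughly 4KB on a typical 64-bit build), only SCSI_INLINE_SG_CNT data entries and SCSI_INLINE_PROT_SG_CNT protection entries stay inline, and anything larger is chained from the sg pool at submit time. A sketch of the size computation, mirroring scsi_mq_inline_sgl_size() above (the 4KB figure is the common case, not a guarantee):

#include <linux/kernel.h>
#include <linux/scatterlist.h>

#define SCSI_INLINE_SG_CNT 2  /* as defined in the hunk above */

/* Bytes of scatterlist kept inline in each pre-allocated command. */
static unsigned int inline_sgl_bytes(unsigned short sg_tablesize)
{
    return min_t(unsigned int, sg_tablesize, SCSI_INLINE_SG_CNT) *
        sizeof(struct scatterlist);
}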
@@ -335,7 +335,7 @@ static void pvscsi_create_sg(struct pvscsi_ctx *ctx,
     BUG_ON(count > PVSCSI_MAX_NUM_SG_ENTRIES_PER_SEGMENT);

     sge = &ctx->sgl->sge[0];
-    for (i = 0; i < count; i++, sg++) {
+    for (i = 0; i < count; i++, sg = sg_next(sg)) {
         sge[i].addr   = sg_dma_address(sg);
         sge[i].length = sg_dma_len(sg);
         sge[i].flags  = 0;

@@ -735,7 +735,7 @@ transfer_bytes(const wd33c93_regs regs, struct scsi_cmnd *cmd,
  * source or destination for THIS transfer.
  */
     if (!cmd->SCp.this_residual && cmd->SCp.buffers_residual) {
-        ++cmd->SCp.buffer;
+        cmd->SCp.buffer = sg_next(cmd->SCp.buffer);
         --cmd->SCp.buffers_residual;
         cmd->SCp.this_residual = cmd->SCp.buffer->length;
         cmd->SCp.ptr = sg_virt(cmd->SCp.buffer);

@@ -871,12 +871,11 @@ static void do_scsi_nolinuxstat(struct uiscmdrsp *cmdrsp,
         return;
     }

-    sg = scsi_sglist(scsicmd);
-    for (i = 0; i < scsi_sg_count(scsicmd); i++) {
-        this_page_orig = kmap_atomic(sg_page(sg + i));
+    scsi_for_each_sg(scsicmd, sg, scsi_sg_count(scsicmd), i) {
+        this_page_orig = kmap_atomic(sg_page(sg));
         this_page = (void *)((unsigned long)this_page_orig |
-                     sg[i].offset);
-        memcpy(this_page, buf + bufind, sg[i].length);
+                     sg->offset);
+        memcpy(this_page, buf + bufind, sg->length);
         kunmap_atomic(this_page_orig);
     }
     kfree(buf);

@@ -488,7 +488,6 @@ static void mts_command_done( struct urb *transfer )

 static void mts_do_sg (struct urb* transfer)
 {
-    struct scatterlist * sg;
     int status = transfer->status;
     MTS_INT_INIT();

@@ -500,13 +499,12 @@ static void mts_do_sg (struct urb* transfer)
         mts_transfer_cleanup(transfer);
     }

-    sg = scsi_sglist(context->srb);
-    context->fragment++;
+    context->curr_sg = sg_next(context->curr_sg);
     mts_int_submit_urb(transfer,
                context->data_pipe,
-               sg_virt(&sg[context->fragment]),
-               sg[context->fragment].length,
-               context->fragment + 1 == scsi_sg_count(context->srb) ?
+               sg_virt(context->curr_sg),
+               context->curr_sg->length,
+               sg_is_last(context->curr_sg) ?
                mts_data_done : mts_do_sg);
 }

@@ -526,22 +524,20 @@ static void
 mts_build_transfer_context(struct scsi_cmnd *srb, struct mts_desc* desc)
 {
     int pipe;
-    struct scatterlist * sg;

     MTS_DEBUG_GOT_HERE();

     desc->context.instance = desc;
     desc->context.srb = srb;
-    desc->context.fragment = 0;

     if (!scsi_bufflen(srb)) {
         desc->context.data = NULL;
         desc->context.data_length = 0;
         return;
     } else {
-        sg = scsi_sglist(srb);
-        desc->context.data = sg_virt(&sg[0]);
-        desc->context.data_length = sg[0].length;
+        desc->context.curr_sg = scsi_sglist(srb);
+        desc->context.data = sg_virt(desc->context.curr_sg);
+        desc->context.data_length = desc->context.curr_sg->length;
     }

@@ -21,7 +21,7 @@ struct mts_transfer_context
     void *data;
     unsigned data_length;
     int data_pipe;
-    int fragment;
+    struct scatterlist *curr_sg;

     u8 *scsi_status; /* status returned from ep_response after command completion */
 };

@@ -266,10 +266,11 @@ int sg_split(struct scatterlist *in, const int in_mapped_nents,
 typedef struct scatterlist *(sg_alloc_fn)(unsigned int, gfp_t);
 typedef void (sg_free_fn)(struct scatterlist *, unsigned int);

-void __sg_free_table(struct sg_table *, unsigned int, bool, sg_free_fn *);
+void __sg_free_table(struct sg_table *, unsigned int, unsigned int,
+             sg_free_fn *);
 void sg_free_table(struct sg_table *);
 int __sg_alloc_table(struct sg_table *, unsigned int, unsigned int,
-             struct scatterlist *, gfp_t, sg_alloc_fn *);
+             struct scatterlist *, unsigned int, gfp_t, sg_alloc_fn *);
 int sg_alloc_table(struct sg_table *, unsigned int, gfp_t);
 int __sg_alloc_table_from_pages(struct sg_table *sgt, struct page **pages,
                 unsigned int n_pages, unsigned int offset,

@@ -331,9 +332,11 @@ size_t sg_zero_buffer(struct scatterlist *sgl, unsigned int nents,
 #endif

 #ifdef CONFIG_SG_POOL
-void sg_free_table_chained(struct sg_table *table, bool first_chunk);
+void sg_free_table_chained(struct sg_table *table,
+               unsigned nents_first_chunk);
 int sg_alloc_table_chained(struct sg_table *table, int nents,
-               struct scatterlist *first_chunk);
+               struct scatterlist *first_chunk,
+               unsigned nents_first_chunk);
 #endif

 /*

@@ -179,7 +179,8 @@ static void sg_kfree(struct scatterlist *sg, unsigned int nents)
  * __sg_free_table - Free a previously mapped sg table
  * @table:	The sg table header to use
  * @max_ents:	The maximum number of entries per single scatterlist
- * @skip_first_chunk: don't free the (preallocated) first scatterlist chunk
+ * @nents_first_chunk: Number of entries int the (preallocated) first
+ * 	scatterlist chunk, 0 means no such preallocated first chunk
  * @free_fn:	Free function
  *
  *  Description:

@@ -189,9 +190,10 @@ static void sg_kfree(struct scatterlist *sg, unsigned int nents)
  *
 **/
 void __sg_free_table(struct sg_table *table, unsigned int max_ents,
-             bool skip_first_chunk, sg_free_fn *free_fn)
+             unsigned int nents_first_chunk, sg_free_fn *free_fn)
 {
     struct scatterlist *sgl, *next;
+    unsigned curr_max_ents = nents_first_chunk ?: max_ents;

     if (unlikely(!table->sgl))
         return;

@@ -207,9 +209,9 @@ void __sg_free_table(struct sg_table *table, unsigned int max_ents,
          * sg_size is then one less than alloc size, since the last
          * element is the chain pointer.
          */
-        if (alloc_size > max_ents) {
-            next = sg_chain_ptr(&sgl[max_ents - 1]);
-            alloc_size = max_ents;
+        if (alloc_size > curr_max_ents) {
+            next = sg_chain_ptr(&sgl[curr_max_ents - 1]);
+            alloc_size = curr_max_ents;
             sg_size = alloc_size - 1;
         } else {
             sg_size = alloc_size;

@@ -217,11 +219,12 @@ void __sg_free_table(struct sg_table *table, unsigned int max_ents,
         }

         table->orig_nents -= sg_size;
-        if (skip_first_chunk)
-            skip_first_chunk = false;
+        if (nents_first_chunk)
+            nents_first_chunk = 0;
         else
             free_fn(sgl, alloc_size);
         sgl = next;
+        curr_max_ents = max_ents;
     }

     table->sgl = NULL;

@@ -244,6 +247,8 @@ EXPORT_SYMBOL(sg_free_table);
  * @table:	The sg table header to use
  * @nents:	Number of entries in sg list
  * @max_ents:	The maximum number of entries the allocator returns per call
+ * @nents_first_chunk: Number of entries int the (preallocated) first
+ * 	scatterlist chunk, 0 means no such preallocated chunk provided by user
  * @gfp_mask:	GFP allocation mask
  * @alloc_fn:	Allocator to use
  *

@@ -260,10 +265,13 @@ EXPORT_SYMBOL(sg_free_table);
 **/
 int __sg_alloc_table(struct sg_table *table, unsigned int nents,
              unsigned int max_ents, struct scatterlist *first_chunk,
-             gfp_t gfp_mask, sg_alloc_fn *alloc_fn)
+             unsigned int nents_first_chunk, gfp_t gfp_mask,
+             sg_alloc_fn *alloc_fn)
 {
     struct scatterlist *sg, *prv;
     unsigned int left;
+    unsigned curr_max_ents = nents_first_chunk ?: max_ents;
+    unsigned prv_max_ents;

     memset(table, 0, sizeof(*table));

@@ -279,8 +287,8 @@ int __sg_alloc_table(struct sg_table *table, unsigned int nents,
     do {
         unsigned int sg_size, alloc_size = left;

-        if (alloc_size > max_ents) {
-            alloc_size = max_ents;
+        if (alloc_size > curr_max_ents) {
+            alloc_size = curr_max_ents;
             sg_size = alloc_size - 1;
         } else
             sg_size = alloc_size;

@@ -314,7 +322,7 @@ int __sg_alloc_table(struct sg_table *table, unsigned int nents,
          * If this is not the first mapping, chain previous part.
          */
         if (prv)
-            sg_chain(prv, max_ents, sg);
+            sg_chain(prv, prv_max_ents, sg);
         else
             table->sgl = sg;

@@ -325,6 +333,8 @@ int __sg_alloc_table(struct sg_table *table, unsigned int nents,
             sg_mark_end(&sg[sg_size - 1]);

         prv = sg;
+        prv_max_ents = curr_max_ents;
+        curr_max_ents = max_ents;
     } while (left);

     return 0;

@@ -347,9 +357,9 @@ int sg_alloc_table(struct sg_table *table, unsigned int nents, gfp_t gfp_mask)
     int ret;

     ret = __sg_alloc_table(table, nents, SG_MAX_SINGLE_ALLOC,
-                   NULL, gfp_mask, sg_kmalloc);
+                   NULL, 0, gfp_mask, sg_kmalloc);
     if (unlikely(ret))
-        __sg_free_table(table, SG_MAX_SINGLE_ALLOC, false, sg_kfree);
+        __sg_free_table(table, SG_MAX_SINGLE_ALLOC, 0, sg_kfree);

     return ret;
 }

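The reason __sg_alloc_table() and __sg_free_table() now take the first chunk's size is that chunk sizes are no longer uniform: the preallocated first chunk may be smaller than max_ents, and when more space is needed its last slot is turned into a chain entry, so both the allocation and the free path have to use the first chunk's own size on the first iteration and max_ents afterwards. The layout can be built by hand with the public helpers; a small illustrative sketch (assumes sg chaining support, i.e. CONFIG_ARCH_NO_SG_CHAIN is not set):

#include <linux/scatterlist.h>

#define FIRST_CHUNK_ENTS 2

/*
 * Chain a small inline chunk to a larger chunk: the last slot of the
 * first chunk becomes a chain entry, so only one data entry of the
 * inline chunk remains usable. Tracking that asymmetry explicitly is
 * exactly what the new nents_first_chunk parameter does.
 */
static struct scatterlist *chain_inline(struct scatterlist *inline_sgl,
                                        struct scatterlist *extra,
                                        unsigned int extra_ents)
{
    sg_init_table(inline_sgl, FIRST_CHUNK_ENTS);
    sg_init_table(extra, extra_ents);
    sg_chain(inline_sgl, FIRST_CHUNK_ENTS, extra);
    return inline_sgl;
}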
@@ -70,18 +70,27 @@ static struct scatterlist *sg_pool_alloc(unsigned int nents, gfp_t gfp_mask)
 /**
  * sg_free_table_chained - Free a previously mapped sg table
  * @table:	The sg table header to use
- * @first_chunk: was first_chunk not NULL in sg_alloc_table_chained?
+ * @nents_first_chunk: size of the first_chunk SGL passed to
+ *		sg_alloc_table_chained
  *
  *  Description:
  *    Free an sg table previously allocated and setup with
  *    sg_alloc_table_chained().
  *
+ *    @nents_first_chunk has to be same with that same parameter passed
+ *    to sg_alloc_table_chained().
+ *
 **/
-void sg_free_table_chained(struct sg_table *table, bool first_chunk)
+void sg_free_table_chained(struct sg_table *table,
+		unsigned nents_first_chunk)
 {
-    if (first_chunk && table->orig_nents <= SG_CHUNK_SIZE)
+    if (table->orig_nents <= nents_first_chunk)
         return;
-    __sg_free_table(table, SG_CHUNK_SIZE, first_chunk, sg_pool_free);
+
+    if (nents_first_chunk == 1)
+        nents_first_chunk = 0;
+
+    __sg_free_table(table, SG_CHUNK_SIZE, nents_first_chunk, sg_pool_free);
 }
 EXPORT_SYMBOL_GPL(sg_free_table_chained);

@@ -90,31 +99,41 @@ EXPORT_SYMBOL_GPL(sg_free_table_chained);
  * @table:	The sg table header to use
  * @nents:	Number of entries in sg list
  * @first_chunk: first SGL
+ * @nents_first_chunk: number of the SGL of @first_chunk
  *
  *  Description:
  *    Allocate and chain SGLs in an sg table. If @nents@ is larger than
- *    SG_CHUNK_SIZE a chained sg table will be setup.
+ *    @nents_first_chunk a chained sg table will be setup. @first_chunk is
+ *    ignored if nents_first_chunk <= 1 because user expects the SGL points
+ *    non-chain SGL.
  *
 **/
 int sg_alloc_table_chained(struct sg_table *table, int nents,
-		struct scatterlist *first_chunk)
+		struct scatterlist *first_chunk, unsigned nents_first_chunk)
 {
     int ret;

     BUG_ON(!nents);

-    if (first_chunk) {
-        if (nents <= SG_CHUNK_SIZE) {
+    if (first_chunk && nents_first_chunk) {
+        if (nents <= nents_first_chunk) {
             table->nents = table->orig_nents = nents;
             sg_init_table(table->sgl, nents);
             return 0;
         }
     }

+    /* User supposes that the 1st SGL includes real entry */
+    if (nents_first_chunk <= 1) {
+        first_chunk = NULL;
+        nents_first_chunk = 0;
+    }
+
     ret = __sg_alloc_table(table, nents, SG_CHUNK_SIZE,
-                   first_chunk, GFP_ATOMIC, sg_pool_alloc);
+                   first_chunk, nents_first_chunk,
+                   GFP_ATOMIC, sg_pool_alloc);
     if (unlikely(ret))
-        sg_free_table_chained(table, (bool)first_chunk);
+        sg_free_table_chained(table, nents_first_chunk);
     return ret;
 }
 EXPORT_SYMBOL_GPL(sg_alloc_table_chained);

@@ -73,7 +73,8 @@ svc_rdma_get_rw_ctxt(struct svcxprt_rdma *rdma, unsigned int sges)

     ctxt->rw_sg_table.sgl = ctxt->rw_first_sgl;
     if (sg_alloc_table_chained(&ctxt->rw_sg_table, sges,
-                   ctxt->rw_sg_table.sgl)) {
+                   ctxt->rw_sg_table.sgl,
+                   SG_CHUNK_SIZE)) {
         kfree(ctxt);
         ctxt = NULL;
     }

@@ -84,7 +85,7 @@ svc_rdma_get_rw_ctxt(struct svcxprt_rdma *rdma, unsigned int sges)
 static void svc_rdma_put_rw_ctxt(struct svcxprt_rdma *rdma,
                  struct svc_rdma_rw_ctxt *ctxt)
 {
-    sg_free_table_chained(&ctxt->rw_sg_table, true);
+    sg_free_table_chained(&ctxt->rw_sg_table, SG_CHUNK_SIZE);

     spin_lock(&rdma->sc_rw_ctxt_lock);
     list_add(&ctxt->rw_list, &rdma->sc_rw_ctxts);
