block: Consolidate phys_segment and hw_segment limits
Except for SCSI, no device drivers distinguish between physical and
hardware segment limits. Consolidate the two into a single segment
limit.

Signed-off-by: Martin K. Petersen <martin.petersen@oracle.com>
Signed-off-by: Jens Axboe <jens.axboe@oracle.com>
commit 8a78362c4e
parent 086fa5ff08
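In driver terms the change collapses what was almost always a duplicated pair of calls into one. A minimal before/after sketch of a typical queue setup; the queue variable q and the MY_DEV_MAX_SG constant are illustrative, not taken from any driver in this patch:

    /* Before: two limits, nearly always set to the same value. */
    blk_queue_max_phys_segments(q, MY_DEV_MAX_SG); /* longest scatterlist the driver builds */
    blk_queue_max_hw_segments(q, MY_DEV_MAX_SG);   /* address/length pairs the adapter accepts */

    /* After: a single limit expresses both constraints. */
    blk_queue_max_segments(q, MY_DEV_MAX_SG);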
@@ -849,7 +849,7 @@ static int ubd_add(int n, char **error_out)
        }
        ubd_dev->queue->queuedata = ubd_dev;

-       blk_queue_max_hw_segments(ubd_dev->queue, MAX_SG);
+       blk_queue_max_segments(ubd_dev->queue, MAX_SG);
        err = ubd_disk_register(UBD_MAJOR, ubd_dev->size, n, &ubd_gendisk[n]);
        if(err){
                *error_out = "Failed to register device";
@@ -1614,8 +1614,7 @@ int blk_rq_check_limits(struct request_queue *q, struct request *rq)
         * limitation.
         */
        blk_recalc_rq_segments(rq);
-       if (rq->nr_phys_segments > queue_max_phys_segments(q) ||
-           rq->nr_phys_segments > queue_max_hw_segments(q)) {
+       if (rq->nr_phys_segments > queue_max_segments(q)) {
                printk(KERN_ERR "%s: over max segments limit.\n", __func__);
                return -EIO;
        }
@@ -206,8 +206,7 @@ static inline int ll_new_hw_segment(struct request_queue *q,
 {
        int nr_phys_segs = bio_phys_segments(q, bio);

-       if (req->nr_phys_segments + nr_phys_segs > queue_max_hw_segments(q) ||
-           req->nr_phys_segments + nr_phys_segs > queue_max_phys_segments(q)) {
+       if (req->nr_phys_segments + nr_phys_segs > queue_max_segments(q)) {
                req->cmd_flags |= REQ_NOMERGE;
                if (req == q->last_merge)
                        q->last_merge = NULL;
@@ -300,10 +299,7 @@ static int ll_merge_requests_fn(struct request_queue *q, struct request *req,
                total_phys_segments--;
        }

-       if (total_phys_segments > queue_max_phys_segments(q))
-               return 0;
-
-       if (total_phys_segments > queue_max_hw_segments(q))
+       if (total_phys_segments > queue_max_segments(q))
                return 0;

        /* Merge is OK... */
@@ -91,8 +91,7 @@ EXPORT_SYMBOL_GPL(blk_queue_lld_busy);
  */
 void blk_set_default_limits(struct queue_limits *lim)
 {
-       lim->max_phys_segments = MAX_PHYS_SEGMENTS;
-       lim->max_hw_segments = MAX_HW_SEGMENTS;
+       lim->max_segments = BLK_MAX_SEGMENTS;
        lim->seg_boundary_mask = BLK_SEG_BOUNDARY_MASK;
        lim->max_segment_size = BLK_MAX_SEGMENT_SIZE;
        lim->max_sectors = BLK_DEF_MAX_SECTORS;
@@ -252,17 +251,15 @@ void blk_queue_max_discard_sectors(struct request_queue *q,
 EXPORT_SYMBOL(blk_queue_max_discard_sectors);

 /**
- * blk_queue_max_phys_segments - set max phys segments for a request for this queue
+ * blk_queue_max_segments - set max hw segments for a request for this queue
  * @q:  the request queue for the device
  * @max_segments:  max number of segments
  *
  * Description:
  *    Enables a low level driver to set an upper limit on the number of
- *    physical data segments in a request.  This would be the largest sized
- *    scatter list the driver could handle.
+ *    hw data segments in a request.
  **/
-void blk_queue_max_phys_segments(struct request_queue *q,
-                                unsigned short max_segments)
+void blk_queue_max_segments(struct request_queue *q, unsigned short max_segments)
 {
        if (!max_segments) {
                max_segments = 1;
@@ -270,33 +267,9 @@ void blk_queue_max_phys_segments(struct request_queue *q,
                       __func__, max_segments);
        }

-       q->limits.max_phys_segments = max_segments;
+       q->limits.max_segments = max_segments;
 }
-EXPORT_SYMBOL(blk_queue_max_phys_segments);
-
-/**
- * blk_queue_max_hw_segments - set max hw segments for a request for this queue
- * @q:  the request queue for the device
- * @max_segments:  max number of segments
- *
- * Description:
- *    Enables a low level driver to set an upper limit on the number of
- *    hw data segments in a request.  This would be the largest number of
- *    address/length pairs the host adapter can actually give at once
- *    to the device.
- **/
-void blk_queue_max_hw_segments(struct request_queue *q,
-                              unsigned short max_segments)
-{
-       if (!max_segments) {
-               max_segments = 1;
-               printk(KERN_INFO "%s: set to minimum %d\n",
-                      __func__, max_segments);
-       }
-
-       q->limits.max_hw_segments = max_segments;
-}
-EXPORT_SYMBOL(blk_queue_max_hw_segments);
+EXPORT_SYMBOL(blk_queue_max_segments);

 /**
  * blk_queue_max_segment_size - set max segment size for blk_rq_map_sg
@@ -531,11 +504,7 @@ int blk_stack_limits(struct queue_limits *t, struct queue_limits *b,
        t->seg_boundary_mask = min_not_zero(t->seg_boundary_mask,
                                            b->seg_boundary_mask);

-       t->max_phys_segments = min_not_zero(t->max_phys_segments,
-                                           b->max_phys_segments);
-
-       t->max_hw_segments = min_not_zero(t->max_hw_segments,
-                                         b->max_hw_segments);
+       t->max_segments = min_not_zero(t->max_segments, b->max_segments);

        t->max_segment_size = min_not_zero(t->max_segment_size,
                                           b->max_segment_size);
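blk_stack_limits() is how stacking drivers (MD/DM) fold a component device's limits into the top-level queue; after the consolidation the segment clamp is a single min_not_zero(). A toy illustration of the combine rule with invented values — min_not_zero() treats 0 as "unset" and defers to the non-zero side:

    struct queue_limits t = { .max_segments = 0 };   /* top: not yet constrained */
    struct queue_limits b = { .max_segments = 128 }; /* bottom device's limit */

    t.max_segments = min_not_zero(t.max_segments, b.max_segments); /* -> 128 */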
@@ -739,22 +708,19 @@ EXPORT_SYMBOL(blk_queue_update_dma_pad);
  * does is adjust the queue so that the buf is always appended
  * silently to the scatterlist.
  *
- * Note: This routine adjusts max_hw_segments to make room for
- * appending the drain buffer.  If you call
- * blk_queue_max_hw_segments() or blk_queue_max_phys_segments() after
- * calling this routine, you must set the limit to one fewer than your
- * device can support otherwise there won't be room for the drain
- * buffer.
+ * Note: This routine adjusts max_hw_segments to make room for appending
+ * the drain buffer.  If you call blk_queue_max_segments() after calling
+ * this routine, you must set the limit to one fewer than your device
+ * can support otherwise there won't be room for the drain buffer.
  */
 int blk_queue_dma_drain(struct request_queue *q,
                        dma_drain_needed_fn *dma_drain_needed,
                        void *buf, unsigned int size)
 {
-       if (queue_max_hw_segments(q) < 2 || queue_max_phys_segments(q) < 2)
+       if (queue_max_segments(q) < 2)
                return -EINVAL;
        /* make room for appending the drain */
-       blk_queue_max_hw_segments(q, queue_max_hw_segments(q) - 1);
-       blk_queue_max_phys_segments(q, queue_max_phys_segments(q) - 1);
+       blk_queue_max_segments(q, queue_max_segments(q) - 1);
        q->dma_drain_needed = dma_drain_needed;
        q->dma_drain_buffer = buf;
        q->dma_drain_size = size;
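The rewritten note above still encodes an ordering constraint: blk_queue_dma_drain() reserves one segment slot out of the current limit, so a driver that sets its limit afterwards must under-report by one. A sketch with invented names (my_drain_needed, my_drain_buf, MY_DRAIN_SIZE, MY_DEV_MAX_SG):

    /* Set the limit first, then let blk_queue_dma_drain() steal a slot... */
    blk_queue_max_segments(q, MY_DEV_MAX_SG);
    blk_queue_dma_drain(q, my_drain_needed, my_drain_buf, MY_DRAIN_SIZE);

    /* ...or, if blk_queue_max_segments() must run later, leave the slot free. */
    blk_queue_max_segments(q, MY_DEV_MAX_SG - 1);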
@@ -772,7 +772,7 @@ static int nv_adma_slave_config(struct scsi_device *sdev)
        }

        blk_queue_segment_boundary(sdev->request_queue, segment_boundary);
-       blk_queue_max_hw_segments(sdev->request_queue, sg_tablesize);
+       blk_queue_max_segments(sdev->request_queue, sg_tablesize);
        ata_port_printk(ap, KERN_INFO,
                "DMA mask 0x%llX, segment boundary 0x%lX, hw segs %hu\n",
                (unsigned long long)*ap->host->dev->dma_mask,
@@ -2534,7 +2534,7 @@ static bool DAC960_RegisterBlockDevice(DAC960_Controller_T *Controller)
        blk_queue_bounce_limit(RequestQueue, Controller->BounceBufferLimit);
        RequestQueue->queuedata = Controller;
-       blk_queue_max_hw_segments(RequestQueue, Controller->DriverScatterGatherLimit);
-       blk_queue_max_phys_segments(RequestQueue, Controller->DriverScatterGatherLimit);
+       blk_queue_max_segments(RequestQueue, Controller->DriverScatterGatherLimit);
        blk_queue_max_hw_sectors(RequestQueue, Controller->MaxBlocksPerCommand);
        disk->queue = RequestQueue;
        sprintf(disk->disk_name, "rd/c%dd%d", Controller->ControllerNumber, n);
@@ -1797,10 +1797,7 @@ static int cciss_add_disk(ctlr_info_t *h, struct gendisk *disk,
        blk_queue_bounce_limit(disk->queue, h->pdev->dma_mask);

-       /* This is a hardware imposed limit. */
-       blk_queue_max_hw_segments(disk->queue, h->maxsgentries);

-       /* This is a limit in the driver and could be eliminated. */
-       blk_queue_max_phys_segments(disk->queue, h->maxsgentries);
+       blk_queue_max_segments(disk->queue, h->maxsgentries);

        blk_queue_max_hw_sectors(disk->queue, h->cciss_max_sectors);
@@ -448,11 +448,8 @@ static int __init cpqarray_register_ctlr( int i, struct pci_dev *pdev)
        blk_queue_bounce_limit(q, hba[i]->pci_dev->dma_mask);

        /* This is a hardware imposed limit. */
-       blk_queue_max_hw_segments(q, SG_MAX);
+       blk_queue_max_segments(q, SG_MAX);

-       /* This is a driver limit and could be eliminated. */
-       blk_queue_max_phys_segments(q, SG_MAX);
-
        init_timer(&hba[i]->timer);
        hba[i]->timer.expires = jiffies + IDA_TIMER;
        hba[i]->timer.data = (unsigned long)hba[i];
@@ -710,8 +710,7 @@ void drbd_setup_queue_param(struct drbd_conf *mdev, unsigned int max_seg_s) __mu
        max_seg_s = min(queue_max_sectors(b) * queue_logical_block_size(b), max_seg_s);

        blk_queue_max_hw_sectors(q, max_seg_s >> 9);
-       blk_queue_max_phys_segments(q, max_segments ? max_segments : MAX_PHYS_SEGMENTS);
-       blk_queue_max_hw_segments(q, max_segments ? max_segments : MAX_HW_SEGMENTS);
+       blk_queue_max_segments(q, max_segments ? max_segments : BLK_MAX_SEGMENTS);
        blk_queue_max_segment_size(q, max_seg_s);
        blk_queue_logical_block_size(q, 512);
        blk_queue_segment_boundary(q, PAGE_SIZE-1);
@@ -956,8 +956,7 @@ static int __init pf_init(void)
                return -ENOMEM;
        }

-       blk_queue_max_phys_segments(pf_queue, cluster);
-       blk_queue_max_hw_segments(pf_queue, cluster);
+       blk_queue_max_segments(pf_queue, cluster);

        for (pf = units, unit = 0; unit < PF_UNITS; pf++, unit++) {
                struct gendisk *disk = pf->disk;
@@ -950,14 +950,14 @@ static void pkt_iosched_process_queue(struct pktcdvd_device *pd)
 static int pkt_set_segment_merging(struct pktcdvd_device *pd, struct request_queue *q)
 {
        if ((pd->settings.size << 9) / CD_FRAMESIZE
-           <= queue_max_phys_segments(q)) {
+           <= queue_max_segments(q)) {
                /*
                 * The cdrom device can handle one segment/frame
                 */
                clear_bit(PACKET_MERGE_SEGS, &pd->flags);
                return 0;
        } else if ((pd->settings.size << 9) / PAGE_SIZE
-                  <= queue_max_phys_segments(q)) {
+                  <= queue_max_segments(q)) {
                /*
                 * We can handle this case at the expense of some extra memory
                 * copies during write operations
@@ -482,8 +482,7 @@ static int __devinit ps3disk_probe(struct ps3_system_bus_device *_dev)
        blk_queue_ordered(queue, QUEUE_ORDERED_DRAIN_FLUSH,
                          ps3disk_prepare_flush);

-       blk_queue_max_phys_segments(queue, -1);
-       blk_queue_max_hw_segments(queue, -1);
+       blk_queue_max_segments(queue, -1);
        blk_queue_max_segment_size(queue, dev->bounce_size);

        gendisk = alloc_disk(PS3DISK_MINORS);
@@ -751,8 +751,7 @@ static int __devinit ps3vram_probe(struct ps3_system_bus_device *dev)
        priv->queue = queue;
        queue->queuedata = dev;
        blk_queue_make_request(queue, ps3vram_make_request);
-       blk_queue_max_phys_segments(queue, MAX_PHYS_SEGMENTS);
-       blk_queue_max_hw_segments(queue, MAX_HW_SEGMENTS);
+       blk_queue_max_segments(queue, BLK_MAX_SEGMENTS);
        blk_queue_max_segment_size(queue, BLK_MAX_SEGMENT_SIZE);
        blk_queue_max_hw_sectors(queue, BLK_SAFE_MAX_SECTORS);
@@ -691,8 +691,7 @@ static int probe_disk(struct vdc_port *port)

        port->disk = g;

-       blk_queue_max_hw_segments(q, port->ring_cookies);
-       blk_queue_max_phys_segments(q, port->ring_cookies);
+       blk_queue_max_segments(q, port->ring_cookies);
        blk_queue_max_hw_sectors(q, port->max_xfer_size);
        g->major = vdc_major;
        g->first_minor = port->vio.vdev->dev_no << PARTITION_SHIFT;
@@ -1518,8 +1518,7 @@ static int carm_init_disks(struct carm_host *host)
                        break;
                }
                disk->queue = q;
-               blk_queue_max_hw_segments(q, CARM_MAX_REQ_SG);
-               blk_queue_max_phys_segments(q, CARM_MAX_REQ_SG);
+               blk_queue_max_segments(q, CARM_MAX_REQ_SG);
                blk_queue_segment_boundary(q, CARM_SG_BOUNDARY);

                q->queuedata = port;
@@ -2320,8 +2320,7 @@ static int ub_probe_lun(struct ub_dev *sc, int lnum)
        disk->queue = q;

        blk_queue_bounce_limit(q, BLK_BOUNCE_HIGH);
-       blk_queue_max_hw_segments(q, UB_MAX_REQ_SG);
-       blk_queue_max_phys_segments(q, UB_MAX_REQ_SG);
+       blk_queue_max_segments(q, UB_MAX_REQ_SG);
        blk_queue_segment_boundary(q, 0xffffffff);      /* Dubious. */
        blk_queue_max_hw_sectors(q, UB_MAX_SECTORS);
        blk_queue_logical_block_size(q, lun->capacity.bsize);
@@ -471,8 +471,7 @@ static int probe_disk(struct viodasd_device *d)
        }

        d->disk = g;
-       blk_queue_max_hw_segments(q, VIOMAXBLOCKDMA);
-       blk_queue_max_phys_segments(q, VIOMAXBLOCKDMA);
+       blk_queue_max_segments(q, VIOMAXBLOCKDMA);
        blk_queue_max_hw_sectors(q, VIODASD_MAXSECTORS);
        g->major = VIODASD_MAJOR;
        g->first_minor = dev_no << PARTITION_SHIFT;
@@ -353,8 +353,7 @@ static int xlvbd_init_blk_queue(struct gendisk *gd, u16 sector_size)
        blk_queue_max_segment_size(rq, PAGE_SIZE);

        /* Ensure a merged request will fit in a single I/O ring slot. */
-       blk_queue_max_phys_segments(rq, BLKIF_MAX_SEGMENTS_PER_REQUEST);
-       blk_queue_max_hw_segments(rq, BLKIF_MAX_SEGMENTS_PER_REQUEST);
+       blk_queue_max_segments(rq, BLKIF_MAX_SEGMENTS_PER_REQUEST);

        /* Make sure buffer addresses are sector-aligned. */
        blk_queue_dma_alignment(rq, 511);
@@ -741,7 +741,7 @@ static int __devinit probe_gdrom_setupqueue(void)
 {
        blk_queue_logical_block_size(gd.gdrom_rq, GDROM_HARD_SECTOR);
        /* using DMA so memory will need to be contiguous */
-       blk_queue_max_hw_segments(gd.gdrom_rq, 1);
+       blk_queue_max_segments(gd.gdrom_rq, 1);
        /* set a large max size to get most from DMA */
        blk_queue_max_segment_size(gd.gdrom_rq, 0x40000);
        gd.disk->queue = gd.gdrom_rq;
@@ -616,8 +616,7 @@ static int viocd_probe(struct vio_dev *vdev, const struct vio_device_id *id)
        gendisk->first_minor = deviceno;
        strncpy(gendisk->disk_name, c->name,
                        sizeof(gendisk->disk_name));
-       blk_queue_max_hw_segments(q, 1);
-       blk_queue_max_phys_segments(q, 1);
+       blk_queue_max_segments(q, 1);
        blk_queue_max_hw_sectors(q, 4096 / 512);
        gendisk->queue = q;
        gendisk->fops = &viocd_fops;
@@ -790,8 +790,7 @@ static int ide_init_queue(ide_drive_t *drive)
                max_sg_entries >>= 1;
 #endif /* CONFIG_PCI */

-       blk_queue_max_hw_segments(q, max_sg_entries);
-       blk_queue_max_phys_segments(q, max_sg_entries);
+       blk_queue_max_segments(q, max_sg_entries);

        /* assign drive queue */
        drive->queue = q;
@@ -3739,7 +3739,7 @@ static int bio_fits_rdev(struct bio *bi)
        if ((bi->bi_size>>9) > queue_max_sectors(q))
                return 0;
        blk_recount_segments(q, bi);
-       if (bi->bi_phys_segments > queue_max_phys_segments(q))
+       if (bi->bi_phys_segments > queue_max_segments(q))
                return 0;

        if (q->merge_bvec_fn)
@@ -1227,8 +1227,7 @@ static int mspro_block_init_disk(struct memstick_dev *card)

        blk_queue_bounce_limit(msb->queue, limit);
        blk_queue_max_hw_sectors(msb->queue, MSPRO_BLOCK_MAX_PAGES);
-       blk_queue_max_phys_segments(msb->queue, MSPRO_BLOCK_MAX_SEGS);
-       blk_queue_max_hw_segments(msb->queue, MSPRO_BLOCK_MAX_SEGS);
+       blk_queue_max_segments(msb->queue, MSPRO_BLOCK_MAX_SEGS);
        blk_queue_max_segment_size(msb->queue,
                                   MSPRO_BLOCK_MAX_PAGES * msb->page_size);
@@ -1065,9 +1065,8 @@ static int i2o_block_probe(struct device *dev)
        queue = gd->queue;
        queue->queuedata = i2o_blk_dev;

-       blk_queue_max_phys_segments(queue, I2O_MAX_PHYS_SEGMENTS);
        blk_queue_max_hw_sectors(queue, max_sectors);
-       blk_queue_max_hw_segments(queue, i2o_sg_tablesize(c, body_size));
+       blk_queue_max_segments(queue, i2o_sg_tablesize(c, body_size));

        osm_debug("max sectors = %d\n", queue->max_sectors);
        osm_debug("phys segments = %d\n", queue->max_phys_segments);
@@ -155,8 +155,7 @@ int mmc_init_queue(struct mmc_queue *mq, struct mmc_card *card, spinlock_t *lock
                if (mq->bounce_buf) {
                        blk_queue_bounce_limit(mq->queue, BLK_BOUNCE_ANY);
                        blk_queue_max_hw_sectors(mq->queue, bouncesz / 512);
-                       blk_queue_max_phys_segments(mq->queue, bouncesz / 512);
-                       blk_queue_max_hw_segments(mq->queue, bouncesz / 512);
+                       blk_queue_max_segments(mq->queue, bouncesz / 512);
                        blk_queue_max_segment_size(mq->queue, bouncesz);

                        mq->sg = kmalloc(sizeof(struct scatterlist),
@@ -182,8 +181,7 @@ int mmc_init_queue(struct mmc_queue *mq, struct mmc_card *card, spinlock_t *lock
                blk_queue_bounce_limit(mq->queue, limit);
                blk_queue_max_hw_sectors(mq->queue,
                        min(host->max_blk_count, host->max_req_size / 512));
-               blk_queue_max_phys_segments(mq->queue, host->max_phys_segs);
-               blk_queue_max_hw_segments(mq->queue, host->max_hw_segs);
+               blk_queue_max_segments(mq->queue, host->max_hw_segs);
                blk_queue_max_segment_size(mq->queue, host->max_seg_size);

                mq->sg = kmalloc(sizeof(struct scatterlist) *
@@ -2130,8 +2130,7 @@ static void dasd_setup_queue(struct dasd_block *block)
        blk_queue_logical_block_size(block->request_queue, block->bp_block);
        max = block->base->discipline->max_blocks << block->s2b_shift;
        blk_queue_max_hw_sectors(block->request_queue, max);
-       blk_queue_max_phys_segments(block->request_queue, -1L);
-       blk_queue_max_hw_segments(block->request_queue, -1L);
+       blk_queue_max_segments(block->request_queue, -1L);
        /* with page sized segments we can translate each segement into
         * one idaw/tidaw
         */
@@ -223,8 +223,7 @@ tapeblock_setup_device(struct tape_device * device)

        blk_queue_logical_block_size(blkdat->request_queue, TAPEBLOCK_HSEC_SIZE);
        blk_queue_max_hw_sectors(blkdat->request_queue, TAPEBLOCK_MAX_SEC);
-       blk_queue_max_phys_segments(blkdat->request_queue, -1L);
-       blk_queue_max_hw_segments(blkdat->request_queue, -1L);
+       blk_queue_max_segments(blkdat->request_queue, -1L);
        blk_queue_max_segment_size(blkdat->request_queue, -1L);
        blk_queue_segment_boundary(blkdat->request_queue, -1L);
@@ -4195,7 +4195,7 @@ static void ibmvfc_tgt_add_rport(struct ibmvfc_target *tgt)
                if (tgt->service_parms.class3_parms[0] & 0x80000000)
                        rport->supported_classes |= FC_COS_CLASS3;
                if (rport->rqst_q)
-                       blk_queue_max_hw_segments(rport->rqst_q, 1);
+                       blk_queue_max_segments(rport->rqst_q, 1);
        } else
                tgt_dbg(tgt, "rport add failed\n");
        spin_unlock_irqrestore(vhost->host->host_lock, flags);
@@ -4669,7 +4669,7 @@ static int ibmvfc_probe(struct vio_dev *vdev, const struct vio_device_id *id)
        }

        if (shost_to_fc_host(shost)->rqst_q)
-               blk_queue_max_hw_segments(shost_to_fc_host(shost)->rqst_q, 1);
+               blk_queue_max_segments(shost_to_fc_host(shost)->rqst_q, 1);
        dev_set_drvdata(dev, vhost);
        spin_lock(&ibmvfc_driver_lock);
        list_add_tail(&vhost->queue, &ibmvfc_head);
@@ -1624,8 +1624,8 @@ struct request_queue *__scsi_alloc_queue(struct Scsi_Host *shost,
        /*
         * this limit is imposed by hardware restrictions
         */
-       blk_queue_max_hw_segments(q, shost->sg_tablesize);
-       blk_queue_max_phys_segments(q, SCSI_MAX_SG_CHAIN_SEGMENTS);
+       blk_queue_max_segments(q, min_t(unsigned short, shost->sg_tablesize,
+                                       SCSI_MAX_SG_CHAIN_SEGMENTS));

        blk_queue_max_hw_sectors(q, shost->max_sectors);
        blk_queue_bounce_limit(q, scsi_calculate_bounce_limit(shost));
@@ -287,8 +287,7 @@ sg_open(struct inode *inode, struct file *filp)
        if (list_empty(&sdp->sfds)) {   /* no existing opens on this device */
                sdp->sgdebug = 0;
                q = sdp->device->request_queue;
-               sdp->sg_tablesize = min(queue_max_hw_segments(q),
-                                       queue_max_phys_segments(q));
+               sdp->sg_tablesize = queue_max_segments(q);
        }
        if ((sfp = sg_add_sfp(sdp, dev)))
                filp->private_data = sfp;
@@ -1376,8 +1375,7 @@ static Sg_device *sg_alloc(struct gendisk *disk, struct scsi_device *scsidp)
        sdp->device = scsidp;
        INIT_LIST_HEAD(&sdp->sfds);
        init_waitqueue_head(&sdp->o_excl_wait);
-       sdp->sg_tablesize = min(queue_max_hw_segments(q),
-                               queue_max_phys_segments(q));
+       sdp->sg_tablesize = queue_max_segments(q);
        sdp->index = k;
        kref_init(&sdp->d_ref);
@@ -3983,8 +3983,7 @@ static int st_probe(struct device *dev)
                return -ENODEV;
        }

-       i = min(queue_max_hw_segments(SDp->request_queue),
-               queue_max_phys_segments(SDp->request_queue));
+       i = queue_max_segments(SDp->request_queue);
        if (st_max_sg_segs < i)
                i = st_max_sg_segs;
        buffer = new_tape_buffer((SDp->host)->unchecked_isa_dma, i);
@@ -363,10 +363,7 @@ static int blkvsc_probe(struct device *device)
        blkdev->gd->queue = blk_init_queue(blkvsc_request, &blkdev->lock);

        blk_queue_max_segment_size(blkdev->gd->queue, PAGE_SIZE);
-       blk_queue_max_phys_segments(blkdev->gd->queue,
-                                   MAX_MULTIPAGE_BUFFER_COUNT);
-       blk_queue_max_hw_segments(blkdev->gd->queue,
-                                 MAX_MULTIPAGE_BUFFER_COUNT);
+       blk_queue_max_segments(blkdev->gd->queue, MAX_MULTIPAGE_BUFFER_COUNT);
        blk_queue_segment_boundary(blkdev->gd->queue, PAGE_SIZE-1);
        blk_queue_bounce_limit(blkdev->gd->queue, BLK_BOUNCE_ANY);
        blk_queue_dma_alignment(blkdev->gd->queue, 511);
fs/bio.c
@@ -507,10 +507,8 @@ int bio_get_nr_vecs(struct block_device *bdev)
        int nr_pages;

        nr_pages = ((queue_max_sectors(q) << 9) + PAGE_SIZE - 1) >> PAGE_SHIFT;
-       if (nr_pages > queue_max_phys_segments(q))
-               nr_pages = queue_max_phys_segments(q);
-       if (nr_pages > queue_max_hw_segments(q))
-               nr_pages = queue_max_hw_segments(q);
+       if (nr_pages > queue_max_segments(q))
+               nr_pages = queue_max_segments(q);

        return nr_pages;
 }
@@ -575,8 +573,7 @@ static int __bio_add_page(struct request_queue *q, struct bio *bio, struct page
                 * make this too complex.
                 */

-               while (bio->bi_phys_segments >= queue_max_phys_segments(q)
-                      || bio->bi_phys_segments >= queue_max_hw_segments(q)) {
+               while (bio->bi_phys_segments >= queue_max_segments(q)) {

                        if (retried_segments)
                                return 0;
@@ -316,8 +316,7 @@ struct queue_limits {
        unsigned int            discard_alignment;

        unsigned short          logical_block_size;
-       unsigned short          max_hw_segments;
-       unsigned short          max_phys_segments;
+       unsigned short          max_segments;

        unsigned char           misaligned;
        unsigned char           discard_misaligned;
@@ -929,8 +928,19 @@ static inline void blk_queue_max_sectors(struct request_queue *q, unsigned int m
        blk_queue_max_hw_sectors(q, max);
 }

-extern void blk_queue_max_phys_segments(struct request_queue *, unsigned short);
-extern void blk_queue_max_hw_segments(struct request_queue *, unsigned short);
+extern void blk_queue_max_segments(struct request_queue *, unsigned short);
+
+static inline void blk_queue_max_phys_segments(struct request_queue *q, unsigned short max)
+{
+       blk_queue_max_segments(q, max);
+}
+
+static inline void blk_queue_max_hw_segments(struct request_queue *q, unsigned short max)
+{
+       blk_queue_max_segments(q, max);
+}
+
+
 extern void blk_queue_max_segment_size(struct request_queue *, unsigned int);
 extern void blk_queue_max_discard_sectors(struct request_queue *q,
                                          unsigned int max_discard_sectors);
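The two static inline wrappers keep unconverted callers compiling, but note a behavioral subtlety: both wrappers now write the same field, so a driver that used to set different phys and hw limits gets last-writer-wins rather than an implicit min of the two. Values below are illustrative:

    blk_queue_max_phys_segments(q, 64);  /* q->limits.max_segments = 64 */
    blk_queue_max_hw_segments(q, 128);   /* overwrites it: max_segments = 128 */
    /* Old code effectively enforced both, i.e. 64; SCSI, the one real such
     * case, is therefore converted explicitly with min_t() above. */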
@@ -1055,14 +1065,9 @@ static inline unsigned int queue_max_hw_sectors(struct request_queue *q)
        return q->limits.max_hw_sectors;
 }

-static inline unsigned short queue_max_hw_segments(struct request_queue *q)
+static inline unsigned short queue_max_segments(struct request_queue *q)
 {
-       return q->limits.max_hw_segments;
-}
-
-static inline unsigned short queue_max_phys_segments(struct request_queue *q)
-{
-       return q->limits.max_phys_segments;
+       return q->limits.max_segments;
 }

 static inline unsigned int queue_max_segment_size(struct request_queue *q)
@@ -385,7 +385,7 @@
 /* defines for max_sectors and max_phys_segments */
 #define I2O_MAX_SECTORS                1024
 #define I2O_MAX_SECTORS_LIMITED        128
-#define I2O_MAX_PHYS_SEGMENTS  MAX_PHYS_SEGMENTS
+#define I2O_MAX_PHYS_SEGMENTS  BLK_MAX_SEGMENTS

 /*
  *     Message structures