block, drivers: add REQ_OP_FLUSH operation
This adds a REQ_OP_FLUSH operation that is sent to request_fn based
drivers by the block layer's flush code, instead of sending requests
with the request->cmd_flags REQ_FLUSH bit set.

Signed-off-by: Mike Christie <mchristi@redhat.com>
Reviewed-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: Hannes Reinecke <hare@suse.com>
Signed-off-by: Jens Axboe <axboe@fb.com>
parent 4e1b2d52a8
commit 3a5e02ced1
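The commit message summarizes the whole series of hunks below: every request_fn based driver stops testing a flag and starts testing the request's operation. As a sketch of the repeated pattern (handle_flush() is a hypothetical helper, not from any driver in this diff):

        /* Before: flush was a modifier bit in req->cmd_flags. */
        if (req->cmd_flags & REQ_FLUSH)
                handle_flush(req);

        /* After: flush is the request's operation, read via req_op(). */
        if (req_op(req) == REQ_OP_FLUSH)
                handle_flush(req);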
@@ -73,9 +73,9 @@ doing:
 
 	blk_queue_write_cache(sdkp->disk->queue, true, false);
 
-and handle empty REQ_FLUSH requests in its prep_fn/request_fn. Note that
+and handle empty REQ_OP_FLUSH requests in its prep_fn/request_fn. Note that
 REQ_FLUSH requests with a payload are automatically turned into a sequence
-of an empty REQ_FLUSH request followed by the actual write by the block
+of an empty REQ_OP_FLUSH request followed by the actual write by the block
 layer. For devices that also support the FUA bit the block layer needs
 to be told to pass through the REQ_FUA bit using:
 
@@ -83,4 +83,4 @@ to be told to pass through the REQ_FUA bit using:
 
 and the driver must handle write requests that have the REQ_FUA bit set
 in prep_fn/request_fn. If the FUA bit is not natively supported the block
-layer turns it into an empty REQ_FLUSH request after the actual write.
+layer turns it into an empty REQ_OP_FLUSH request after the actual write.
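The two documentation hunks above spell out the whole driver contract. A minimal request_fn honoring it might look like the sketch below; mydrv_request_fn(), my_issue_flush() and my_issue_rw() are hypothetical names, and the sketch assumes the queue was registered with blk_queue_write_cache(q, true, true):

        static void mydrv_request_fn(struct request_queue *q)
        {
                struct request *req;

                while ((req = blk_fetch_request(q))) {
                        if (req_op(req) == REQ_OP_FLUSH) {
                                /* empty flush request: write back the device cache */
                                my_issue_flush(req);
                                continue;
                        }
                        /*
                         * REQ_FUA stays a per-request modifier flag; writes
                         * that carry it must reach stable media before they
                         * are completed.
                         */
                        my_issue_rw(req, req->cmd_flags & REQ_FUA);
                }
        }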
@@ -1286,7 +1286,7 @@ static void do_ubd_request(struct request_queue *q)
 
 		req = dev->request;
 
-		if (req->cmd_flags & REQ_FLUSH) {
+		if (req_op(req) == REQ_OP_FLUSH) {
 			io_req = kmalloc(sizeof(struct io_thread_req),
 					 GFP_ATOMIC);
 			if (io_req == NULL) {
@@ -29,7 +29,7 @@
  * The actual execution of flush is double buffered. Whenever a request
  * needs to execute PRE or POSTFLUSH, it queues at
  * fq->flush_queue[fq->flush_pending_idx]. Once certain criteria are met, a
- * flush is issued and the pending_idx is toggled. When the flush
+ * REQ_OP_FLUSH is issued and the pending_idx is toggled. When the flush
  * completes, all the requests which were pending are proceeded to the next
  * step. This allows arbitrary merging of different types of FLUSH/FUA
  * requests.

@@ -330,7 +330,7 @@ static bool blk_kick_flush(struct request_queue *q, struct blk_flush_queue *fq)
 	}
 
 	flush_rq->cmd_type = REQ_TYPE_FS;
-	flush_rq->cmd_flags = WRITE_FLUSH | REQ_FLUSH_SEQ;
+	req_set_op_attrs(flush_rq, REQ_OP_FLUSH, WRITE_FLUSH | REQ_FLUSH_SEQ);
 	flush_rq->rq_disk = first_rq->rq_disk;
 	flush_rq->end_io = flush_end_io;
 
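req_set_op_attrs() was introduced earlier in this series; instead of assigning cmd_flags wholesale, it stores the operation in the dedicated op bits and ORs in the remaining modifier flags. Roughly, and only as a sketch inferred from the shape of this series (the exact shift arithmetic may differ):

        #define REQ_OP_SHIFT	(8 * sizeof(u64) - REQ_OP_BITS)

        static inline void req_set_op_attrs(struct request *req, int op, int flags)
        {
                req->cmd_flags &= (1ULL << REQ_OP_SHIFT) - 1;	/* clear old op */
                req->cmd_flags |= (u64)op << REQ_OP_SHIFT;	/* store the op */
                req->cmd_flags |= flags;			/* modifier flags */
        }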
@@ -542,7 +542,7 @@ static int do_req_filebacked(struct loop_device *lo, struct request *rq)
 	pos = ((loff_t) blk_rq_pos(rq) << 9) + lo->lo_offset;
 
 	if (op_is_write(req_op(rq))) {
-		if (rq->cmd_flags & REQ_FLUSH)
+		if (req_op(rq) == REQ_OP_FLUSH)
 			ret = lo_req_flush(lo, rq);
 		else if (req_op(rq) == REQ_OP_DISCARD)
 			ret = lo_discard(lo, rq, pos);

@@ -1659,7 +1659,7 @@ static int loop_queue_rq(struct blk_mq_hw_ctx *hctx,
 	if (lo->lo_state != Lo_bound)
 		return -EIO;
 
-	if (lo->use_dio && (!(cmd->rq->cmd_flags & REQ_FLUSH) ||
+	if (lo->use_dio && (req_op(cmd->rq) != REQ_OP_FLUSH ||
 	    req_op(cmd->rq) == REQ_OP_DISCARD))
 		cmd->use_aio = true;
 	else
@@ -284,7 +284,7 @@ static int nbd_send_req(struct nbd_device *nbd, struct request *req)
 		type = NBD_CMD_DISC;
 	else if (req_op(req) == REQ_OP_DISCARD)
 		type = NBD_CMD_TRIM;
-	else if (req->cmd_flags & REQ_FLUSH)
+	else if (req_op(req) == REQ_OP_FLUSH)
 		type = NBD_CMD_FLUSH;
 	else if (rq_data_dir(req) == WRITE)
 		type = NBD_CMD_WRITE;
@@ -321,7 +321,7 @@ static void osdblk_rq_fn(struct request_queue *q)
 		 * driver-specific, etc.
 		 */
 
-		do_flush = rq->cmd_flags & REQ_FLUSH;
+		do_flush = (req_op(rq) == REQ_OP_FLUSH);
 		do_write = (rq_data_dir(rq) == WRITE);
 
 		if (!do_flush) { /* osd_flush does not use a bio */
@@ -196,7 +196,7 @@ static void ps3disk_do_request(struct ps3_storage_device *dev,
 	dev_dbg(&dev->sbd.core, "%s:%u\n", __func__, __LINE__);
 
 	while ((req = blk_fetch_request(q))) {
-		if (req->cmd_flags & REQ_FLUSH) {
+		if (req_op(req) == REQ_OP_FLUSH) {
 			if (ps3disk_submit_flush_request(dev, req))
 				break;
 		} else if (req->cmd_type == REQ_TYPE_FS) {

@@ -256,7 +256,7 @@ static irqreturn_t ps3disk_interrupt(int irq, void *data)
 		return IRQ_HANDLED;
 	}
 
-	if (req->cmd_flags & REQ_FLUSH) {
+	if (req_op(req) == REQ_OP_FLUSH) {
 		read = 0;
 		op = "flush";
 	} else {
@@ -597,7 +597,7 @@ static void skd_request_fn(struct request_queue *q)
 		data_dir = rq_data_dir(req);
 		io_flags = req->cmd_flags;
 
-		if (io_flags & REQ_FLUSH)
+		if (req_op(req) == REQ_OP_FLUSH)
 			flush++;
 
 		if (io_flags & REQ_FUA)
@@ -172,7 +172,7 @@ static int virtio_queue_rq(struct blk_mq_hw_ctx *hctx,
 	BUG_ON(req->nr_phys_segments + 2 > vblk->sg_elems);
 
 	vbr->req = req;
-	if (req->cmd_flags & REQ_FLUSH) {
+	if (req_op(req) == REQ_OP_FLUSH) {
 		vbr->out_hdr.type = cpu_to_virtio32(vblk->vdev, VIRTIO_BLK_T_FLUSH);
 		vbr->out_hdr.sector = 0;
 		vbr->out_hdr.ioprio = cpu_to_virtio32(vblk->vdev, req_get_ioprio(vbr->req));
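For context on the virtio_blk hunk: a flush travels as an ordinary command header with type VIRTIO_BLK_T_FLUSH, sector 0 and no data buffer. The header being filled in is the uapi struct, which to the best of my knowledge looks like this (see include/uapi/linux/virtio_blk.h):

        struct virtio_blk_outhdr {
                __virtio32 type;	/* VIRTIO_BLK_T_FLUSH for a flush */
                __virtio32 ioprio;	/* from req_get_ioprio() */
                __virtio64 sector;	/* 0 for a flush */
        };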
@@ -743,7 +743,7 @@ static int blkif_queue_rw_req(struct request *req, struct blkfront_ring_info *ri
 		 * The indirect operation can only be a BLKIF_OP_READ or
 		 * BLKIF_OP_WRITE
 		 */
-		BUG_ON(req->cmd_flags & (REQ_FLUSH | REQ_FUA));
+		BUG_ON(req_op(req) == REQ_OP_FLUSH || req->cmd_flags & REQ_FUA);
 		ring_req->operation = BLKIF_OP_INDIRECT;
 		ring_req->u.indirect.indirect_op = rq_data_dir(req) ?
 			BLKIF_OP_WRITE : BLKIF_OP_READ;

@@ -755,7 +755,7 @@ static int blkif_queue_rw_req(struct request *req, struct blkfront_ring_info *ri
 		ring_req->u.rw.handle = info->handle;
 		ring_req->operation = rq_data_dir(req) ?
 			BLKIF_OP_WRITE : BLKIF_OP_READ;
-		if (req->cmd_flags & (REQ_FLUSH | REQ_FUA)) {
+		if (req_op(req) == REQ_OP_FLUSH || req->cmd_flags & REQ_FUA) {
 			/*
 			 * Ideally we can do an unordered flush-to-disk.
 			 * In case the backend onlysupports barriers, use that.

@@ -865,7 +865,7 @@ static inline bool blkif_request_flush_invalid(struct request *req,
 					       struct blkfront_info *info)
 {
 	return ((req->cmd_type != REQ_TYPE_FS) ||
-		((req->cmd_flags & REQ_FLUSH) &&
+		((req_op(req) == REQ_OP_FLUSH) &&
 		 !(info->feature_flush & REQ_FLUSH)) ||
 		((req->cmd_flags & REQ_FUA) &&
 		 !(info->feature_flush & REQ_FUA)));

@@ -2055,7 +2055,7 @@ static int blkif_recover(struct blkfront_info *info)
 		/*
 		 * Get the bios in the request so we can re-queue them.
 		 */
-		if (copy[i].request->cmd_flags & REQ_FLUSH ||
+		if (req_op(copy[i].request) == REQ_OP_FLUSH ||
 		    req_op(copy[i].request) == REQ_OP_DISCARD ||
 		    copy[i].request->cmd_flags & (REQ_FUA | REQ_SECURE)) {
 			/*
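The xen-blkfront hunks show a consequence of the new split: the old code could test REQ_FLUSH and REQ_FUA with a single mask, but FLUSH is now an operation while FUA remains a modifier flag, so every combined test becomes two clauses. A sketch of the resulting idiom (setup_barrier() is a hypothetical placeholder):

        /* Before: one mask covered both cache-integrity bits. */
        if (req->cmd_flags & (REQ_FLUSH | REQ_FUA))
                setup_barrier(ring_req);

        /* After: FLUSH is an operation, FUA is still a flag. */
        if (req_op(req) == REQ_OP_FLUSH || req->cmd_flags & REQ_FUA)
                setup_barrier(ring_req);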
@@ -431,7 +431,7 @@ static int idedisk_prep_fn(struct request_queue *q, struct request *rq)
 	ide_drive_t *drive = q->queuedata;
 	struct ide_cmd *cmd;
 
-	if (!(rq->cmd_flags & REQ_FLUSH))
+	if (req_op(rq) != REQ_OP_FLUSH)
 		return BLKPREP_OK;
 
 	if (rq->special) {
@@ -2171,7 +2171,7 @@ static void dm_request_fn(struct request_queue *q)
 
 		/* always use block 0 to find the target for flushes for now */
 		pos = 0;
-		if (!(rq->cmd_flags & REQ_FLUSH))
+		if (req_op(rq) != REQ_OP_FLUSH)
 			pos = blk_rq_pos(rq);
 
 		if ((dm_request_peeked_before_merge_deadline(md) &&
@@ -1722,7 +1722,8 @@ static u8 mmc_blk_prep_packed_list(struct mmc_queue *mq, struct request *req)
 		    !IS_ALIGNED(blk_rq_sectors(next), 8))
 			break;
 
-		if (req_op(next) == REQ_OP_DISCARD || next->cmd_flags & REQ_FLUSH)
+		if (req_op(next) == REQ_OP_DISCARD ||
+		    req_op(next) == REQ_OP_FLUSH)
 			break;
 
 		if (rq_data_dir(cur) != rq_data_dir(next))

@@ -2147,7 +2148,6 @@ static int mmc_blk_issue_rq(struct mmc_queue *mq, struct request *req)
 	struct mmc_card *card = md->queue.card;
 	struct mmc_host *host = card->host;
 	unsigned long flags;
-	unsigned int cmd_flags = req ? req->cmd_flags : 0;
 
 	if (req && !mq->mqrq_prev->req)
 		/* claim host only for the first request */

@@ -2171,7 +2171,7 @@ static int mmc_blk_issue_rq(struct mmc_queue *mq, struct request *req)
 			ret = mmc_blk_issue_secdiscard_rq(mq, req);
 		else
 			ret = mmc_blk_issue_discard_rq(mq, req);
-	} else if (cmd_flags & REQ_FLUSH) {
+	} else if (req && req_op(req) == REQ_OP_FLUSH) {
 		/* complete ongoing async transfer before issuing flush */
 		if (card->host->areq)
 			mmc_blk_issue_rw_rq(mq, NULL);
@@ -3,7 +3,8 @@
 
 static inline bool mmc_req_is_special(struct request *req)
 {
-	return req && (req->cmd_flags & REQ_FLUSH || req_op(req) == REQ_OP_DISCARD);
+	return req &&
+		(req_op(req) == REQ_OP_FLUSH || req_op(req) == REQ_OP_DISCARD);
 }
 
 struct request;
@@ -87,7 +87,7 @@ static int do_blktrans_request(struct mtd_blktrans_ops *tr,
 	if (req->cmd_type != REQ_TYPE_FS)
 		return -EIO;
 
-	if (req->cmd_flags & REQ_FLUSH)
+	if (req_op(req) == REQ_OP_FLUSH)
 		return tr->flush(dev);
 
 	if (blk_rq_pos(req) + blk_rq_cur_sectors(req) >
@@ -290,7 +290,7 @@ int nvme_setup_cmd(struct nvme_ns *ns, struct request *req,
 
 	if (req->cmd_type == REQ_TYPE_DRV_PRIV)
 		memcpy(cmd, req->cmd, sizeof(*cmd));
-	else if (req->cmd_flags & REQ_FLUSH)
+	else if (req_op(req) == REQ_OP_FLUSH)
 		nvme_setup_flush(ns, cmd);
 	else if (req_op(req) == REQ_OP_DISCARD)
 		ret = nvme_setup_discard(ns, req, cmd);
@@ -1143,12 +1143,11 @@ static int sd_init_command(struct scsi_cmnd *cmd)
 		return sd_setup_discard_cmnd(cmd);
 	case REQ_OP_WRITE_SAME:
 		return sd_setup_write_same_cmnd(cmd);
+	case REQ_OP_FLUSH:
+		return sd_setup_flush_cmnd(cmd);
 	case REQ_OP_READ:
 	case REQ_OP_WRITE:
-		if (rq->cmd_flags & REQ_FLUSH)
-			return sd_setup_flush_cmnd(cmd);
-		else
-			return sd_setup_read_write_cmnd(cmd);
+		return sd_setup_read_write_cmnd(cmd);
 	default:
 		BUG();
 	}
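With flush as a first-class operation, sd_init_command() collapses into a plain switch on req_op(). The flush case ends in sd_setup_flush_cmnd(), which builds a SCSI SYNCHRONIZE CACHE(10) command; a hedged sketch of that helper's shape (field choices illustrative, not copied from sd.c):

        static int sd_setup_flush_cmnd_sketch(struct scsi_cmnd *cmd)
        {
                cmd->cmnd[0] = SYNCHRONIZE_CACHE;	/* opcode 0x35 */
                cmd->cmd_len = 10;			/* 10-byte CDB */
                cmd->transfersize = 0;			/* no data phase */
                return BLKPREP_OK;
        }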
@@ -249,9 +249,10 @@ enum req_op {
 	REQ_OP_WRITE,
 	REQ_OP_DISCARD,		/* request to discard sectors */
 	REQ_OP_WRITE_SAME,	/* write same block many times */
+	REQ_OP_FLUSH,		/* request for cache flush */
 };
 
-#define REQ_OP_BITS 2
+#define REQ_OP_BITS 3
 
 typedef unsigned int blk_qc_t;
 #define BLK_QC_T_NONE	-1U
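The REQ_OP_BITS bump is simple arithmetic: REQ_OP_FLUSH is the fifth operation (REQ_OP_READ and REQ_OP_WRITE come first in the enum), and five values need three bits since ceil(log2(5)) = 3. Assuming the op lives in the top bits of a 64-bit cmd_flags, as the helper sketched earlier suggests, decoding is one shift (a sketch, not the kernel's literal definition):

        #define REQ_OP_BITS	3	/* READ, WRITE, DISCARD, WRITE_SAME, FLUSH */
        #define REQ_OP_SHIFT	(8 * sizeof(u64) - REQ_OP_BITS)

        static inline int req_op_sketch(u64 cmd_flags)
        {
                return cmd_flags >> REQ_OP_SHIFT;	/* REQ_OP_FLUSH == 4 */
        }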
@@ -666,6 +666,9 @@ static inline bool rq_mergeable(struct request *rq)
 	if (rq->cmd_type != REQ_TYPE_FS)
 		return false;
 
+	if (req_op(rq) == REQ_OP_FLUSH)
+		return false;
+
 	if (rq->cmd_flags & REQ_NOMERGE_FLAGS)
 		return false;
 
@@ -223,6 +223,8 @@ static void __blk_add_trace(struct blk_trace *bt, sector_t sector, int bytes,
 	what |= MASK_TC_BIT(op_flags, FUA);
 	if (op == REQ_OP_DISCARD)
 		what |= BLK_TC_ACT(BLK_TC_DISCARD);
+	if (op == REQ_OP_FLUSH)
+		what |= BLK_TC_ACT(BLK_TC_FLUSH);
 
 	pid = tsk->pid;
 	if (act_log_check(bt, what, sector, pid))

@@ -1788,6 +1790,9 @@ void blk_fill_rwbs(char *rwbs, int op, u32 rw, int bytes)
 	case REQ_OP_DISCARD:
 		rwbs[i++] = 'D';
 		break;
+	case REQ_OP_FLUSH:
+		rwbs[i++] = 'F';
+		break;
 	case REQ_OP_READ:
 		rwbs[i++] = 'R';
 		break;
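The tracing change is user visible: flushes now classify under BLK_TC_FLUSH and show up as 'F' in the rwbs column of blktrace/blkparse output. A small self-contained demo of the op-to-letter mapping the patched blk_fill_rwbs() implements (enum values illustrative, compiled as ordinary user-space C):

        #include <stdio.h>

        enum req_op { REQ_OP_READ, REQ_OP_WRITE, REQ_OP_DISCARD,
                      REQ_OP_WRITE_SAME, REQ_OP_FLUSH };

        static char rwbs_char(enum req_op op)
        {
                switch (op) {
                case REQ_OP_FLUSH:	return 'F';	/* new with this patch */
                case REQ_OP_DISCARD:	return 'D';
                case REQ_OP_READ:	return 'R';
                default:		return 'W';	/* WRITE, WRITE_SAME */
                }
        }

        int main(void)
        {
                printf("a flush renders as '%c'\n", rwbs_char(REQ_OP_FLUSH));
                return 0;
        }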