block: remove RQF_COPY_USER

The RQF_COPY_USER flag is set for bios where the passthrough request mapping
helpers decided that bounce buffering is required.  It is then used to
pad the scatterlist for drivers that require it.  But given that
non-passthrough requests are by definition aligned, and directly mapped
pass-through requests must be aligned, it is not actually required at all.

Signed-off-by: Christoph Hellwig <hch@lst.de>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
This commit is contained in:
Christoph Hellwig 2020-04-14 09:42:21 +02:00 committed by Jens Axboe
parent 9bc5c397d8
commit e64a0e1692
4 changed files with 2 additions and 13 deletions

View File

@ -654,8 +654,6 @@ int blk_rq_map_user_iov(struct request_queue *q, struct request *rq,
bio = rq->bio; bio = rq->bio;
} while (iov_iter_count(&i)); } while (iov_iter_count(&i));
if (!bio_flagged(bio, BIO_USER_MAPPED))
rq->rq_flags |= RQF_COPY_USER;
return 0; return 0;
unmap_rq: unmap_rq:
@ -731,7 +729,6 @@ int blk_rq_map_kern(struct request_queue *q, struct request *rq, void *kbuf,
{ {
int reading = rq_data_dir(rq) == READ; int reading = rq_data_dir(rq) == READ;
unsigned long addr = (unsigned long) kbuf; unsigned long addr = (unsigned long) kbuf;
int do_copy = 0;
struct bio *bio, *orig_bio; struct bio *bio, *orig_bio;
int ret; int ret;
@ -740,8 +737,7 @@ int blk_rq_map_kern(struct request_queue *q, struct request *rq, void *kbuf,
if (!len || !kbuf) if (!len || !kbuf)
return -EINVAL; return -EINVAL;
do_copy = !blk_rq_aligned(q, addr, len) || object_is_on_stack(kbuf); if (!blk_rq_aligned(q, addr, len) || object_is_on_stack(kbuf))
if (do_copy)
bio = bio_copy_kern(q, kbuf, len, gfp_mask, reading); bio = bio_copy_kern(q, kbuf, len, gfp_mask, reading);
else else
bio = bio_map_kern(q, kbuf, len, gfp_mask); bio = bio_map_kern(q, kbuf, len, gfp_mask);
@ -752,9 +748,6 @@ int blk_rq_map_kern(struct request_queue *q, struct request *rq, void *kbuf,
bio->bi_opf &= ~REQ_OP_MASK; bio->bi_opf &= ~REQ_OP_MASK;
bio->bi_opf |= req_op(rq); bio->bi_opf |= req_op(rq);
if (do_copy)
rq->rq_flags |= RQF_COPY_USER;
orig_bio = bio; orig_bio = bio;
ret = blk_rq_append_bio(rq, &bio); ret = blk_rq_append_bio(rq, &bio);
if (unlikely(ret)) { if (unlikely(ret)) {

View File

@ -532,8 +532,7 @@ int blk_rq_map_sg(struct request_queue *q, struct request *rq,
else if (rq->bio) else if (rq->bio)
nsegs = __blk_bios_map_sg(q, rq->bio, sglist, &sg); nsegs = __blk_bios_map_sg(q, rq->bio, sglist, &sg);
if (unlikely(rq->rq_flags & RQF_COPY_USER) && if (blk_rq_bytes(rq) && (blk_rq_bytes(rq) & q->dma_pad_mask)) {
(blk_rq_bytes(rq) & q->dma_pad_mask)) {
unsigned int pad_len = unsigned int pad_len =
(q->dma_pad_mask & ~blk_rq_bytes(rq)) + 1; (q->dma_pad_mask & ~blk_rq_bytes(rq)) + 1;

View File

@ -292,7 +292,6 @@ static const char *const rqf_name[] = {
RQF_NAME(MQ_INFLIGHT), RQF_NAME(MQ_INFLIGHT),
RQF_NAME(DONTPREP), RQF_NAME(DONTPREP),
RQF_NAME(PREEMPT), RQF_NAME(PREEMPT),
RQF_NAME(COPY_USER),
RQF_NAME(FAILED), RQF_NAME(FAILED),
RQF_NAME(QUIET), RQF_NAME(QUIET),
RQF_NAME(ELVPRIV), RQF_NAME(ELVPRIV),

View File

@ -82,8 +82,6 @@ typedef __u32 __bitwise req_flags_t;
/* set for "ide_preempt" requests and also for requests for which the SCSI /* set for "ide_preempt" requests and also for requests for which the SCSI
"quiesce" state must be ignored. */ "quiesce" state must be ignored. */
#define RQF_PREEMPT ((__force req_flags_t)(1 << 8)) #define RQF_PREEMPT ((__force req_flags_t)(1 << 8))
/* contains copies of user pages */
#define RQF_COPY_USER ((__force req_flags_t)(1 << 9))
/* vaguely specified driver internal error. Ignored by the block layer */ /* vaguely specified driver internal error. Ignored by the block layer */
#define RQF_FAILED ((__force req_flags_t)(1 << 10)) #define RQF_FAILED ((__force req_flags_t)(1 << 10))
/* don't warn about errors */ /* don't warn about errors */