xfs: cleanup I/O-related buffer flags

Remove the unused and misnamed _XBF_RUN_QUEUES flag, rename XBF_LOG_BUFFER
to the more fitting XBF_SYNCIO, and split XBF_ORDERED into XBF_FUA and
XBF_FLUSH to allow more fine-grained control over the bio flags.  Also
clean up the processing of the flags in _xfs_buf_ioapply to make more sense,
and renumber the sparse flag number space to group flags by purpose.

Signed-off-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: Alex Elder <aelder@sgi.com>
Reviewed-by: Dave Chinner <dchinner@redhat.com>
commit 1d5ae5dfee
parent c8da0faf6b
Author: Christoph Hellwig
Date:   2011-07-08 14:36:32 +02:00

3 changed files with 39 additions and 45 deletions
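
For orientation before reading the diffs: the reworked write path in
_xfs_buf_ioapply reduces to the mapping sketched below. This is a condensed,
standalone restatement of the hunk, not a verbatim excerpt; the helper name
xfs_buf_rw_flags is made up for illustration, and the sketch assumes the
2011-era block layer request flags (WRITE_SYNC, REQ_FUA, REQ_FLUSH, READA).

/* Sketch: translate the reworked XBF_* flags into a bio rw value. */
static int xfs_buf_rw_flags(xfs_buf_flags_t flags)
{
        int rw;

        if (flags & XBF_WRITE) {
                /* XBF_SYNCIO selects a synchronous write request */
                rw = (flags & XBF_SYNCIO) ? WRITE_SYNC : WRITE;
                if (flags & XBF_FUA)            /* write through the disk cache */
                        rw |= REQ_FUA;
                if (flags & XBF_FLUSH)          /* flush the disk cache first */
                        rw |= REQ_FLUSH;
        } else if (flags & XBF_READ_AHEAD) {
                rw = READA;                     /* best-effort read-ahead */
        } else {
                rw = READ;
        }
        return rw;
}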

xfs_buf.c

@@ -592,10 +592,8 @@ _xfs_buf_read(
         ASSERT(!(flags & (XBF_DELWRI|XBF_WRITE)));
         ASSERT(bp->b_bn != XFS_BUF_DADDR_NULL);
 
-        bp->b_flags &= ~(XBF_WRITE | XBF_ASYNC | XBF_DELWRI | \
-                         XBF_READ_AHEAD | _XBF_RUN_QUEUES);
-        bp->b_flags |= flags & (XBF_READ | XBF_ASYNC | \
-                         XBF_READ_AHEAD | _XBF_RUN_QUEUES);
+        bp->b_flags &= ~(XBF_WRITE | XBF_ASYNC | XBF_DELWRI | XBF_READ_AHEAD);
+        bp->b_flags |= flags & (XBF_READ | XBF_ASYNC | XBF_READ_AHEAD);
 
         status = xfs_buf_iorequest(bp);
         if (status || XFS_BUF_ISERROR(bp) || (flags & XBF_ASYNC))
@@ -1211,23 +1209,21 @@ _xfs_buf_ioapply(
         total_nr_pages = bp->b_page_count;
         map_i = 0;
 
-        if (bp->b_flags & XBF_ORDERED) {
-                ASSERT(!(bp->b_flags & XBF_READ));
-                rw = WRITE_FLUSH_FUA;
-        } else if (bp->b_flags & XBF_LOG_BUFFER) {
-                ASSERT(!(bp->b_flags & XBF_READ_AHEAD));
-                bp->b_flags &= ~_XBF_RUN_QUEUES;
-                rw = (bp->b_flags & XBF_WRITE) ? WRITE_SYNC : READ_SYNC;
-        } else if (bp->b_flags & _XBF_RUN_QUEUES) {
-                ASSERT(!(bp->b_flags & XBF_READ_AHEAD));
-                bp->b_flags &= ~_XBF_RUN_QUEUES;
-                rw = (bp->b_flags & XBF_WRITE) ? WRITE_META : READ_META;
+        if (bp->b_flags & XBF_WRITE) {
+                if (bp->b_flags & XBF_SYNCIO)
+                        rw = WRITE_SYNC;
+                else
+                        rw = WRITE;
+                if (bp->b_flags & XBF_FUA)
+                        rw |= REQ_FUA;
+                if (bp->b_flags & XBF_FLUSH)
+                        rw |= REQ_FLUSH;
+        } else if (bp->b_flags & XBF_READ_AHEAD) {
+                rw = READA;
         } else {
-                rw = (bp->b_flags & XBF_WRITE) ? WRITE :
-                     (bp->b_flags & XBF_READ_AHEAD) ? READA : READ;
+                rw = READ;
         }
 
 next_chunk:
         atomic_inc(&bp->b_io_remaining);
         nr_pages = BIO_MAX_SECTORS >> (PAGE_SHIFT - BBSHIFT);
@@ -1689,8 +1685,7 @@ xfs_buf_delwri_split(
                         break;
                 }
 
-                bp->b_flags &= ~(XBF_DELWRI|_XBF_DELWRI_Q|
-                                 _XBF_RUN_QUEUES);
+                bp->b_flags &= ~(XBF_DELWRI | _XBF_DELWRI_Q);
                 bp->b_flags |= XBF_WRITE;
                 list_move_tail(&bp->b_list, list);
                 trace_xfs_buf_delwri_split(bp, _RET_IP_);

xfs_buf.h

@@ -46,43 +46,46 @@ typedef enum {
 #define XBF_READ        (1 << 0) /* buffer intended for reading from device */
 #define XBF_WRITE       (1 << 1) /* buffer intended for writing to device */
-#define XBF_MAPPED      (1 << 2) /* buffer mapped (b_addr valid) */
+#define XBF_READ_AHEAD  (1 << 2) /* asynchronous read-ahead */
+#define XBF_MAPPED      (1 << 3) /* buffer mapped (b_addr valid) */
 #define XBF_ASYNC       (1 << 4) /* initiator will not wait for completion */
 #define XBF_DONE        (1 << 5) /* all pages in the buffer uptodate */
 #define XBF_DELWRI      (1 << 6) /* buffer has dirty pages */
 #define XBF_STALE       (1 << 7) /* buffer has been staled, do not find it */
-#define XBF_ORDERED     (1 << 11)/* use ordered writes */
-#define XBF_READ_AHEAD  (1 << 12)/* asynchronous read-ahead */
-#define XBF_LOG_BUFFER  (1 << 13)/* this is a buffer used for the log */
+
+/* I/O hints for the BIO layer */
+#define XBF_SYNCIO      (1 << 10)/* treat this buffer as synchronous I/O */
+#define XBF_FUA         (1 << 11)/* force cache write through mode */
+#define XBF_FLUSH       (1 << 12)/* flush the disk cache before a write */
 
 /* flags used only as arguments to access routines */
-#define XBF_LOCK        (1 << 14)/* lock requested */
-#define XBF_TRYLOCK     (1 << 15)/* lock requested, but do not wait */
-#define XBF_DONT_BLOCK  (1 << 16)/* do not block in current thread */
+#define XBF_LOCK        (1 << 15)/* lock requested */
+#define XBF_TRYLOCK     (1 << 16)/* lock requested, but do not wait */
+#define XBF_DONT_BLOCK  (1 << 17)/* do not block in current thread */
 
 /* flags used only internally */
-#define _XBF_PAGES      (1 << 18)/* backed by refcounted pages */
-#define _XBF_RUN_QUEUES (1 << 19)/* run block device task queue */
-#define _XBF_KMEM       (1 << 20)/* backed by heap memory */
-#define _XBF_DELWRI_Q   (1 << 21)/* buffer on delwri queue */
+#define _XBF_PAGES      (1 << 20)/* backed by refcounted pages */
+#define _XBF_KMEM       (1 << 21)/* backed by heap memory */
+#define _XBF_DELWRI_Q   (1 << 22)/* buffer on delwri queue */
 
 typedef unsigned int xfs_buf_flags_t;
 
 #define XFS_BUF_FLAGS \
         { XBF_READ,             "READ" }, \
         { XBF_WRITE,            "WRITE" }, \
+        { XBF_READ_AHEAD,       "READ_AHEAD" }, \
         { XBF_MAPPED,           "MAPPED" }, \
         { XBF_ASYNC,            "ASYNC" }, \
         { XBF_DONE,             "DONE" }, \
         { XBF_DELWRI,           "DELWRI" }, \
         { XBF_STALE,            "STALE" }, \
-        { XBF_ORDERED,          "ORDERED" }, \
-        { XBF_READ_AHEAD,       "READ_AHEAD" }, \
+        { XBF_SYNCIO,           "SYNCIO" }, \
+        { XBF_FUA,              "FUA" }, \
+        { XBF_FLUSH,            "FLUSH" }, \
         { XBF_LOCK,             "LOCK" },       /* should never be set */\
         { XBF_TRYLOCK,          "TRYLOCK" },    /* ditto */\
         { XBF_DONT_BLOCK,       "DONT_BLOCK" }, /* ditto */\
         { _XBF_PAGES,           "PAGES" }, \
-        { _XBF_RUN_QUEUES,      "RUN_QUEUES" }, \
         { _XBF_KMEM,            "KMEM" }, \
         { _XBF_DELWRI_Q,        "DELWRI_Q" }
@@ -230,8 +233,9 @@ extern void xfs_buf_terminate(void);
 #define XFS_BUF_BFLAGS(bp)      ((bp)->b_flags)
 
-#define XFS_BUF_ZEROFLAGS(bp)   ((bp)->b_flags &= \
-                        ~(XBF_READ|XBF_WRITE|XBF_ASYNC|XBF_DELWRI|XBF_ORDERED))
+#define XFS_BUF_ZEROFLAGS(bp) \
+        ((bp)->b_flags &= ~(XBF_READ|XBF_WRITE|XBF_ASYNC|XBF_DELWRI| \
+                            XBF_SYNCIO|XBF_FUA|XBF_FLUSH))
 
 void xfs_buf_stale(struct xfs_buf *bp);
 #define XFS_BUF_STALE(bp)       xfs_buf_stale(bp);
@@ -263,10 +267,6 @@ void xfs_buf_stale(struct xfs_buf *bp);
 #define XFS_BUF_UNASYNC(bp)     ((bp)->b_flags &= ~XBF_ASYNC)
 #define XFS_BUF_ISASYNC(bp)     ((bp)->b_flags & XBF_ASYNC)
 
-#define XFS_BUF_ORDERED(bp)     ((bp)->b_flags |= XBF_ORDERED)
-#define XFS_BUF_UNORDERED(bp)   ((bp)->b_flags &= ~XBF_ORDERED)
-#define XFS_BUF_ISORDERED(bp)   ((bp)->b_flags & XBF_ORDERED)
-
 #define XFS_BUF_HOLD(bp)        xfs_buf_hold(bp)
 #define XFS_BUF_READ(bp)        ((bp)->b_flags |= XBF_READ)
 #define XFS_BUF_UNREAD(bp)      ((bp)->b_flags &= ~XBF_READ)
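
One practical consequence of the new XFS_BUF_ZEROFLAGS definition above is
that reinitializing a buffer now also drops the I/O hints, so an XBF_FUA or
XBF_FLUSH request set up for one write cannot leak into the next I/O. A
minimal usage sketch (hypothetical call sequence, not taken from the patch):

        /* set up a write-through, cache-flushing write ... */
        bp->b_flags |= XBF_WRITE | XBF_SYNCIO | XBF_FUA | XBF_FLUSH;
        xfs_buf_iorequest(bp);

        /* ... and when the buffer is reset for reuse, the hints go away too */
        XFS_BUF_ZEROFLAGS(bp);  /* also clears XBF_SYNCIO, XBF_FUA and XBF_FLUSH */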

xfs_log.c

@@ -1268,7 +1268,6 @@ xlog_bdstrat(
                 return 0;
         }
 
-        bp->b_flags |= _XBF_RUN_QUEUES;
         xfs_buf_iorequest(bp);
         return 0;
 }
@@ -1369,7 +1368,7 @@ xlog_sync(xlog_t *log,
         XFS_BUF_ZEROFLAGS(bp);
         XFS_BUF_BUSY(bp);
         XFS_BUF_ASYNC(bp);
-        bp->b_flags |= XBF_LOG_BUFFER;
+        bp->b_flags |= XBF_SYNCIO;
 
         if (log->l_mp->m_flags & XFS_MOUNT_BARRIER) {
                 /*
@@ -1380,7 +1379,7 @@ xlog_sync(xlog_t *log,
                  */
                 if (log->l_mp->m_logdev_targp != log->l_mp->m_ddev_targp)
                         xfs_blkdev_issue_flush(log->l_mp->m_ddev_targp);
-                XFS_BUF_ORDERED(bp);
+                bp->b_flags |= XBF_FUA | XBF_FLUSH;
         }
 
         ASSERT(XFS_BUF_ADDR(bp) <= log->l_logBBsize-1);
@@ -1413,9 +1412,9 @@ xlog_sync(xlog_t *log,
                 XFS_BUF_ZEROFLAGS(bp);
                 XFS_BUF_BUSY(bp);
                 XFS_BUF_ASYNC(bp);
-                bp->b_flags |= XBF_LOG_BUFFER;
+                bp->b_flags |= XBF_SYNCIO;
                 if (log->l_mp->m_flags & XFS_MOUNT_BARRIER)
-                        XFS_BUF_ORDERED(bp);
+                        bp->b_flags |= XBF_FUA | XBF_FLUSH;
                 dptr = XFS_BUF_PTR(bp);
                 /*
                  * Bump the cycle numbers at the start of each block
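
Taken together, the xfs_log.c changes keep the old barrier semantics: with
XFS_MOUNT_BARRIER set, a log buffer now carries XBF_SYNCIO, XBF_FUA and
XBF_FLUSH, and the new _xfs_buf_ioapply code turns that into a WRITE_SYNC
request with REQ_FUA and REQ_FLUSH set, effectively what the old XBF_ORDERED
path obtained through WRITE_FLUSH_FUA. A condensed sketch of that path, with
the paraphrased parts marked in comments:

        /* xlog_sync(), barrier case (paraphrased from the hunks above) */
        bp->b_flags |= XBF_SYNCIO;                      /* log I/O is synchronous */
        if (log->l_mp->m_flags & XFS_MOUNT_BARRIER)
                bp->b_flags |= XBF_FUA | XBF_FLUSH;     /* replaces XFS_BUF_ORDERED(bp) */

        /* _xfs_buf_ioapply() then builds the request flags as: */
        rw = WRITE_SYNC | REQ_FUA | REQ_FLUSH;          /* old code used WRITE_FLUSH_FUA */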