xfs: assure zeroed memory buffers for certain kmem allocations
Guarantee zeroed memory buffers for cases where a potential memory leak to disk can occur. In these cases, kmem_alloc is used and doesn't zero the buffer, opening the possibility of information leakage to disk. Use the existing infrastructure (xfs_buf_allocate_memory) to obtain an already zeroed buffer from kernel memory. This solution avoids the performance hit that a wholesale replacement of kmem_alloc with kmem_zalloc would cause.

Signed-off-by: Bill O'Donnell <billodo@redhat.com>
[darrick: fix bitwise complaint about kmflag_mask]
Reviewed-by: Darrick J. Wong <darrick.wong@oracle.com>
Signed-off-by: Darrick J. Wong <darrick.wong@oracle.com>
commit 3219e8cf0d (parent d5cc14d9f9)
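For context on the mechanism: kmem_alloc_io() converts XFS KM_* flags into gfp flags before allocating, so passing KM_ZERO is what ultimately becomes a __GFP_ZERO request. A minimal sketch of that conversion, modeled on kmem_flags_convert() in fs/xfs/kmem.h around this point in the tree (exact flag handling may differ between kernel versions), looks like this:

	/* Sketch only: the flag translation this patch relies on; based on
	 * kmem_flags_convert() in fs/xfs/kmem.h, details may vary by version. */
	static inline gfp_t
	kmem_flags_convert(xfs_km_flags_t flags)
	{
		gfp_t	lflags = GFP_KERNEL | __GFP_NOWARN;

		if (flags & KM_NOFS)		/* avoid fs recursion during reclaim */
			lflags &= ~__GFP_FS;
		if (flags & KM_MAYFAIL)		/* allow the allocation to fail */
			lflags |= __GFP_RETRY_MAYFAIL;
		if (flags & KM_ZERO)		/* return pre-zeroed memory */
			lflags |= __GFP_ZERO;

		return lflags;
	}

With that mapping in place, passing KM_NOFS | kmflag_mask in the buffer path and KM_MAYFAIL | KM_ZERO in the log paths is enough to get zero-filled memory without converting every kmem_alloc caller to kmem_zalloc.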
--- a/fs/xfs/xfs_buf.c
+++ b/fs/xfs/xfs_buf.c
@@ -345,6 +345,15 @@ xfs_buf_allocate_memory(
 	unsigned short		page_count, i;
 	xfs_off_t		start, end;
 	int			error;
+	xfs_km_flags_t		kmflag_mask = 0;
+
+	/*
+	 * assure zeroed buffer for non-read cases.
+	 */
+	if (!(flags & XBF_READ)) {
+		kmflag_mask |= KM_ZERO;
+		gfp_mask |= __GFP_ZERO;
+	}
 
 	/*
 	 * for buffers that are contained within a single page, just allocate
@@ -354,7 +363,8 @@ xfs_buf_allocate_memory(
 	size = BBTOB(bp->b_length);
 	if (size < PAGE_SIZE) {
 		int align_mask = xfs_buftarg_dma_alignment(bp->b_target);
-		bp->b_addr = kmem_alloc_io(size, align_mask, KM_NOFS);
+		bp->b_addr = kmem_alloc_io(size, align_mask,
+					   KM_NOFS | kmflag_mask);
 		if (!bp->b_addr) {
 			/* low memory - use alloc_page loop instead */
 			goto use_alloc_page;
--- a/fs/xfs/xfs_log.c
+++ b/fs/xfs/xfs_log.c
@@ -1443,7 +1443,7 @@ xlog_alloc_log(
 		prev_iclog = iclog;
 
 		iclog->ic_data = kmem_alloc_io(log->l_iclog_size, align_mask,
-						KM_MAYFAIL);
+						KM_MAYFAIL | KM_ZERO);
 		if (!iclog->ic_data)
 			goto out_free_iclog;
 #ifdef DEBUG
--- a/fs/xfs/xfs_log_recover.c
+++ b/fs/xfs/xfs_log_recover.c
@@ -127,7 +127,7 @@ xlog_alloc_buffer(
 	if (nbblks > 1 && log->l_sectBBsize > 1)
 		nbblks += log->l_sectBBsize;
 	nbblks = round_up(nbblks, log->l_sectBBsize);
-	return kmem_alloc_io(BBTOB(nbblks), align_mask, KM_MAYFAIL);
+	return kmem_alloc_io(BBTOB(nbblks), align_mask, KM_MAYFAIL | KM_ZERO);
 }
 
 /*