[XFS] Add a debug flag for allocations which are known to be larger than
one page.

SGI-PV: 955302
SGI-Modid: xfs-linux-melb:xfs-kern:26800a
Signed-off-by: Nathan Scott <nathans@sgi.com>
Signed-off-by: Tim Shimmin <tes@sgi.com>
commit efb8ad7e94
parent 3f89243c5b
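The intent of the new flag, shown as a small caller-side sketch (illustrative only, not part of the patch; kmem_zalloc(), KM_SLEEP and KM_LARGE come from the hunks below, while the function name and the size passed in are made up):

	/*
	 * Hypothetical example: an allocation the caller already knows can
	 * exceed PAGE_SIZE is tagged with KM_LARGE, so the new DEBUG-only
	 * check in kmem_alloc() does not printk a warning and dump_stack().
	 * The same call without KM_LARGE would trip the warning on DEBUG
	 * kernels whenever size > PAGE_SIZE.  Needs the KM_* flags and
	 * kmem_zalloc() prototype from the XFS kmem header.
	 */
	static void *
	xfs_alloc_big_table(size_t size)	/* assumed: size > PAGE_SIZE */
	{
		return kmem_zalloc(size, KM_SLEEP | KM_LARGE);
	}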
@@ -34,6 +34,14 @@ kmem_alloc(size_t size, unsigned int __nocast flags)
 	gfp_t		lflags = kmem_flags_convert(flags);
 	void		*ptr;
 
+#ifdef DEBUG
+	if (unlikely(!(flags & KM_LARGE) && (size > PAGE_SIZE))) {
+		printk(KERN_WARNING "Large %s attempt, size=%ld\n",
+			__FUNCTION__, (long)size);
+		dump_stack();
+	}
+#endif
+
 	do {
 		if (size < MAX_SLAB_SIZE || retries > MAX_VMALLOCS)
 			ptr = kmalloc(size, lflags);
@@ -30,6 +30,7 @@
 #define KM_NOSLEEP	0x0002u
 #define KM_NOFS		0x0004u
 #define KM_MAYFAIL	0x0008u
+#define KM_LARGE	0x0010u
 
 /*
  * We use a special process flag to avoid recursive callbacks into
@@ -41,7 +42,7 @@ kmem_flags_convert(unsigned int __nocast flags)
 {
 	gfp_t	lflags;
 
-	BUG_ON(flags & ~(KM_SLEEP|KM_NOSLEEP|KM_NOFS|KM_MAYFAIL));
+	BUG_ON(flags & ~(KM_SLEEP|KM_NOSLEEP|KM_NOFS|KM_MAYFAIL|KM_LARGE));
 
 	if (flags & KM_NOSLEEP) {
 		lflags = GFP_ATOMIC | __GFP_NOWARN;
@@ -768,7 +768,7 @@ xfs_buf_get_noaddr(
 	_xfs_buf_initialize(bp, target, 0, len, 0);
 
  try_again:
-	data = kmem_alloc(malloc_len, KM_SLEEP | KM_MAYFAIL);
+	data = kmem_alloc(malloc_len, KM_SLEEP | KM_MAYFAIL | KM_LARGE);
 	if (unlikely(data == NULL))
 		goto fail_free_buf;
 
@@ -112,17 +112,17 @@ xfs_Gqm_init(void)
 {
 	xfs_dqhash_t	*udqhash, *gdqhash;
 	xfs_qm_t	*xqm;
-	uint		i, hsize, flags = KM_SLEEP | KM_MAYFAIL;
+	uint		i, hsize, flags = KM_SLEEP | KM_MAYFAIL | KM_LARGE;
 
 	/*
 	 * Initialize the dquot hash tables.
 	 */
 	hsize = XFS_QM_HASHSIZE_HIGH;
-	while (!(udqhash = kmem_zalloc(hsize * sizeof(xfs_dqhash_t), flags))) {
+	while (!(udqhash = kmem_zalloc(hsize * sizeof(*udqhash), flags))) {
 		if ((hsize >>= 1) <= XFS_QM_HASHSIZE_LOW)
 			flags = KM_SLEEP;
 	}
-	gdqhash = kmem_zalloc(hsize * sizeof(xfs_dqhash_t), KM_SLEEP);
+	gdqhash = kmem_zalloc(hsize * sizeof(*gdqhash), KM_SLEEP | KM_LARGE);
 	ndquot = hsize << 8;
 
 	xqm = kmem_zalloc(sizeof(xfs_qm_t), KM_SLEEP);
@@ -75,7 +75,7 @@ ktrace_alloc(int nentries, unsigned int __nocast sleep)
 			sleep);
 	} else {
 		ktep = (ktrace_entry_t*)kmem_zalloc((nentries * sizeof(*ktep)),
-						    sleep);
+						    sleep | KM_LARGE);
 	}
 
 	if (ktep == NULL) {
@@ -50,7 +50,7 @@ void
 xfs_ihash_init(xfs_mount_t *mp)
 {
 	__uint64_t	icount;
-	uint		i, flags = KM_SLEEP | KM_MAYFAIL;
+	uint		i, flags = KM_SLEEP | KM_MAYFAIL | KM_LARGE;
 
 	if (!mp->m_ihsize) {
 		icount = mp->m_maxicount ? mp->m_maxicount :
@@ -95,7 +95,7 @@ xfs_chash_init(xfs_mount_t *mp)
 	mp->m_chsize = min_t(uint, mp->m_chsize, mp->m_ihsize);
 	mp->m_chash = (xfs_chash_t *)kmem_zalloc(mp->m_chsize
 						 * sizeof(xfs_chash_t),
-						 KM_SLEEP);
+						 KM_SLEEP | KM_LARGE);
 	for (i = 0; i < mp->m_chsize; i++) {
 		spinlock_init(&mp->m_chash[i].ch_lock,"xfshash");
 	}
@@ -1196,7 +1196,7 @@ xlog_alloc_log(xfs_mount_t *mp,
 			kmem_zalloc(sizeof(xlog_in_core_t), KM_SLEEP);
 		iclog = *iclogp;
 		iclog->hic_data = (xlog_in_core_2_t *)
-			kmem_zalloc(iclogsize, KM_SLEEP);
+			kmem_zalloc(iclogsize, KM_SLEEP | KM_LARGE);
 
 		iclog->ic_prev = prev_iclog;
 		prev_iclog = iclog;