commit 7878c231da
It turned out that DEBUG_SLAB_LEAK is still broken even after recent
rescue efforts: when there is a large number of objects like
kmemleak_object, which is normal on a debug kernel,

  # grep kmemleak /proc/slabinfo
  kmemleak_object 2243606 3436210 ...

reading /proc/slab_allocators could easily loop forever while processing
the kmemleak_object cache, and any additional freeing or allocating of
objects will trigger a reprocessing. To make the situation worse,
soft-lockups can easily happen in this situation, and they call
printk(), which allocates more kmemleak objects and so guarantees an
infinite loop.
Also, since it seems no one had noticed when it was totally broken more
than two years ago - see commit fcf88917dd ("slab: fix a crash by
reading /proc/slab_allocators") - probably nobody cares about it anymore
due to the decline of SLAB. Just remove it entirely.
Suggested-by: Vlastimil Babka <vbabka@suse.cz>
Suggested-by: Linus Torvalds <torvalds@linux-foundation.org>
Signed-off-by: Qian Cai <cai@lca.pw>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
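
The failure mode described in the message is a feedback cycle: each pass over the
huge kmemleak_object cache is invalidated by concurrent allocations and frees, and
the resulting soft-lockup warnings allocate even more objects. The sketch below is
a userspace illustration only, under the assumption that the reader simply restarts
its pass whenever the cache changes; the names (churn, modseq) and the counts are
made up, and nothing here is the actual /proc/slab_allocators code.

/* demo_restart_livelock.c: build with `cc -O2 -pthread demo_restart_livelock.c` */
#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

#define NOBJS           100000UL        /* objects to walk in one pass */
#define MAX_RESTARTS    1000000UL       /* give up so the demo terminates */

static atomic_ulong modseq;             /* bumped on every simulated alloc/free */
static atomic_int stop;

/* stands in for the steady stream of kmemleak_object allocs/frees */
static void *churn(void *arg)
{
        (void)arg;
        while (!atomic_load(&stop))
                atomic_fetch_add(&modseq, 1);
        return NULL;
}

int main(void)
{
        unsigned long restarts = 0, i = 0;
        pthread_t t;

        pthread_create(&t, NULL, churn, NULL);
        while (restarts < MAX_RESTARTS) {
                unsigned long seq = atomic_load(&modseq);

                for (i = 0; i < NOBJS; i++)
                        if (atomic_load(&modseq) != seq)
                                break;          /* cache changed under us */
                if (i == NOBJS)
                        break;                  /* completed a full pass */
                restarts++;                     /* reprocess from scratch */
        }
        atomic_store(&stop, 1);
        pthread_join(t, NULL);

        if (i == NOBJS)
                printf("finished a pass after %lu restarts\n", restarts);
        else
                printf("gave up: %lu restarts, never finished a pass\n", restarts);
        return 0;
}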
include/linux/slab_def.h (118 lines, 2.9 KiB, C):
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_SLAB_DEF_H
#define _LINUX_SLAB_DEF_H

#include <linux/reciprocal_div.h>

/*
 * Definitions unique to the original Linux SLAB allocator.
 */

struct kmem_cache {
        struct array_cache __percpu *cpu_cache;

/* 1) Cache tunables. Protected by slab_mutex */
        unsigned int batchcount;
        unsigned int limit;
        unsigned int shared;

        unsigned int size;
        struct reciprocal_value reciprocal_buffer_size;
/* 2) touched by every alloc & free from the backend */

        slab_flags_t flags;             /* constant flags */
        unsigned int num;               /* # of objs per slab */

/* 3) cache_grow/shrink */
        /* order of pgs per slab (2^n) */
        unsigned int gfporder;

        /* force GFP flags, e.g. GFP_DMA */
        gfp_t allocflags;

        size_t colour;                  /* cache colouring range */
        unsigned int colour_off;        /* colour offset */
        struct kmem_cache *freelist_cache;
        unsigned int freelist_size;

        /* constructor func */
        void (*ctor)(void *obj);

/* 4) cache creation/removal */
        const char *name;
        struct list_head list;
        int refcount;
        int object_size;
        int align;

/* 5) statistics */
#ifdef CONFIG_DEBUG_SLAB
        unsigned long num_active;
        unsigned long num_allocations;
        unsigned long high_mark;
        unsigned long grown;
        unsigned long reaped;
        unsigned long errors;
        unsigned long max_freeable;
        unsigned long node_allocs;
        unsigned long node_frees;
        unsigned long node_overflow;
        atomic_t allochit;
        atomic_t allocmiss;
        atomic_t freehit;
        atomic_t freemiss;

        /*
         * If debugging is enabled, then the allocator can add additional
         * fields and/or padding to every object. 'size' contains the total
         * object size including these internal fields, while 'obj_offset'
         * and 'object_size' contain the offset to the user object and its
         * size.
         */
        int obj_offset;
#endif /* CONFIG_DEBUG_SLAB */

#ifdef CONFIG_MEMCG
        struct memcg_cache_params memcg_params;
#endif
#ifdef CONFIG_KASAN
        struct kasan_cache kasan_info;
#endif

#ifdef CONFIG_SLAB_FREELIST_RANDOM
        unsigned int *random_seq;
#endif

        unsigned int useroffset;        /* Usercopy region offset */
        unsigned int usersize;          /* Usercopy region size */

        struct kmem_cache_node *node[MAX_NUMNODES];
};
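
/*
 * Illustrative summary (added note, roughly stated): a slab spans
 * 2^gfporder pages and holds 'num' objects of 'size' bytes each, its
 * freelist metadata living either on the slab itself or in
 * 'freelist_cache', with a cache-colouring offset that cycles through
 * 'colour' slots in steps of 'colour_off' bytes.
 * 'reciprocal_buffer_size' is a precomputed form of 'size' that lets
 * obj_to_index() below replace a division by 'size' with a multiply
 * and a shift.
 */
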
static inline void *nearest_obj(struct kmem_cache *cache, struct page *page,
                                void *x)
{
        void *object = x - (x - page->s_mem) % cache->size;
        void *last_object = page->s_mem + (cache->num - 1) * cache->size;

        if (unlikely(object > last_object))
                return last_object;
        else
                return object;
}
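
/*
 * Worked example for nearest_obj() (illustrative numbers, not from the
 * original header): with page->s_mem == 0x1000, cache->size == 256 and
 * cache->num == 16, a pointer x == 0x1234 lies 0x234 bytes into the slab;
 * 0x234 % 256 == 0x34, so the object start is 0x1234 - 0x34 == 0x1200.
 * last_object is 0x1000 + 15 * 256 == 0x1f00, so pointers past the final
 * object are clamped to 0x1f00.
 */
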
/*
 * We want to avoid an expensive divide : (offset / cache->size)
 *   Using the fact that size is a constant for a particular cache,
 *   we can replace (offset / cache->size) by
 *   reciprocal_divide(offset, cache->reciprocal_buffer_size)
 */
static inline unsigned int obj_to_index(const struct kmem_cache *cache,
                                        const struct page *page, void *obj)
{
        u32 offset = (obj - page->s_mem);
        return reciprocal_divide(offset, cache->reciprocal_buffer_size);
}

#endif /* _LINUX_SLAB_DEF_H */
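
To make the obj_to_index() trick concrete, here is a simplified userspace sketch of
the same idea. It is not the kernel's reciprocal_div.h implementation (which covers
the full u32 range): it precomputes ceil(2^32 / size) once per "cache" and then
turns every offset / size into a multiply and a shift, which is exact whenever
offset * (size - 1) < 2^32, comfortably covering offsets inside a slab. The struct
and function names below are made up for the illustration.

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

/* simplified stand-ins for struct reciprocal_value / reciprocal_divide() */
struct recip {
        uint32_t m;     /* ceil(2^32 / divisor), precomputed once (divisor >= 2) */
};

static struct recip recip_value(uint32_t d)
{
        struct recip r = { .m = (uint32_t)((((uint64_t)1 << 32) + d - 1) / d) };
        return r;
}

static uint32_t recip_divide(uint32_t a, struct recip r)
{
        return (uint32_t)(((uint64_t)a * r.m) >> 32);   /* a / d, no divide */
}

int main(void)
{
        uint32_t size = 216;                    /* hypothetical object size */
        struct recip r = recip_value(size);     /* done once per cache */
        uint32_t offset;

        /* exhaustively check slab-sized offsets against a real division */
        for (offset = 0; offset < 64 * 1024; offset++)
                assert(recip_divide(offset, r) == offset / size);

        printf("offset 4321 -> object index %u\n", recip_divide(4321, r));
        return 0;
}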