slub: proper kmemleak tracking if CONFIG_SLUB_DEBUG disabled
Move all kmemleak calls into hook functions, and make it so that all hooks
(both inside and outside of #ifdef CONFIG_SLUB_DEBUG) call the appropriate
kmemleak routines. This allows for kmemleak to be configured independently
of slub debug features. It also fixes a bug where kmemleak was only
partially enabled in some configurations.

Acked-by: Catalin Marinas <catalin.marinas@arm.com>
Acked-by: Christoph Lameter <cl@linux.com>
Signed-off-by: Roman Bobniev <Roman.Bobniev@sonymobile.com>
Signed-off-by: Tim Bird <tim.bird@sonymobile.com>
Signed-off-by: Pekka Enberg <penberg@iki.fi>
commit d56791b38e
parent 6e4664525b
mm/slub.c | 35 +++++++++++++++++++++++++++++++----
1 file changed, 31 insertions(+), 4 deletions(-)
@@ -928,6 +928,16 @@ static void trace(struct kmem_cache *s, struct page *page, void *object,
  * Hooks for other subsystems that check memory allocations. In a typical
  * production configuration these hooks all should produce no code at all.
  */
+static inline void kmalloc_large_node_hook(void *ptr, size_t size, gfp_t flags)
+{
+	kmemleak_alloc(ptr, size, 1, flags);
+}
+
+static inline void kfree_hook(const void *x)
+{
+	kmemleak_free(x);
+}
+
 static inline int slab_pre_alloc_hook(struct kmem_cache *s, gfp_t flags)
 {
 	flags &= gfp_allowed_mask;
@@ -1253,13 +1263,30 @@ static inline void inc_slabs_node(struct kmem_cache *s, int node,
 static inline void dec_slabs_node(struct kmem_cache *s, int node,
 							int objects) {}
 
+static inline void kmalloc_large_node_hook(void *ptr, size_t size, gfp_t flags)
+{
+	kmemleak_alloc(ptr, size, 1, flags);
+}
+
+static inline void kfree_hook(const void *x)
+{
+	kmemleak_free(x);
+}
+
 static inline int slab_pre_alloc_hook(struct kmem_cache *s, gfp_t flags)
 							{ return 0; }
 
 static inline void slab_post_alloc_hook(struct kmem_cache *s, gfp_t flags,
-		void *object) {}
+		void *object)
+{
+	kmemleak_alloc_recursive(object, s->object_size, 1, s->flags,
+		flags & gfp_allowed_mask);
+}
 
-static inline void slab_free_hook(struct kmem_cache *s, void *x) {}
+static inline void slab_free_hook(struct kmem_cache *s, void *x)
+{
+	kmemleak_free_recursive(x, s->flags);
+}
 
 #endif /* CONFIG_SLUB_DEBUG */
 
@@ -3265,7 +3292,7 @@ static void *kmalloc_large_node(size_t size, gfp_t flags, int node)
 	if (page)
 		ptr = page_address(page);
 
-	kmemleak_alloc(ptr, size, 1, flags);
+	kmalloc_large_node_hook(ptr, size, flags);
 	return ptr;
 }
 
@@ -3365,7 +3392,7 @@ void kfree(const void *x)
 	page = virt_to_head_page(x);
 	if (unlikely(!PageSlab(page))) {
 		BUG_ON(!PageCompound(page));
-		kmemleak_free(x);
+		kfree_hook(x);
 		__free_memcg_kmem_pages(page, compound_order(page));
 		return;
 	}
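
For readers skimming the patch, here is a standalone sketch of the pattern it
relies on: both branches of #ifdef CONFIG_SLUB_DEBUG define the same hook
names with the same kmemleak calls, so call sites such as
kmalloc_large_node() and kfree() stay unconditional. This is an illustration
only, not kernel code: the kmemleak_* functions are stubbed with printf so
the sketch compiles on its own (in the kernel they come from
<linux/kmemleak.h>), and before this patch the !CONFIG_SLUB_DEBUG variants of
the slab hooks were empty or missing, which is why kmemleak ended up only
partially enabled in those configurations.

/* Standalone illustration of the hook pattern used by this patch.
 * kmemleak_alloc()/kmemleak_free() are stubs for this sketch only. */
#include <stdio.h>
#include <stddef.h>

typedef unsigned int gfp_t;

static void kmemleak_alloc(const void *ptr, size_t size, int min_count,
			   gfp_t flags)		/* stub */
{
	printf("kmemleak_alloc(%p, %zu)\n", ptr, size);
}

static void kmemleak_free(const void *ptr)	/* stub */
{
	printf("kmemleak_free(%p)\n", ptr);
}

#ifdef CONFIG_SLUB_DEBUG
/* Debug build: the hooks sit next to the other slub debug hooks. */
static inline void kmalloc_large_node_hook(void *ptr, size_t size, gfp_t flags)
{
	kmemleak_alloc(ptr, size, 1, flags);
}

static inline void kfree_hook(const void *x)
{
	kmemleak_free(x);
}
#else
/* Non-debug build: same names, same kmemleak calls. Before the patch,
 * the hooks on this side were empty or did not exist, so kmemleak lost
 * track of these objects when CONFIG_SLUB_DEBUG was off. */
static inline void kmalloc_large_node_hook(void *ptr, size_t size, gfp_t flags)
{
	kmemleak_alloc(ptr, size, 1, flags);
}

static inline void kfree_hook(const void *x)
{
	kmemleak_free(x);
}
#endif

/* The call site stays unconditional, like kmalloc_large_node()/kfree(). */
int main(void)
{
	char buf[64];

	kmalloc_large_node_hook(buf, sizeof(buf), 0);
	kfree_hook(buf);
	return 0;
}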