[PATCH] mm/: make functions static

This patch makes the following needlessly global functions static:

 - slab.c: kmem_find_general_cachep()
 - swap.c: __page_cache_release()
 - vmalloc.c: __vmalloc_node()

Signed-off-by: Adrian Bunk <bunk@stusta.de>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
parent 204ec841fb
commit b221385bc4
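The pattern is the standard C cleanup: a function whose only callers live in the same file gets internal linkage, so its prototype can leave the shared header. Internal linkage also lets the compiler warn when the last caller disappears and gives it more freedom to inline. A minimal userspace sketch of the before/after; the names below are illustrative, not from the kernel tree:

#include <stddef.h>

/* before:
 *   header.h:  extern int find_general_index(size_t size);
 *   caches.c:  int find_general_index(size_t size) { ... }
 * after: the prototype leaves header.h and the definition becomes
 * static, confining the symbol to this translation unit.
 */
static int find_general_index(size_t size)
{
	return size > 4096;	/* stand-in for the real size-class lookup */
}

int cache_alloc(size_t size)
{
	return find_general_index(size);	/* the only caller, same file */
}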
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -318,8 +318,6 @@ static inline int get_page_unless_zero(struct page *page)
 	return atomic_inc_not_zero(&page->_count);
 }
 
-extern void FASTCALL(__page_cache_release(struct page *));
-
 static inline int page_count(struct page *page)
 {
 	if (unlikely(PageCompound(page)))
--- a/include/linux/slab.h
+++ b/include/linux/slab.h
@@ -67,7 +67,6 @@ extern void *kmem_cache_zalloc(struct kmem_cache *, gfp_t);
 extern void kmem_cache_free(kmem_cache_t *, void *);
 extern unsigned int kmem_cache_size(kmem_cache_t *);
 extern const char *kmem_cache_name(kmem_cache_t *);
-extern kmem_cache_t *kmem_find_general_cachep(size_t size, gfp_t gfpflags);
 
 /* Size description struct for general caches. */
 struct cache_sizes {
@@ -223,7 +222,6 @@ extern int FASTCALL(kmem_ptr_validate(kmem_cache_t *cachep, void *ptr));
 /* SLOB allocator routines */
 
 void kmem_cache_init(void);
-struct kmem_cache *kmem_find_general_cachep(size_t, gfp_t gfpflags);
 struct kmem_cache *kmem_cache_create(const char *c, size_t, size_t,
 	unsigned long,
 	void (*)(void *, struct kmem_cache *, unsigned long),
--- a/include/linux/vmalloc.h
+++ b/include/linux/vmalloc.h
@@ -44,8 +44,6 @@ extern void *vmalloc_32_user(unsigned long size);
 extern void *__vmalloc(unsigned long size, gfp_t gfp_mask, pgprot_t prot);
 extern void *__vmalloc_area(struct vm_struct *area, gfp_t gfp_mask,
 				pgprot_t prot);
-extern void *__vmalloc_node(unsigned long size, gfp_t gfp_mask,
-				pgprot_t prot, int node);
 extern void vfree(void *addr);
 
 extern void *vmap(struct page **pages, unsigned int count,
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -768,11 +768,10 @@ static inline struct kmem_cache *__find_general_cachep(size_t size,
 	return csizep->cs_cachep;
 }
 
-struct kmem_cache *kmem_find_general_cachep(size_t size, gfp_t gfpflags)
+static struct kmem_cache *kmem_find_general_cachep(size_t size, gfp_t gfpflags)
 {
 	return __find_general_cachep(size, gfpflags);
 }
-EXPORT_SYMBOL(kmem_find_general_cachep);
 
 static size_t slab_mgmt_size(size_t nr_objs, size_t align)
 {
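Alongside the static keyword, the EXPORT_SYMBOL() line is dropped: an exported symbol must be referenceable from other translation units (and from modules), which a static symbol by definition is not. A userspace analogue of what the removal implies for out-of-file callers (names are hypothetical):

/* a.c */
static int helper(void)		/* internal linkage: cannot be referenced
				 * from another translation unit */
{
	return 42;
}

int public_entry(void)		/* external linkage: still callable */
{
	return helper();
}

/* b.c -- this would now fail to link against a.c:
 *	extern int helper(void);
 *	int use(void) { return helper(); }
 * cc a.c b.c  =>  undefined reference to `helper'
 */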
--- a/mm/swap.c
+++ b/mm/swap.c
@@ -34,6 +34,25 @@
 /* How many pages do we try to swap or page in/out together? */
 int page_cluster;
 
+/*
+ * This path almost never happens for VM activity - pages are normally
+ * freed via pagevecs.  But it gets used by networking.
+ */
+static void fastcall __page_cache_release(struct page *page)
+{
+	if (PageLRU(page)) {
+		unsigned long flags;
+		struct zone *zone = page_zone(page);
+
+		spin_lock_irqsave(&zone->lru_lock, flags);
+		VM_BUG_ON(!PageLRU(page));
+		__ClearPageLRU(page);
+		del_page_from_lru(zone, page);
+		spin_unlock_irqrestore(&zone->lru_lock, flags);
+	}
+	free_hot_page(page);
+}
+
 static void put_compound_page(struct page *page)
 {
 	page = (struct page *)page_private(page);
@@ -222,26 +241,6 @@ int lru_add_drain_all(void)
 }
 #endif
 
-/*
- * This path almost never happens for VM activity - pages are normally
- * freed via pagevecs.  But it gets used by networking.
- */
-void fastcall __page_cache_release(struct page *page)
-{
-	if (PageLRU(page)) {
-		unsigned long flags;
-		struct zone *zone = page_zone(page);
-
-		spin_lock_irqsave(&zone->lru_lock, flags);
-		VM_BUG_ON(!PageLRU(page));
-		__ClearPageLRU(page);
-		del_page_from_lru(zone, page);
-		spin_unlock_irqrestore(&zone->lru_lock, flags);
-	}
-	free_hot_page(page);
-}
-EXPORT_SYMBOL(__page_cache_release);
-
 /*
  * Batched page_cache_release().  Decrement the reference count on all the
  * passed pages.  If it fell to zero then remove the page from the LRU and
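Note that __page_cache_release() is not only made static but also moved near the top of mm/swap.c. Once the extern prototype is gone from include/linux/mm.h, C still requires a declaration to be in scope at each call site, so the definition has to precede the first caller in the file (the alternative, a forward declaration, is what mm/vmalloc.c does below). A small sketch of the move-the-definition approach, with hypothetical names:

/* defining the static helper above its first caller means every call
 * already has a declaration in scope -- no separate prototype needed */
static void release_one(int *refcount)
{
	if (*refcount > 0)
		--*refcount;	/* stand-in for the real freeing work */
}

void put_ref(int *refcount)
{
	release_one(refcount);	/* definition above doubles as declaration */
}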
--- a/mm/vmalloc.c
+++ b/mm/vmalloc.c
@@ -24,6 +24,9 @@
 DEFINE_RWLOCK(vmlist_lock);
 struct vm_struct *vmlist;
 
+static void *__vmalloc_node(unsigned long size, gfp_t gfp_mask, pgprot_t prot,
+			    int node);
+
 static void vunmap_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end)
 {
 	pte_t *pte;
@@ -478,8 +481,8 @@ void *__vmalloc_area(struct vm_struct *area, gfp_t gfp_mask, pgprot_t prot)
  *	allocator with @gfp_mask flags.  Map them into contiguous
  *	kernel virtual space, using a pagetable protection of @prot.
  */
-void *__vmalloc_node(unsigned long size, gfp_t gfp_mask, pgprot_t prot,
-			int node)
+static void *__vmalloc_node(unsigned long size, gfp_t gfp_mask, pgprot_t prot,
+			    int node)
 {
 	struct vm_struct *area;
 
@@ -493,7 +496,6 @@ void *__vmalloc_node(unsigned long size, gfp_t gfp_mask, pgprot_t prot,
 
 	return __vmalloc_area_node(area, gfp_mask, prot, node);
 }
-EXPORT_SYMBOL(__vmalloc_node);
 
 void *__vmalloc(unsigned long size, gfp_t gfp_mask, pgprot_t prot)
 {
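mm/vmalloc.c takes the other route: instead of moving the definition, it adds a file-scope forward declaration near the top, so any use of __vmalloc_node() earlier in the file still sees a prototype. The same idea in a compilable userspace sketch (names are hypothetical):

/* forward declaration: callers placed above the definition get a prototype */
static long scale_for_node(long size, int node);

long alloc_on_node(long size, int node)
{
	return scale_for_node(size, node);	/* call precedes the definition */
}

static long scale_for_node(long size, int node)
{
	return size + node;	/* stand-in for the real allocation logic */
}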