mm: rename global_page_state to global_zone_page_state
global_page_state is error-prone, as a recent bug report pointed out [1]. It only returns proper values for zone-based counters, as the enum it takes suggests. We already have global_node_page_state, so rename global_page_state to global_zone_page_state to make the distinction explicit. All existing users appear to be correct:

$ git grep "global_page_state(NR_" | sed 's@.*(\(NR_[A-Z_]*\)).*@\1@' | sort | uniq -c
      2 NR_BOUNCE
      2 NR_FREE_CMA_PAGES
     11 NR_FREE_PAGES
      1 NR_KERNEL_STACK_KB
      1 NR_MLOCK
      2 NR_PAGETABLE

This patch should not introduce any functional change.

[1] http://lkml.kernel.org/r/201707260628.v6Q6SmaS030814@www262.sakura.ne.jp
Link: http://lkml.kernel.org/r/20170801134256.5400-2-hannes@cmpxchg.org
Signed-off-by: Michal Hocko <mhocko@suse.com>
Signed-off-by: Johannes Weiner <hannes@cmpxchg.org>
Cc: Tetsuo Handa <penguin-kernel@i-love.sakura.ne.jp>
Cc: Josef Bacik <josef@toxicpanda.com>
Cc: Vladimir Davydov <vdavydov.dev@gmail.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
commit c41f012ade
parent 4da243ac1c
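For context before the diff, here is a minimal sketch (not part of this patch) of how the renamed accessor is meant to pair with global_node_page_state after this series: counters declared in enum zone_stat_item (e.g. NR_FREE_PAGES, NR_MLOCK) go through global_zone_page_state(), while counters declared in enum node_stat_item (e.g. NR_FILE_PAGES, NR_SHMEM) go through global_node_page_state(). The report_counters() helper and its printk output are purely illustrative.

#include <linux/vmstat.h>	/* global_zone_page_state(), global_node_page_state() */
#include <linux/printk.h>

/* Illustrative only -- not part of this commit. */
static void report_counters(void)
{
	/* zone-based counters (enum zone_stat_item): use the zone accessor */
	unsigned long free_pages = global_zone_page_state(NR_FREE_PAGES);
	unsigned long mlocked    = global_zone_page_state(NR_MLOCK);

	/*
	 * node-based counters (enum node_stat_item): these were never valid
	 * arguments to the old global_page_state() and must use the node
	 * accessor instead.
	 */
	unsigned long file_pages = global_node_page_state(NR_FILE_PAGES);
	unsigned long shmem      = global_node_page_state(NR_SHMEM);

	pr_info("free=%lu mlocked=%lu file=%lu shmem=%lu\n",
		free_pages, mlocked, file_pages, shmem);
}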
@@ -80,7 +80,7 @@ static int meminfo_proc_show(struct seq_file *m, void *v)
 	show_val_kb(m, "Active(file): ", pages[LRU_ACTIVE_FILE]);
 	show_val_kb(m, "Inactive(file): ", pages[LRU_INACTIVE_FILE]);
 	show_val_kb(m, "Unevictable: ", pages[LRU_UNEVICTABLE]);
-	show_val_kb(m, "Mlocked: ", global_page_state(NR_MLOCK));
+	show_val_kb(m, "Mlocked: ", global_zone_page_state(NR_MLOCK));
 
 #ifdef CONFIG_HIGHMEM
 	show_val_kb(m, "HighTotal: ", i.totalhigh);
@@ -114,9 +114,9 @@ static int meminfo_proc_show(struct seq_file *m, void *v)
 	show_val_kb(m, "SUnreclaim: ",
 		    global_node_page_state(NR_SLAB_UNRECLAIMABLE));
 	seq_printf(m, "KernelStack: %8lu kB\n",
-		   global_page_state(NR_KERNEL_STACK_KB));
+		   global_zone_page_state(NR_KERNEL_STACK_KB));
 	show_val_kb(m, "PageTables: ",
-		    global_page_state(NR_PAGETABLE));
+		    global_zone_page_state(NR_PAGETABLE));
 #ifdef CONFIG_QUICKLIST
 	show_val_kb(m, "Quicklists: ", quicklist_total_size());
 #endif
@@ -124,7 +124,7 @@ static int meminfo_proc_show(struct seq_file *m, void *v)
 	show_val_kb(m, "NFS_Unstable: ",
 		    global_node_page_state(NR_UNSTABLE_NFS));
 	show_val_kb(m, "Bounce: ",
-		    global_page_state(NR_BOUNCE));
+		    global_zone_page_state(NR_BOUNCE));
 	show_val_kb(m, "WritebackTmp: ",
 		    global_node_page_state(NR_WRITEBACK_TEMP));
 	show_val_kb(m, "CommitLimit: ", vm_commit_limit());
@@ -151,7 +151,7 @@ static int meminfo_proc_show(struct seq_file *m, void *v)
 #ifdef CONFIG_CMA
 	show_val_kb(m, "CmaTotal: ", totalcma_pages);
 	show_val_kb(m, "CmaFree: ",
-		    global_page_state(NR_FREE_CMA_PAGES));
+		    global_zone_page_state(NR_FREE_CMA_PAGES));
 #endif
 
 	hugetlb_report_meminfo(m);
@@ -263,8 +263,8 @@ extern unsigned long totalreserve_pages;
 extern unsigned long nr_free_buffer_pages(void);
 extern unsigned long nr_free_pagecache_pages(void);
 
-/* Definition of global_page_state not available yet */
-#define nr_free_pages() global_page_state(NR_FREE_PAGES)
+/* Definition of global_zone_page_state not available yet */
+#define nr_free_pages() global_zone_page_state(NR_FREE_PAGES)
 
 
 /* linux/mm/swap.c */
@@ -123,7 +123,7 @@ static inline void node_page_state_add(long x, struct pglist_data *pgdat,
 	atomic_long_add(x, &vm_node_stat[item]);
 }
 
-static inline unsigned long global_page_state(enum zone_stat_item item)
+static inline unsigned long global_zone_page_state(enum zone_stat_item item)
 {
 	long x = atomic_long_read(&vm_zone_stat[item]);
 #ifdef CONFIG_SMP
@@ -199,7 +199,7 @@ extern unsigned long sum_zone_node_page_state(int node,
 extern unsigned long node_page_state(struct pglist_data *pgdat,
 						enum node_stat_item item);
 #else
-#define sum_zone_node_page_state(node, item) global_page_state(item)
+#define sum_zone_node_page_state(node, item) global_zone_page_state(item)
 #define node_page_state(node, item) global_node_page_state(item)
 #endif /* CONFIG_NUMA */
 
@@ -3514,7 +3514,7 @@ static int init_user_reserve(void)
 {
 	unsigned long free_kbytes;
 
-	free_kbytes = global_page_state(NR_FREE_PAGES) << (PAGE_SHIFT - 10);
+	free_kbytes = global_zone_page_state(NR_FREE_PAGES) << (PAGE_SHIFT - 10);
 
 	sysctl_user_reserve_kbytes = min(free_kbytes / 32, 1UL << 17);
 	return 0;
@@ -3535,7 +3535,7 @@ static int init_admin_reserve(void)
 {
 	unsigned long free_kbytes;
 
-	free_kbytes = global_page_state(NR_FREE_PAGES) << (PAGE_SHIFT - 10);
+	free_kbytes = global_zone_page_state(NR_FREE_PAGES) << (PAGE_SHIFT - 10);
 
 	sysctl_admin_reserve_kbytes = min(free_kbytes / 32, 1UL << 13);
 	return 0;
@@ -3579,7 +3579,7 @@ static int reserve_mem_notifier(struct notifier_block *nb,
 
 		break;
 	case MEM_OFFLINE:
-		free_kbytes = global_page_state(NR_FREE_PAGES) << (PAGE_SHIFT - 10);
+		free_kbytes = global_zone_page_state(NR_FREE_PAGES) << (PAGE_SHIFT - 10);
 
 		if (sysctl_user_reserve_kbytes > free_kbytes) {
 			init_user_reserve();
@@ -1962,7 +1962,7 @@ static int __meminit init_user_reserve(void)
 {
 	unsigned long free_kbytes;
 
-	free_kbytes = global_page_state(NR_FREE_PAGES) << (PAGE_SHIFT - 10);
+	free_kbytes = global_zone_page_state(NR_FREE_PAGES) << (PAGE_SHIFT - 10);
 
 	sysctl_user_reserve_kbytes = min(free_kbytes / 32, 1UL << 17);
 	return 0;
@@ -1983,7 +1983,7 @@ static int __meminit init_admin_reserve(void)
 {
 	unsigned long free_kbytes;
 
-	free_kbytes = global_page_state(NR_FREE_PAGES) << (PAGE_SHIFT - 10);
+	free_kbytes = global_zone_page_state(NR_FREE_PAGES) << (PAGE_SHIFT - 10);
 
 	sysctl_admin_reserve_kbytes = min(free_kbytes / 32, 1UL << 13);
 	return 0;
@@ -363,7 +363,7 @@ static unsigned long global_dirtyable_memory(void)
 {
 	unsigned long x;
 
-	x = global_page_state(NR_FREE_PAGES);
+	x = global_zone_page_state(NR_FREE_PAGES);
 	/*
 	 * Pages reserved for the kernel should not be considered
 	 * dirtyable, to prevent a situation where reclaim has to
@@ -1405,7 +1405,7 @@ void wb_update_bandwidth(struct bdi_writeback *wb, unsigned long start_time)
 * will look to see if it needs to start dirty throttling.
 *
 * If dirty_poll_interval is too low, big NUMA machines will call the expensive
-* global_page_state() too often. So scale it near-sqrt to the safety margin
+* global_zone_page_state() too often. So scale it near-sqrt to the safety margin
 * (the number of pages we may dirty without exceeding the dirty limits).
 */
 static unsigned long dirty_poll_interval(unsigned long dirty,
@@ -4509,7 +4509,7 @@ long si_mem_available(void)
 	 * Estimate the amount of memory available for userspace allocations,
 	 * without causing swapping.
 	 */
-	available = global_page_state(NR_FREE_PAGES) - totalreserve_pages;
+	available = global_zone_page_state(NR_FREE_PAGES) - totalreserve_pages;
 
 	/*
 	 * Not all the page cache can be freed, otherwise the system will
@@ -4538,7 +4538,7 @@ void si_meminfo(struct sysinfo *val)
 {
 	val->totalram = totalram_pages;
 	val->sharedram = global_node_page_state(NR_SHMEM);
-	val->freeram = global_page_state(NR_FREE_PAGES);
+	val->freeram = global_zone_page_state(NR_FREE_PAGES);
 	val->bufferram = nr_blockdev_pages();
 	val->totalhigh = totalhigh_pages;
 	val->freehigh = nr_free_highpages();
@@ -4673,11 +4673,11 @@ void show_free_areas(unsigned int filter, nodemask_t *nodemask)
 		global_node_page_state(NR_SLAB_UNRECLAIMABLE),
 		global_node_page_state(NR_FILE_MAPPED),
 		global_node_page_state(NR_SHMEM),
-		global_page_state(NR_PAGETABLE),
-		global_page_state(NR_BOUNCE),
-		global_page_state(NR_FREE_PAGES),
+		global_zone_page_state(NR_PAGETABLE),
+		global_zone_page_state(NR_BOUNCE),
+		global_zone_page_state(NR_FREE_PAGES),
 		free_pcp,
-		global_page_state(NR_FREE_CMA_PAGES));
+		global_zone_page_state(NR_FREE_CMA_PAGES));
 
 	for_each_online_pgdat(pgdat) {
 		if (show_mem_node_skip(filter, pgdat->node_id, nodemask))
@@ -614,7 +614,7 @@ int __vm_enough_memory(struct mm_struct *mm, long pages, int cap_sys_admin)
 		return 0;
 
 	if (sysctl_overcommit_memory == OVERCOMMIT_GUESS) {
-		free = global_page_state(NR_FREE_PAGES);
+		free = global_zone_page_state(NR_FREE_PAGES);
 		free += global_node_page_state(NR_FILE_PAGES);
 
 		/*
@@ -1502,7 +1502,7 @@ static void *vmstat_start(struct seq_file *m, loff_t *pos)
 	if (!v)
 		return ERR_PTR(-ENOMEM);
 	for (i = 0; i < NR_VM_ZONE_STAT_ITEMS; i++)
-		v[i] = global_page_state(i);
+		v[i] = global_zone_page_state(i);
 	v += NR_VM_ZONE_STAT_ITEMS;
 
 	for (i = 0; i < NR_VM_NODE_STAT_ITEMS; i++)
@@ -1591,7 +1591,7 @@ int vmstat_refresh(struct ctl_table *table, int write,
 * which can equally be echo'ed to or cat'ted from (by root),
 * can be used to update the stats just before reading them.
 *
-* Oh, and since global_page_state() etc. are so careful to hide
+* Oh, and since global_zone_page_state() etc. are so careful to hide
 * transiently negative values, report an error here if any of
 * the stats is negative, so we know to go looking for imbalance.
 */