commit 21a3c96
uses node_start/end_pfn(nid) to detect the start/end of nodes.
But these are not defined in linux/mmzone.h; they are defined in
/arch/???/include/mmzone.h, which is included only under
CONFIG_NEED_MULTIPLE_NODES=y.
Then, we see
mm/page_cgroup.c: In function 'page_cgroup_init':
mm/page_cgroup.c:308: error: implicit declaration of function 'node_start_pfn'
mm/page_cgroup.c:309: error: implicit declaration of function 'node_end_pfn'
So, fixing page_cgroup.c is an idea...
But node_start_pfn()/node_end_pfn() are very generic macros and
should be implemented in the same manner for all archs.
(m32r has a different implementation...)
This patch removes the definitions of node_start/end_pfn() from each arch
and defines a unified one in linux/mmzone.h. It is no longer under
CONFIG_NEED_MULTIPLE_NODES.
The result of the macro expansion (in mm/page_cgroup.c) is:
for !NUMA
start_pfn = ((&contig_page_data)->node_start_pfn);
end_pfn = ({ pg_data_t *__pgdat = (&contig_page_data); __pgdat->node_start_pfn + __pgdat->node_spanned_pages;});
for NUMA (x86-64)
start_pfn = ((node_data[nid])->node_start_pfn);
end_pfn = ({ pg_data_t *__pgdat = (node_data[nid]); __pgdat->node_start_pfn + __pgdat->node_spanned_pages;});
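For reference, a unified definition consistent with these expansions (a sketch;
the exact line breaks in linux/mmzone.h may differ) looks like:

  #define node_start_pfn(nid)	(NODE_DATA(nid)->node_start_pfn)
  #define node_end_pfn(nid) ({			\
  	pg_data_t *__pgdat = NODE_DATA(nid);	\
  	__pgdat->node_start_pfn + __pgdat->node_spanned_pages; \
  })

With !NUMA, NODE_DATA(nid) expands to (&contig_page_data); with NUMA on x86-64
it expands to (node_data[nid]), which yields exactly the expansions above.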
Changelog:
- fixed to avoid using "nid" twice in node_end_pfn() macro.
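  (For illustration only: a naive one-line definition such as

    #define node_end_pfn(nid) \
  	(NODE_DATA(nid)->node_start_pfn + NODE_DATA(nid)->node_spanned_pages)

  expands "nid" twice, so a call like node_end_pfn(next_online_node(n)) would
  evaluate its argument twice. The statement-expression form caches
  NODE_DATA(nid) in the local __pgdat pointer, so "nid" is evaluated only once.)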
Reported-and-acked-by: Randy Dunlap <randy.dunlap@oracle.com>
Reported-and-tested-by: Ingo Molnar <mingo@elte.hu>
Acked-by: Mel Gorman <mgorman@suse.de>
Signed-off-by: KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
#ifndef _PARISC_MMZONE_H
#define _PARISC_MMZONE_H

#ifdef CONFIG_DISCONTIGMEM

#define MAX_PHYSMEM_RANGES 8 /* Fix the size for now (current known max is 3) */
extern int npmem_ranges;

struct node_map_data {
	pg_data_t pg_data;
};

extern struct node_map_data node_data[];

#define NODE_DATA(nid)          (&node_data[nid].pg_data)

/* We have these possible memory map layouts:
 * Astro: 0-3.75, 67.75-68, 4-64
 * zx1: 0-1, 257-260, 4-256
 * Stretch (N-class): 0-2, 4-32, 34-xxx
 */

/* Since each 1GB can only belong to one region (node), we can create
 * an index table for pfn to nid lookup; each entry in pfnnid_map
 * represents 1GB, and contains the node that the memory belongs to. */

#define PFNNID_SHIFT (30 - PAGE_SHIFT)
#define PFNNID_MAP_MAX  512     /* support 512GB */
extern unsigned char pfnnid_map[PFNNID_MAP_MAX];

#ifndef CONFIG_64BIT
#define pfn_is_io(pfn) ((pfn & (0xf0000000UL >> PAGE_SHIFT)) == (0xf0000000UL >> PAGE_SHIFT))
#else
/* io can be 0xf0f0f0f0f0xxxxxx or 0xfffffffff0000000 */
#define pfn_is_io(pfn) ((pfn & (0xf000000000000000UL >> PAGE_SHIFT)) == (0xf000000000000000UL >> PAGE_SHIFT))
#endif

static inline int pfn_to_nid(unsigned long pfn)
{
	unsigned int i;
	unsigned char r;

	if (unlikely(pfn_is_io(pfn)))
		return 0;

	i = pfn >> PFNNID_SHIFT;
	BUG_ON(i >= sizeof(pfnnid_map) / sizeof(pfnnid_map[0]));
	r = pfnnid_map[i];
	BUG_ON(r == 0xff);

	return (int)r;
}

static inline int pfn_valid(int pfn)
{
	int nid = pfn_to_nid(pfn);

	if (nid >= 0)
		return (pfn < node_end_pfn(nid));
	return 0;
}

#else /* !CONFIG_DISCONTIGMEM */
#define MAX_PHYSMEM_RANGES 1
#endif
#endif /* _PARISC_MMZONE_H */