forked from luck/tmp_suning_uos_patched
memcg: correctly order reading PCG_USED and pc->mem_cgroup
The placement of the read-side barrier is confused: the writer first sets pc->mem_cgroup, then sets PCG_USED (with an smp_wmb() in between). The read-side barrier therefore has to sit between testing PCG_USED and reading pc->mem_cgroup — not before the PCG_USED test, where it orders nothing useful.

Signed-off-by: Johannes Weiner <hannes@cmpxchg.org>
Acked-by: KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com>
Acked-by: Daisuke Nishimura <nishimura@mxp.nes.nec.co.jp>
Cc: Balbir Singh <balbir@linux.vnet.ibm.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
This commit is contained in:
parent
2550326ac7
commit
713735b423
|
@ -836,13 +836,12 @@ void mem_cgroup_rotate_lru_list(struct page *page, enum lru_list lru)
|
|||
return;
|
||||
|
||||
pc = lookup_page_cgroup(page);
|
||||
/*
|
||||
* Used bit is set without atomic ops but after smp_wmb().
|
||||
* For making pc->mem_cgroup visible, insert smp_rmb() here.
|
||||
*/
|
||||
smp_rmb();
|
||||
/* unused or root page is not rotated. */
|
||||
if (!PageCgroupUsed(pc) || mem_cgroup_is_root(pc->mem_cgroup))
|
||||
if (!PageCgroupUsed(pc))
|
||||
return;
|
||||
/* Ensure pc->mem_cgroup is visible after reading PCG_USED. */
|
||||
smp_rmb();
|
||||
if (mem_cgroup_is_root(pc->mem_cgroup))
|
||||
return;
|
||||
mz = page_cgroup_zoneinfo(pc);
|
||||
list_move(&pc->lru, &mz->lists[lru]);
|
||||
|
@ -857,14 +856,10 @@ void mem_cgroup_add_lru_list(struct page *page, enum lru_list lru)
|
|||
return;
|
||||
pc = lookup_page_cgroup(page);
|
||||
VM_BUG_ON(PageCgroupAcctLRU(pc));
|
||||
/*
|
||||
* Used bit is set without atomic ops but after smp_wmb().
|
||||
* For making pc->mem_cgroup visible, insert smp_rmb() here.
|
||||
*/
|
||||
smp_rmb();
|
||||
if (!PageCgroupUsed(pc))
|
||||
return;
|
||||
|
||||
/* Ensure pc->mem_cgroup is visible after reading PCG_USED. */
|
||||
smp_rmb();
|
||||
mz = page_cgroup_zoneinfo(pc);
|
||||
/* huge page split is done under lru_lock. so, we have no races. */
|
||||
MEM_CGROUP_ZSTAT(mz, lru) += 1 << compound_order(page);
|
||||
|
@ -1031,14 +1026,10 @@ mem_cgroup_get_reclaim_stat_from_page(struct page *page)
|
|||
return NULL;
|
||||
|
||||
pc = lookup_page_cgroup(page);
|
||||
/*
|
||||
* Used bit is set without atomic ops but after smp_wmb().
|
||||
* For making pc->mem_cgroup visible, insert smp_rmb() here.
|
||||
*/
|
||||
smp_rmb();
|
||||
if (!PageCgroupUsed(pc))
|
||||
return NULL;
|
||||
|
||||
/* Ensure pc->mem_cgroup is visible after reading PCG_USED. */
|
||||
smp_rmb();
|
||||
mz = page_cgroup_zoneinfo(pc);
|
||||
if (!mz)
|
||||
return NULL;
|
||||
|
|
Loading…
Reference in New Issue
Block a user