ksm: keep quiet while list empty
ksm_scan_thread already sleeps in wait_event_interruptible until setting ksm_run activates it; but if there's nothing on its list to look at, i.e. nobody has yet said madvise MADV_MERGEABLE, it's a shame to be clocking up system time and full_scans: ksmd_should_run added to check that too.

And move the mutex_lock out around it: the new counts showed that when ksm_run is stopped, a little work often got done afterwards, because it had been read before taking the mutex.

Signed-off-by: Hugh Dickins <hugh.dickins@tiscali.co.uk>
Acked-by: Izik Eidus <ieidus@redhat.com>
Cc: Andrea Arcangeli <aarcange@redhat.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
commit 6e15838425
parent 26465d3ea5
mm/ksm.c | 28 ++++++++++++++++++++++------
1 file changed, 22 insertions(+), 6 deletions(-)
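As the message notes, ksmd has nothing to do until some task says madvise MADV_MERGEABLE, which reaches __ksm_enter() below and, with this patch, wakes the thread. For context only (not part of the patch), a minimal userspace sketch of that trigger, assuming a kernel built with CONFIG_KSM and /sys/kernel/mm/ksm/run set to 1:

#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/mman.h>

int main(void)
{
        size_t len = 16 * 4096;

        /* Anonymous private mapping: this mm is not yet on ksm's list. */
        char *p = mmap(NULL, len, PROT_READ | PROT_WRITE,
                       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
        if (p == MAP_FAILED) {
                perror("mmap");
                return 1;
        }
        memset(p, 0x5a, len);   /* identical pages: good merge candidates */

        /*
         * The first MADV_MERGEABLE in a process ends up in __ksm_enter(),
         * which links this mm onto ksm_mm_head.mm_list and, after this
         * patch, wakes ksmd if the list was empty until now.
         */
        if (madvise(p, len, MADV_MERGEABLE) != 0) {
                perror("madvise(MADV_MERGEABLE)");
                return 1;
        }

        sleep(10);      /* keep the pages around so ksmd can scan and merge */
        return 0;
}

Until that first madvise call, ksm_mm_head.mm_list stays empty and ksmd now sleeps in ksm_thread_wait instead of clocking up system time and full_scans.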
@@ -1280,21 +1280,27 @@ static void ksm_do_scan(unsigned int scan_npages)
         }
 }

+static int ksmd_should_run(void)
+{
+        return (ksm_run & KSM_RUN_MERGE) && !list_empty(&ksm_mm_head.mm_list);
+}
+
 static int ksm_scan_thread(void *nothing)
 {
         set_user_nice(current, 5);

         while (!kthread_should_stop()) {
-                if (ksm_run & KSM_RUN_MERGE) {
-                        mutex_lock(&ksm_thread_mutex);
+                mutex_lock(&ksm_thread_mutex);
+                if (ksmd_should_run())
                         ksm_do_scan(ksm_thread_pages_to_scan);
-                        mutex_unlock(&ksm_thread_mutex);
+                mutex_unlock(&ksm_thread_mutex);
+
+                if (ksmd_should_run()) {
                         schedule_timeout_interruptible(
                                 msecs_to_jiffies(ksm_thread_sleep_millisecs));
                 } else {
                         wait_event_interruptible(ksm_thread_wait,
-                                (ksm_run & KSM_RUN_MERGE) ||
-                                kthread_should_stop());
+                                ksmd_should_run() || kthread_should_stop());
                 }
         }
         return 0;
@@ -1339,10 +1345,16 @@ int ksm_madvise(struct vm_area_struct *vma, unsigned long start,

 int __ksm_enter(struct mm_struct *mm)
 {
-        struct mm_slot *mm_slot = alloc_mm_slot();
+        struct mm_slot *mm_slot;
+        int needs_wakeup;
+
+        mm_slot = alloc_mm_slot();
         if (!mm_slot)
                 return -ENOMEM;

+        /* Check ksm_run too?  Would need tighter locking */
+        needs_wakeup = list_empty(&ksm_mm_head.mm_list);
+
         spin_lock(&ksm_mmlist_lock);
         insert_to_mm_slots_hash(mm, mm_slot);
         /*
@@ -1354,6 +1366,10 @@ int __ksm_enter(struct mm_struct *mm)
         spin_unlock(&ksm_mmlist_lock);

         set_bit(MMF_VM_MERGEABLE, &mm->flags);
+
+        if (needs_wakeup)
+                wake_up_interruptible(&ksm_thread_wait);
+
         return 0;
 }