Merge branch 'pm-freezer' of git://git.kernel.org/pub/scm/linux/kernel/git/tj/misc into pm-freezer
* 'pm-freezer' of git://git.kernel.org/pub/scm/linux/kernel/git/tj/misc: (24 commits)
  freezer: fix wait_event_freezable/__thaw_task races
  freezer: kill unused set_freezable_with_signal()
  dmatest: don't use set_freezable_with_signal()
  usb_storage: don't use set_freezable_with_signal()
  freezer: remove unused @sig_only from freeze_task()
  freezer: use lock_task_sighand() in fake_signal_wake_up()
  freezer: restructure __refrigerator()
  freezer: fix set_freezable[_with_signal]() race
  freezer: remove should_send_signal() and update frozen()
  freezer: remove now unused TIF_FREEZE
  freezer: make freezing() test freeze conditions in effect instead of TIF_FREEZE
  cgroup_freezer: prepare for removal of TIF_FREEZE
  freezer: clean up freeze_processes() failure path
  freezer: kill PF_FREEZING
  freezer: test freezable conditions while holding freezer_lock
  freezer: make freezing indicate freeze condition in effect
  freezer: use dedicated lock instead of task_lock() + memory barrier
  freezer: don't distinguish nosig tasks on thaw
  freezer: remove racy clear_freeze_flag() and set PF_NOFREEZE on dead tasks
  freezer: rename thaw_process() to __thaw_task() and simplify the implementation
  ...
commit 986b11c3ee
@@ -21,7 +21,7 @@ freeze_processes() (defined in kernel/power/process.c) is called. It executes
 try_to_freeze_tasks() that sets TIF_FREEZE for all of the freezable tasks and
 either wakes them up, if they are kernel threads, or sends fake signals to them,
 if they are user space processes.  A task that has TIF_FREEZE set, should react
-to it by calling the function called refrigerator() (defined in
+to it by calling the function called __refrigerator() (defined in
 kernel/freezer.c), which sets the task's PF_FROZEN flag, changes its state
 to TASK_UNINTERRUPTIBLE and makes it loop until PF_FROZEN is cleared for it.
 Then, we say that the task is 'frozen' and therefore the set of functions
@@ -29,10 +29,10 @@ handling this mechanism is referred to as 'the freezer' (these functions are
 defined in kernel/power/process.c, kernel/freezer.c & include/linux/freezer.h).
 User space processes are generally frozen before kernel threads.
 
-It is not recommended to call refrigerator() directly.  Instead, it is
-recommended to use the try_to_freeze() function (defined in
-include/linux/freezer.h), that checks the task's TIF_FREEZE flag and makes the
-task enter refrigerator() if the flag is set.
+__refrigerator() must not be called directly.  Instead, use the
+try_to_freeze() function (defined in include/linux/freezer.h), that checks
+the task's TIF_FREEZE flag and makes the task enter __refrigerator() if the
+flag is set.
 
 For user space processes try_to_freeze() is called automatically from the
 signal-handling code, but the freezable kernel threads need to call it
@@ -61,13 +61,13 @@ wait_event_freezable() and wait_event_freezable_timeout() macros.
 After the system memory state has been restored from a hibernation image and
 devices have been reinitialized, the function thaw_processes() is called in
 order to clear the PF_FROZEN flag for each frozen task.  Then, the tasks that
-have been frozen leave refrigerator() and continue running.
+have been frozen leave __refrigerator() and continue running.
 
 III. Which kernel threads are freezable?
 
 Kernel threads are not freezable by default.  However, a kernel thread may clear
 PF_NOFREEZE for itself by calling set_freezable() (the resetting of PF_NOFREEZE
-directly is strongly discouraged).  From this point it is regarded as freezable
+directly is not allowed).  From this point it is regarded as freezable
 and must call try_to_freeze() in a suitable place.
 
 IV. Why do we do that?
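For illustration (not part of the diff): the pattern this documentation describes — a kernel thread marking itself freezable with set_freezable() and then polling try_to_freeze() in its main loop — reduces to a sketch like the one below. The thread body and its sleep interval are invented; only set_freezable(), try_to_freeze() and kthread_should_stop() are the real interfaces discussed above.

	#include <linux/kthread.h>
	#include <linux/freezer.h>
	#include <linux/delay.h>

	static int example_thread(void *data)
	{
		set_freezable();	/* clear PF_NOFREEZE for %current */

		while (!kthread_should_stop()) {
			/* enters __refrigerator() if a freeze is in effect */
			try_to_freeze();

			/* ... do one unit of work, then sleep ... */
			msleep_interruptible(1000);
		}
		return 0;
	}
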
@@ -79,7 +79,6 @@ register struct thread_info *__current_thread_info __asm__("$8");
 #define TIF_UAC_SIGBUS		12	/* ! userspace part of 'osf_sysinfo' */
 #define TIF_MEMDIE		13	/* is terminating due to OOM killer */
 #define TIF_RESTORE_SIGMASK	14	/* restore signal mask in do_signal */
-#define TIF_FREEZE		16	/* is freezing for suspend */
 
 #define _TIF_SYSCALL_TRACE	(1<<TIF_SYSCALL_TRACE)
 #define _TIF_SIGPENDING		(1<<TIF_SIGPENDING)
@@ -87,7 +86,6 @@ register struct thread_info *__current_thread_info __asm__("$8");
 #define _TIF_POLLING_NRFLAG	(1<<TIF_POLLING_NRFLAG)
 #define _TIF_RESTORE_SIGMASK	(1<<TIF_RESTORE_SIGMASK)
 #define _TIF_NOTIFY_RESUME	(1<<TIF_NOTIFY_RESUME)
-#define _TIF_FREEZE		(1<<TIF_FREEZE)
 
 /* Work to do on interrupt/exception return. */
 #define _TIF_WORK_MASK		(_TIF_SIGPENDING | _TIF_NEED_RESCHED | \
@@ -142,7 +142,6 @@ extern void vfp_flush_hwstate(struct thread_info *);
 #define TIF_POLLING_NRFLAG	16
 #define TIF_USING_IWMMXT	17
 #define TIF_MEMDIE		18	/* is terminating due to OOM killer */
-#define TIF_FREEZE		19
 #define TIF_RESTORE_SIGMASK	20
 #define TIF_SECCOMP		21
 
@@ -152,7 +151,6 @@ extern void vfp_flush_hwstate(struct thread_info *);
 #define _TIF_SYSCALL_TRACE	(1 << TIF_SYSCALL_TRACE)
 #define _TIF_POLLING_NRFLAG	(1 << TIF_POLLING_NRFLAG)
 #define _TIF_USING_IWMMXT	(1 << TIF_USING_IWMMXT)
-#define _TIF_FREEZE		(1 << TIF_FREEZE)
 #define _TIF_RESTORE_SIGMASK	(1 << TIF_RESTORE_SIGMASK)
 #define _TIF_SECCOMP		(1 << TIF_SECCOMP)
 
@@ -85,7 +85,6 @@ static inline struct thread_info *current_thread_info(void)
 #define TIF_RESTORE_SIGMASK	7	/* restore signal mask in do_signal */
 #define TIF_CPU_GOING_TO_SLEEP	8	/* CPU is entering sleep 0 mode */
 #define TIF_NOTIFY_RESUME	9	/* callback before returning to user */
-#define TIF_FREEZE		29
 #define TIF_DEBUG		30	/* debugging enabled */
 #define TIF_USERSPACE		31	/* true if FS sets userspace */
 
@@ -98,7 +97,6 @@ static inline struct thread_info *current_thread_info(void)
 #define _TIF_RESTORE_SIGMASK	(1 << TIF_RESTORE_SIGMASK)
 #define _TIF_CPU_GOING_TO_SLEEP	(1 << TIF_CPU_GOING_TO_SLEEP)
 #define _TIF_NOTIFY_RESUME	(1 << TIF_NOTIFY_RESUME)
-#define _TIF_FREEZE		(1 << TIF_FREEZE)
 
 /* Note: The masks below must never span more than 16 bits! */
 
@@ -100,7 +100,6 @@ static inline struct thread_info *current_thread_info(void)
 			   TIF_NEED_RESCHED */
 #define TIF_MEMDIE		4	/* is terminating due to OOM killer */
 #define TIF_RESTORE_SIGMASK	5	/* restore signal mask in do_signal() */
-#define TIF_FREEZE		6	/* is freezing for suspend */
 #define TIF_IRQ_SYNC		7	/* sync pipeline stage */
 #define TIF_NOTIFY_RESUME	8	/* callback before returning to user */
 #define TIF_SINGLESTEP		9
@@ -111,7 +110,6 @@ static inline struct thread_info *current_thread_info(void)
 #define _TIF_NEED_RESCHED	(1<<TIF_NEED_RESCHED)
 #define _TIF_POLLING_NRFLAG	(1<<TIF_POLLING_NRFLAG)
 #define _TIF_RESTORE_SIGMASK	(1<<TIF_RESTORE_SIGMASK)
-#define _TIF_FREEZE		(1<<TIF_FREEZE)
 #define _TIF_IRQ_SYNC		(1<<TIF_IRQ_SYNC)
 #define _TIF_NOTIFY_RESUME	(1<<TIF_NOTIFY_RESUME)
 #define _TIF_SINGLESTEP		(1<<TIF_SINGLESTEP)
@@ -86,7 +86,6 @@ struct thread_info {
 #define TIF_RESTORE_SIGMASK	9	/* restore signal mask in do_signal() */
 #define TIF_POLLING_NRFLAG	16	/* true if poll_idle() is polling TIF_NEED_RESCHED */
 #define TIF_MEMDIE		17	/* is terminating due to OOM killer */
-#define TIF_FREEZE		18	/* is freezing for suspend */
 
 #define _TIF_SYSCALL_TRACE	(1<<TIF_SYSCALL_TRACE)
 #define _TIF_NOTIFY_RESUME	(1<<TIF_NOTIFY_RESUME)
@@ -94,7 +93,6 @@ struct thread_info {
 #define _TIF_NEED_RESCHED	(1<<TIF_NEED_RESCHED)
 #define _TIF_RESTORE_SIGMASK	(1<<TIF_RESTORE_SIGMASK)
 #define _TIF_POLLING_NRFLAG	(1<<TIF_POLLING_NRFLAG)
-#define _TIF_FREEZE		(1<<TIF_FREEZE)
 
 #define _TIF_WORK_MASK		0x0000FFFE	/* work to do on interrupt/exception return */
 #define _TIF_ALLWORK_MASK	0x0000FFFF	/* work to do on any return to u-space */
@@ -111,7 +111,6 @@ register struct thread_info *__current_thread_info asm("gr15");
 #define TIF_RESTORE_SIGMASK	5	/* restore signal mask in do_signal() */
 #define TIF_POLLING_NRFLAG	16	/* true if poll_idle() is polling TIF_NEED_RESCHED */
 #define TIF_MEMDIE		17	/* is terminating due to OOM killer */
-#define TIF_FREEZE		18	/* freezing for suspend */
 
 #define _TIF_SYSCALL_TRACE	(1 << TIF_SYSCALL_TRACE)
 #define _TIF_NOTIFY_RESUME	(1 << TIF_NOTIFY_RESUME)
@@ -120,7 +119,6 @@ register struct thread_info *__current_thread_info asm("gr15");
 #define _TIF_SINGLESTEP		(1 << TIF_SINGLESTEP)
 #define _TIF_RESTORE_SIGMASK	(1 << TIF_RESTORE_SIGMASK)
 #define _TIF_POLLING_NRFLAG	(1 << TIF_POLLING_NRFLAG)
-#define _TIF_FREEZE		(1 << TIF_FREEZE)
 
 #define _TIF_WORK_MASK		0x0000FFFE	/* work to do on interrupt/exception return */
 #define _TIF_ALLWORK_MASK	0x0000FFFF	/* work to do on any return to u-space */
@@ -90,7 +90,6 @@ static inline struct thread_info *current_thread_info(void)
 #define TIF_MEMDIE		4	/* is terminating due to OOM killer */
 #define TIF_RESTORE_SIGMASK	5	/* restore signal mask in do_signal() */
 #define TIF_NOTIFY_RESUME	6	/* callback before returning to user */
-#define TIF_FREEZE		16	/* is freezing for suspend */
 
 /* as above, but as bit values */
 #define _TIF_SYSCALL_TRACE	(1<<TIF_SYSCALL_TRACE)
@@ -99,7 +98,6 @@ static inline struct thread_info *current_thread_info(void)
 #define _TIF_POLLING_NRFLAG	(1<<TIF_POLLING_NRFLAG)
 #define _TIF_RESTORE_SIGMASK	(1<<TIF_RESTORE_SIGMASK)
 #define _TIF_NOTIFY_RESUME	(1 << TIF_NOTIFY_RESUME)
-#define _TIF_FREEZE		(1<<TIF_FREEZE)
 
 #define _TIF_WORK_MASK		0x0000FFFE	/* work to do on interrupt/exception return */
 
@@ -113,7 +113,6 @@ struct thread_info {
 #define TIF_MEMDIE		17	/* is terminating due to OOM killer */
 #define TIF_MCA_INIT		18	/* this task is processing MCA or INIT */
 #define TIF_DB_DISABLED		19	/* debug trap disabled for fsyscall */
-#define TIF_FREEZE		20	/* is freezing for suspend */
 #define TIF_RESTORE_RSE		21	/* user RBS is newer than kernel RBS */
 
 #define _TIF_SYSCALL_TRACE	(1 << TIF_SYSCALL_TRACE)
@@ -126,7 +125,6 @@ struct thread_info {
 #define _TIF_POLLING_NRFLAG	(1 << TIF_POLLING_NRFLAG)
 #define _TIF_MCA_INIT		(1 << TIF_MCA_INIT)
 #define _TIF_DB_DISABLED	(1 << TIF_DB_DISABLED)
-#define _TIF_FREEZE		(1 << TIF_FREEZE)
 #define _TIF_RESTORE_RSE	(1 << TIF_RESTORE_RSE)
 
 /* "work to do on user-return" bits */
@@ -138,7 +138,6 @@ static inline unsigned int get_thread_fault_code(void)
 #define TIF_USEDFPU		16	/* FPU was used by this task this quantum (SMP) */
 #define TIF_POLLING_NRFLAG	17	/* true if poll_idle() is polling TIF_NEED_RESCHED */
 #define TIF_MEMDIE		18	/* is terminating due to OOM killer */
-#define TIF_FREEZE		19	/* is freezing for suspend */
 
 #define _TIF_SYSCALL_TRACE	(1<<TIF_SYSCALL_TRACE)
 #define _TIF_SIGPENDING		(1<<TIF_SIGPENDING)
@@ -149,7 +148,6 @@ static inline unsigned int get_thread_fault_code(void)
 #define _TIF_RESTORE_SIGMASK	(1<<TIF_RESTORE_SIGMASK)
 #define _TIF_USEDFPU		(1<<TIF_USEDFPU)
 #define _TIF_POLLING_NRFLAG	(1<<TIF_POLLING_NRFLAG)
-#define _TIF_FREEZE		(1<<TIF_FREEZE)
 
 #define _TIF_WORK_MASK		0x0000FFFE	/* work to do on interrupt/exception return */
 #define _TIF_ALLWORK_MASK	0x0000FFFF	/* work to do on any return to u-space */
@@ -103,7 +103,6 @@ static inline struct thread_info *current_thread_info(void)
 #define TIF_DELAYED_TRACE	14	/* single step a syscall */
 #define TIF_SYSCALL_TRACE	15	/* syscall trace active */
 #define TIF_MEMDIE		16	/* is terminating due to OOM killer */
-#define TIF_FREEZE		17	/* thread is freezing for suspend */
 #define TIF_RESTORE_SIGMASK	18	/* restore signal mask in do_signal */
 
 #endif	/* _ASM_M68K_THREAD_INFO_H */
@@ -125,7 +125,6 @@ static inline struct thread_info *current_thread_info(void)
 #define TIF_MEMDIE		6	/* is terminating due to OOM killer */
 #define TIF_SYSCALL_AUDIT	9	/* syscall auditing active */
 #define TIF_SECCOMP		10	/* secure computing */
-#define TIF_FREEZE		14	/* Freezing for suspend */
 
 /* true if poll_idle() is polling TIF_NEED_RESCHED */
 #define TIF_POLLING_NRFLAG	16
@@ -137,7 +136,6 @@ static inline struct thread_info *current_thread_info(void)
 #define _TIF_SINGLESTEP		(1 << TIF_SINGLESTEP)
 #define _TIF_IRET		(1 << TIF_IRET)
 #define _TIF_POLLING_NRFLAG	(1 << TIF_POLLING_NRFLAG)
-#define _TIF_FREEZE		(1 << TIF_FREEZE)
 #define _TIF_SYSCALL_AUDIT	(1 << TIF_SYSCALL_AUDIT)
 #define _TIF_SECCOMP		(1 << TIF_SECCOMP)
 
@@ -117,7 +117,6 @@ register struct thread_info *__current_thread_info __asm__("$28");
 #define TIF_USEDFPU		16	/* FPU was used by this task this quantum (SMP) */
 #define TIF_POLLING_NRFLAG	17	/* true if poll_idle() is polling TIF_NEED_RESCHED */
 #define TIF_MEMDIE		18	/* is terminating due to OOM killer */
-#define TIF_FREEZE		19
 #define TIF_FIXADE		20	/* Fix address errors in software */
 #define TIF_LOGADE		21	/* Log address errors to syslog */
 #define TIF_32BIT_REGS		22	/* also implies 16/32 fprs */
@@ -141,7 +140,6 @@ register struct thread_info *__current_thread_info __asm__("$28");
 #define _TIF_RESTORE_SIGMASK	(1<<TIF_RESTORE_SIGMASK)
 #define _TIF_USEDFPU		(1<<TIF_USEDFPU)
 #define _TIF_POLLING_NRFLAG	(1<<TIF_POLLING_NRFLAG)
-#define _TIF_FREEZE		(1<<TIF_FREEZE)
 #define _TIF_FIXADE		(1<<TIF_FIXADE)
 #define _TIF_LOGADE		(1<<TIF_LOGADE)
 #define _TIF_32BIT_REGS		(1<<TIF_32BIT_REGS)
@@ -165,7 +165,6 @@ extern void free_thread_info(struct thread_info *);
 #define TIF_RESTORE_SIGMASK	5	/* restore signal mask in do_signal() */
 #define TIF_POLLING_NRFLAG	16	/* true if poll_idle() is polling TIF_NEED_RESCHED */
 #define TIF_MEMDIE		17	/* is terminating due to OOM killer */
-#define TIF_FREEZE		18	/* freezing for suspend */
 
 #define _TIF_SYSCALL_TRACE	+(1 << TIF_SYSCALL_TRACE)
 #define _TIF_NOTIFY_RESUME	+(1 << TIF_NOTIFY_RESUME)
@@ -174,7 +173,6 @@ extern void free_thread_info(struct thread_info *);
 #define _TIF_SINGLESTEP		+(1 << TIF_SINGLESTEP)
 #define _TIF_RESTORE_SIGMASK	+(1 << TIF_RESTORE_SIGMASK)
 #define _TIF_POLLING_NRFLAG	+(1 << TIF_POLLING_NRFLAG)
-#define _TIF_FREEZE		+(1 << TIF_FREEZE)
 
 #define _TIF_WORK_MASK		0x0000FFFE	/* work to do on interrupt/exception return */
 #define _TIF_ALLWORK_MASK	0x0000FFFF	/* work to do on any return to u-space */
@@ -58,7 +58,6 @@ struct thread_info {
 #define TIF_32BIT		4	/* 32 bit binary */
 #define TIF_MEMDIE		5	/* is terminating due to OOM killer */
 #define TIF_RESTORE_SIGMASK	6	/* restore saved signal mask */
-#define TIF_FREEZE		7	/* is freezing for suspend */
 #define TIF_NOTIFY_RESUME	8	/* callback before returning to user */
 #define TIF_SINGLESTEP		9	/* single stepping? */
 #define TIF_BLOCKSTEP		10	/* branch stepping? */
@@ -69,7 +68,6 @@ struct thread_info {
 #define _TIF_POLLING_NRFLAG	(1 << TIF_POLLING_NRFLAG)
 #define _TIF_32BIT		(1 << TIF_32BIT)
 #define _TIF_RESTORE_SIGMASK	(1 << TIF_RESTORE_SIGMASK)
-#define _TIF_FREEZE		(1 << TIF_FREEZE)
 #define _TIF_NOTIFY_RESUME	(1 << TIF_NOTIFY_RESUME)
 #define _TIF_SINGLESTEP		(1 << TIF_SINGLESTEP)
 #define _TIF_BLOCKSTEP		(1 << TIF_BLOCKSTEP)
@@ -109,7 +109,6 @@ static inline struct thread_info *current_thread_info(void)
 #define TIF_RESTOREALL		11	/* Restore all regs (implies NOERROR) */
 #define TIF_NOERROR		12	/* Force successful syscall return */
 #define TIF_NOTIFY_RESUME	13	/* callback before returning to user */
-#define TIF_FREEZE		14	/* Freezing for suspend */
 #define TIF_SYSCALL_TRACEPOINT	15	/* syscall tracepoint instrumentation */
 #define TIF_RUNLATCH		16	/* Is the runlatch enabled? */
 
@@ -127,7 +126,6 @@ static inline struct thread_info *current_thread_info(void)
 #define _TIF_RESTOREALL		(1<<TIF_RESTOREALL)
 #define _TIF_NOERROR		(1<<TIF_NOERROR)
 #define _TIF_NOTIFY_RESUME	(1<<TIF_NOTIFY_RESUME)
-#define _TIF_FREEZE		(1<<TIF_FREEZE)
 #define _TIF_SYSCALL_TRACEPOINT	(1<<TIF_SYSCALL_TRACEPOINT)
 #define _TIF_RUNLATCH		(1<<TIF_RUNLATCH)
 #define _TIF_SYSCALL_T_OR_A	(_TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT | \
@@ -102,7 +102,6 @@ static inline struct thread_info *current_thread_info(void)
 #define TIF_MEMDIE		18	/* is terminating due to OOM killer */
 #define TIF_RESTORE_SIGMASK	19	/* restore signal mask in do_signal() */
 #define TIF_SINGLE_STEP		20	/* This task is single stepped */
-#define TIF_FREEZE		21	/* thread is freezing for suspend */
 
 #define _TIF_SYSCALL		(1<<TIF_SYSCALL)
 #define _TIF_NOTIFY_RESUME	(1<<TIF_NOTIFY_RESUME)
@@ -119,7 +118,6 @@ static inline struct thread_info *current_thread_info(void)
 #define _TIF_POLLING_NRFLAG	(1<<TIF_POLLING_NRFLAG)
 #define _TIF_31BIT		(1<<TIF_31BIT)
 #define _TIF_SINGLE_STEP	(1<<TIF_SINGLE_STEP)
-#define _TIF_FREEZE		(1<<TIF_FREEZE)
 
 #ifdef CONFIG_64BIT
 #define is_32bit_task()		(test_thread_flag(TIF_31BIT))
@@ -122,7 +122,6 @@ extern void init_thread_xstate(void);
 #define TIF_SYSCALL_TRACEPOINT	8	/* for ftrace syscall instrumentation */
 #define TIF_POLLING_NRFLAG	17	/* true if poll_idle() is polling TIF_NEED_RESCHED */
 #define TIF_MEMDIE		18	/* is terminating due to OOM killer */
-#define TIF_FREEZE		19	/* Freezing for suspend */
 
 #define _TIF_SYSCALL_TRACE	(1 << TIF_SYSCALL_TRACE)
 #define _TIF_SIGPENDING		(1 << TIF_SIGPENDING)
@@ -133,7 +132,6 @@ extern void init_thread_xstate(void);
 #define _TIF_NOTIFY_RESUME	(1 << TIF_NOTIFY_RESUME)
 #define _TIF_SYSCALL_TRACEPOINT	(1 << TIF_SYSCALL_TRACEPOINT)
 #define _TIF_POLLING_NRFLAG	(1 << TIF_POLLING_NRFLAG)
-#define _TIF_FREEZE		(1 << TIF_FREEZE)
 
 /*
  * _TIF_ALLWORK_MASK and _TIF_WORK_MASK need to fit within 2 bytes, or we
@@ -133,7 +133,6 @@ BTFIXUPDEF_CALL(void, free_thread_info, struct thread_info *)
 #define TIF_POLLING_NRFLAG	9	/* true if poll_idle() is polling
					 * TIF_NEED_RESCHED */
 #define TIF_MEMDIE		10	/* is terminating due to OOM killer */
-#define TIF_FREEZE		11	/* is freezing for suspend */
 
 /* as above, but as bit values */
 #define _TIF_SYSCALL_TRACE	(1<<TIF_SYSCALL_TRACE)
@@ -147,7 +146,6 @@ BTFIXUPDEF_CALL(void, free_thread_info, struct thread_info *)
 #define _TIF_DO_NOTIFY_RESUME_MASK	(_TIF_NOTIFY_RESUME | \
					 _TIF_SIGPENDING | \
					 _TIF_RESTORE_SIGMASK)
-#define _TIF_FREEZE		(1<<TIF_FREEZE)
 
 #endif /* __KERNEL__ */
 
@@ -225,7 +225,6 @@ register struct thread_info *current_thread_info_reg asm("g6");
 /* flag bit 12 is available */
 #define TIF_MEMDIE		13	/* is terminating due to OOM killer */
 #define TIF_POLLING_NRFLAG	14
-#define TIF_FREEZE		15	/* is freezing for suspend */
 
 #define _TIF_SYSCALL_TRACE	(1<<TIF_SYSCALL_TRACE)
 #define _TIF_NOTIFY_RESUME	(1<<TIF_NOTIFY_RESUME)
@@ -237,7 +236,6 @@ register struct thread_info *current_thread_info_reg asm("g6");
 #define _TIF_SYSCALL_AUDIT	(1<<TIF_SYSCALL_AUDIT)
 #define _TIF_SYSCALL_TRACEPOINT	(1<<TIF_SYSCALL_TRACEPOINT)
 #define _TIF_POLLING_NRFLAG	(1<<TIF_POLLING_NRFLAG)
-#define _TIF_FREEZE		(1<<TIF_FREEZE)
 
 #define _TIF_USER_WORK_MASK	((0xff << TI_FLAG_WSAVED_SHIFT) | \
				 _TIF_DO_NOTIFY_RESUME_MASK | \
@@ -71,7 +71,6 @@ static inline struct thread_info *current_thread_info(void)
 #define TIF_MEMDIE		5	/* is terminating due to OOM killer */
 #define TIF_SYSCALL_AUDIT	6
 #define TIF_RESTORE_SIGMASK	7
-#define TIF_FREEZE		16	/* is freezing for suspend */
 
 #define _TIF_SYSCALL_TRACE	(1 << TIF_SYSCALL_TRACE)
 #define _TIF_SIGPENDING		(1 << TIF_SIGPENDING)
@@ -80,6 +79,5 @@ static inline struct thread_info *current_thread_info(void)
 #define _TIF_MEMDIE		(1 << TIF_MEMDIE)
 #define _TIF_SYSCALL_AUDIT	(1 << TIF_SYSCALL_AUDIT)
 #define _TIF_RESTORE_SIGMASK	(1 << TIF_RESTORE_SIGMASK)
-#define _TIF_FREEZE		(1 << TIF_FREEZE)
 
 #endif
@@ -135,14 +135,12 @@ static inline struct thread_info *current_thread_info(void)
 #define TIF_NOTIFY_RESUME	2	/* callback before returning to user */
 #define TIF_SYSCALL_TRACE	8
 #define TIF_MEMDIE		18
-#define TIF_FREEZE		19
 #define TIF_RESTORE_SIGMASK	20
 
 #define _TIF_SIGPENDING		(1 << TIF_SIGPENDING)
 #define _TIF_NEED_RESCHED	(1 << TIF_NEED_RESCHED)
 #define _TIF_NOTIFY_RESUME	(1 << TIF_NOTIFY_RESUME)
 #define _TIF_SYSCALL_TRACE	(1 << TIF_SYSCALL_TRACE)
-#define _TIF_FREEZE		(1 << TIF_FREEZE)
 #define _TIF_RESTORE_SIGMASK	(1 << TIF_RESTORE_SIGMASK)
 
 /*
@@ -90,7 +90,6 @@ struct thread_info {
 #define TIF_MEMDIE		20	/* is terminating due to OOM killer */
 #define TIF_DEBUG		21	/* uses debug registers */
 #define TIF_IO_BITMAP		22	/* uses I/O bitmap */
-#define TIF_FREEZE		23	/* is freezing for suspend */
 #define TIF_FORCED_TF		24	/* true if TF in eflags artificially */
 #define TIF_BLOCKSTEP		25	/* set when we want DEBUGCTLMSR_BTF */
 #define TIF_LAZY_MMU_UPDATES	27	/* task is updating the mmu lazily */
@@ -112,7 +111,6 @@ struct thread_info {
 #define _TIF_FORK		(1 << TIF_FORK)
 #define _TIF_DEBUG		(1 << TIF_DEBUG)
 #define _TIF_IO_BITMAP		(1 << TIF_IO_BITMAP)
-#define _TIF_FREEZE		(1 << TIF_FREEZE)
 #define _TIF_FORCED_TF		(1 << TIF_FORCED_TF)
 #define _TIF_BLOCKSTEP		(1 << TIF_BLOCKSTEP)
 #define _TIF_LAZY_MMU_UPDATES	(1 << TIF_LAZY_MMU_UPDATES)
@@ -132,7 +132,6 @@ static inline struct thread_info *current_thread_info(void)
 #define TIF_MEMDIE		5	/* is terminating due to OOM killer */
 #define TIF_RESTORE_SIGMASK	6	/* restore signal mask in do_signal() */
 #define TIF_POLLING_NRFLAG	16	/* true if poll_idle() is polling TIF_NEED_RESCHED */
-#define TIF_FREEZE		17	/* is freezing for suspend */
 
 #define _TIF_SYSCALL_TRACE	(1<<TIF_SYSCALL_TRACE)
 #define _TIF_SIGPENDING		(1<<TIF_SIGPENDING)
@@ -141,7 +140,6 @@ static inline struct thread_info *current_thread_info(void)
 #define _TIF_IRET		(1<<TIF_IRET)
 #define _TIF_POLLING_NRFLAG	(1<<TIF_POLLING_NRFLAG)
 #define _TIF_RESTORE_SIGMASK	(1<<TIF_RESTORE_SIGMASK)
-#define _TIF_FREEZE		(1<<TIF_FREEZE)
 
 #define _TIF_WORK_MASK		0x0000FFFE	/* work to do on interrupt/exception return */
 #define _TIF_ALLWORK_MASK	0x0000FFFF	/* work to do on any return to u-space */
@@ -475,8 +475,6 @@ static int btmrvl_service_main_thread(void *data)
 
	init_waitqueue_entry(&wait, current);
 
-	current->flags |= PF_NOFREEZE;
-
	for (;;) {
		add_wait_queue(&thread->wait_q, &wait);
 
@@ -214,9 +214,18 @@ static unsigned int dmatest_verify(u8 **bufs, unsigned int start,
	return error_count;
 }
 
-static void dmatest_callback(void *completion)
+/* poor man's completion - we want to use wait_event_freezable() on it */
+struct dmatest_done {
+	bool			done;
+	wait_queue_head_t	*wait;
+};
+
+static void dmatest_callback(void *arg)
 {
-	complete(completion);
+	struct dmatest_done *done = arg;
+
+	done->done = true;
+	wake_up_all(done->wait);
 }
 
 /*
@@ -235,7 +244,9 @@ static void dmatest_callback(void *completion)
  */
 static int dmatest_func(void *data)
 {
+	DECLARE_WAIT_QUEUE_HEAD_ONSTACK(done_wait);
	struct dmatest_thread	*thread = data;
+	struct dmatest_done	done = { .wait = &done_wait };
	struct dma_chan		*chan;
	const char		*thread_name;
	unsigned int		src_off, dst_off, len;
@@ -252,7 +263,7 @@ static int dmatest_func(void *data)
	int			i;
 
	thread_name = current->comm;
-	set_freezable_with_signal();
+	set_freezable();
 
	ret = -ENOMEM;
 
@@ -306,9 +317,6 @@ static int dmatest_func(void *data)
		struct dma_async_tx_descriptor *tx = NULL;
		dma_addr_t dma_srcs[src_cnt];
		dma_addr_t dma_dsts[dst_cnt];
-		struct completion cmp;
-		unsigned long start, tmo, end = 0 /* compiler... */;
-		bool reload = true;
		u8 align = 0;
 
		total_tests++;
@@ -391,9 +399,9 @@ static int dmatest_func(void *data)
			continue;
		}
 
-		init_completion(&cmp);
+		done.done = false;
		tx->callback = dmatest_callback;
-		tx->callback_param = &cmp;
+		tx->callback_param = &done;
		cookie = tx->tx_submit(tx);
 
		if (dma_submit_error(cookie)) {
@@ -407,20 +415,20 @@ static int dmatest_func(void *data)
		}
		dma_async_issue_pending(chan);
 
-		do {
-			start = jiffies;
-			if (reload)
-				end = start + msecs_to_jiffies(timeout);
-			else if (end <= start)
-				end = start + 1;
-			tmo = wait_for_completion_interruptible_timeout(&cmp,
-							end - start);
-			reload = try_to_freeze();
-		} while (tmo == -ERESTARTSYS);
+		wait_event_freezable_timeout(done_wait, done.done,
+					     msecs_to_jiffies(timeout));
 
		status = dma_async_is_tx_complete(chan, cookie, NULL, NULL);
 
-		if (tmo == 0) {
+		if (!done.done) {
+			/*
+			 * We're leaving the timed out dma operation with
+			 * dangling pointer to done_wait.  To make this
+			 * correct, we'll need to allocate wait_done for
+			 * each test iteration and perform "who's gonna
+			 * free it this time?" dancing.  For now, just
+			 * leave it dangling.
+			 */
			pr_warning("%s: #%u: test timed out\n",
				   thread_name, total_tests - 1);
			failed_tests++;
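The dmatest conversion above replaces a struct completion with an open-coded "done" flag plus wait queue, because wait_event_freezable_timeout() needs a condition it can re-test around a freeze. A minimal sketch of the same pattern, with invented names and an assumed asynchronous callback, could look like this:

	#include <linux/freezer.h>
	#include <linux/wait.h>
	#include <linux/jiffies.h>

	static DECLARE_WAIT_QUEUE_HEAD(demo_wait);
	static bool demo_done;

	static void demo_callback(void *arg)	/* fired from some async context */
	{
		demo_done = true;
		wake_up_all(&demo_wait);
	}

	static void demo_wait_for_completion(void)
	{
		demo_done = false;
		/* ... kick off the asynchronous operation here ... */

		/* sleeps interruptibly, but freezes cleanly during suspend */
		wait_event_freezable_timeout(demo_wait, demo_done,
					     msecs_to_jiffies(3000));
	}
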
@@ -138,8 +138,6 @@ static int twl6030_irq_thread(void *data)
	static const unsigned max_i2c_errors = 100;
	int ret;
 
-	current->flags |= PF_NOFREEZE;
-
	while (!kthread_should_stop()) {
		int i;
		union {
@@ -750,7 +750,7 @@ static int stir_transmit_thread(void *arg)
 
			write_reg(stir, REG_CTRL1, CTRL1_TXPWD|CTRL1_RXPWD);
 
-			refrigerator();
+			try_to_freeze();
 
			if (change_speed(stir, stir->speed))
				break;
@@ -2456,8 +2456,9 @@ static int hotkey_kthread(void *data)
	u32 poll_mask, event_mask;
	unsigned int si, so;
	unsigned long t;
-	unsigned int change_detector, must_reset;
+	unsigned int change_detector;
	unsigned int poll_freq;
+	bool was_frozen;
 
	mutex_lock(&hotkey_thread_mutex);
 
@@ -2488,14 +2489,14 @@ static int hotkey_kthread(void *data)
			t = 100;	/* should never happen... */
		}
		t = msleep_interruptible(t);
-		if (unlikely(kthread_should_stop()))
+		if (unlikely(kthread_freezable_should_stop(&was_frozen)))
			break;
-		must_reset = try_to_freeze();
-		if (t > 0 && !must_reset)
+
+		if (t > 0 && !was_frozen)
			continue;
 
		mutex_lock(&hotkey_thread_data_mutex);
-		if (must_reset || hotkey_config_change != change_detector) {
+		if (was_frozen || hotkey_config_change != change_detector) {
			/* forget old state on thaw or config change */
			si = so;
			t = 0;
@@ -2528,10 +2529,6 @@ static int hotkey_kthread(void *data)
 static void hotkey_poll_stop_sync(void)
 {
	if (tpacpi_hotkey_task) {
-		if (frozen(tpacpi_hotkey_task) ||
-		    freezing(tpacpi_hotkey_task))
-			thaw_process(tpacpi_hotkey_task);
-
		kthread_stop(tpacpi_hotkey_task);
		tpacpi_hotkey_task = NULL;
		mutex_lock(&hotkey_thread_mutex);
@@ -466,8 +466,6 @@ static int rtsx_control_thread(void *__dev)
	struct rtsx_chip *chip = dev->chip;
	struct Scsi_Host *host = rtsx_to_host(dev);
 
-	current->flags |= PF_NOFREEZE;
-
	for (;;) {
		if (wait_for_completion_interruptible(&dev->cmnd_ready))
			break;
@@ -831,7 +831,8 @@ static int usb_stor_scan_thread(void * __us)
 
	dev_dbg(dev, "device found\n");
 
-	set_freezable_with_signal();
+	set_freezable();
+
	/*
	 * Wait for the timeout to expire or for a disconnect
	 *
@@ -839,16 +840,16 @@ static int usb_stor_scan_thread(void * __us)
	 * fail to freeze, but we can't be non-freezable either.  Nor can
	 * khubd freeze while waiting for scanning to complete as it may
	 * hold the device lock, causing a hang when suspending devices.
-	 * So we request a fake signal when freezing and use
-	 * interruptible sleep to kick us out of our wait early when
-	 * freezing happens.
+	 * So instead of using wait_event_freezable(), explicitly test
+	 * for (DONT_SCAN || freezing) in interruptible wait and proceed
+	 * if any of DONT_SCAN, freezing or timeout has happened.
	 */
	if (delay_use > 0) {
		dev_dbg(dev, "waiting for device to settle "
				"before scanning\n");
		wait_event_interruptible_timeout(us->delay_wait,
-				test_bit(US_FLIDX_DONT_SCAN, &us->dflags),
-				delay_use * HZ);
+				test_bit(US_FLIDX_DONT_SCAN, &us->dflags) ||
+				freezing(current), delay_use * HZ);
	}
 
	/* If the device is still connected, perform the scanning */
@@ -340,7 +340,7 @@ static int worker_loop(void *arg)
			if (freezing(current)) {
				worker->working = 0;
				spin_unlock_irq(&worker->lock);
-				refrigerator();
+				try_to_freeze();
			} else {
				spin_unlock_irq(&worker->lock);
				if (!kthread_should_stop()) {
@@ -1579,9 +1579,7 @@ static int cleaner_kthread(void *arg)
			btrfs_run_defrag_inodes(root->fs_info);
		}
 
-		if (freezing(current)) {
-			refrigerator();
-		} else {
+		if (!try_to_freeze()) {
			set_current_state(TASK_INTERRUPTIBLE);
			if (!kthread_should_stop())
				schedule();
@@ -1635,9 +1633,7 @@ static int transaction_kthread(void *arg)
		wake_up_process(root->fs_info->cleaner_kthread);
		mutex_unlock(&root->fs_info->transaction_kthread_mutex);
 
-		if (freezing(current)) {
-			refrigerator();
-		} else {
+		if (!try_to_freeze()) {
			set_current_state(TASK_INTERRUPTIBLE);
			if (!kthread_should_stop() &&
			    !btrfs_transaction_blocked(root->fs_info))
@@ -2884,8 +2884,7 @@ static int ext4_lazyinit_thread(void *arg)
		}
		mutex_unlock(&eli->li_list_mtx);
 
-		if (freezing(current))
-			refrigerator();
+		try_to_freeze();
 
		cur = jiffies;
		if ((time_after_eq(cur, next_wakeup)) ||
@@ -947,7 +947,7 @@ int bdi_writeback_thread(void *data)
 
	trace_writeback_thread_start(bdi);
 
-	while (!kthread_should_stop()) {
+	while (!kthread_freezable_should_stop(NULL)) {
		/*
		 * Remove own delayed wake-up timer, since we are already awake
		 * and we'll take care of the preriodic write-back.
@@ -977,8 +977,6 @@ int bdi_writeback_thread(void *data)
			 */
			schedule();
		}
-
-		try_to_freeze();
	}
 
	/* Flush any work that raced with us exiting */
@@ -951,8 +951,8 @@ int gfs2_logd(void *data)
			wake_up(&sdp->sd_log_waitq);
 
		t = gfs2_tune_get(sdp, gt_logd_secs) * HZ;
-		if (freezing(current))
-			refrigerator();
+
+		try_to_freeze();
 
		do {
			prepare_to_wait(&sdp->sd_logd_waitq, &wait,
@@ -1427,8 +1427,8 @@ int gfs2_quotad(void *data)
		/* Check for & recover partially truncated inodes */
		quotad_check_trunc_list(sdp);
 
-		if (freezing(current))
-			refrigerator();
+		try_to_freeze();
+
		t = min(quotad_timeo, statfs_timeo);
 
		prepare_to_wait(&sdp->sd_quota_wait, &wait, TASK_INTERRUPTIBLE);
@@ -166,7 +166,7 @@ static int kjournald(void *arg)
			 */
			jbd_debug(1, "Now suspending kjournald\n");
			spin_unlock(&journal->j_state_lock);
-			refrigerator();
+			try_to_freeze();
			spin_lock(&journal->j_state_lock);
		} else {
			/*
@@ -173,7 +173,7 @@ static int kjournald2(void *arg)
			 */
			jbd_debug(1, "Now suspending kjournald2\n");
			write_unlock(&journal->j_state_lock);
-			refrigerator();
+			try_to_freeze();
			write_lock(&journal->j_state_lock);
		} else {
			/*
@@ -2349,7 +2349,7 @@ int jfsIOWait(void *arg)
 
		if (freezing(current)) {
			spin_unlock_irq(&log_redrive_lock);
-			refrigerator();
+			try_to_freeze();
		} else {
			set_current_state(TASK_INTERRUPTIBLE);
			spin_unlock_irq(&log_redrive_lock);
@@ -2800,7 +2800,7 @@ int jfs_lazycommit(void *arg)
 
		if (freezing(current)) {
			LAZY_UNLOCK(flags);
-			refrigerator();
+			try_to_freeze();
		} else {
			DECLARE_WAITQUEUE(wq, current);
 
@@ -2994,7 +2994,7 @@ int jfs_sync(void *arg)
 
		if (freezing(current)) {
			TXN_UNLOCK();
-			refrigerator();
+			try_to_freeze();
		} else {
			set_current_state(TASK_INTERRUPTIBLE);
			TXN_UNLOCK();
@@ -2470,7 +2470,7 @@ static int nilfs_segctor_thread(void *arg)
 
		if (freezing(current)) {
			spin_unlock(&sci->sc_state_lock);
-			refrigerator();
+			try_to_freeze();
			spin_lock(&sci->sc_state_lock);
		} else {
			DEFINE_WAIT(wait);
@@ -1703,7 +1703,7 @@ xfsbufd(
 
		if (unlikely(freezing(current))) {
			set_bit(XBT_FORCE_SLEEP, &target->bt_flags);
-			refrigerator();
+			try_to_freeze();
		} else {
			clear_bit(XBT_FORCE_SLEEP, &target->bt_flags);
		}
@@ -5,71 +5,58 @@
 
 #include <linux/sched.h>
 #include <linux/wait.h>
+#include <linux/atomic.h>
 
 #ifdef CONFIG_FREEZER
+extern atomic_t system_freezing_cnt;	/* nr of freezing conds in effect */
+extern bool pm_freezing;		/* PM freezing in effect */
+extern bool pm_nosig_freezing;		/* PM nosig freezing in effect */
+
 /*
  * Check if a process has been frozen
  */
-static inline int frozen(struct task_struct *p)
+static inline bool frozen(struct task_struct *p)
 {
	return p->flags & PF_FROZEN;
 }
 
+extern bool freezing_slow_path(struct task_struct *p);
+
 /*
  * Check if there is a request to freeze a process
  */
-static inline int freezing(struct task_struct *p)
-{
-	return test_tsk_thread_flag(p, TIF_FREEZE);
-}
-
-/*
- * Request that a process be frozen
- */
-static inline void set_freeze_flag(struct task_struct *p)
-{
-	set_tsk_thread_flag(p, TIF_FREEZE);
-}
-
-/*
- * Sometimes we may need to cancel the previous 'freeze' request
- */
-static inline void clear_freeze_flag(struct task_struct *p)
-{
-	clear_tsk_thread_flag(p, TIF_FREEZE);
-}
-
-static inline bool should_send_signal(struct task_struct *p)
+static inline bool freezing(struct task_struct *p)
 {
-	return !(p->flags & PF_FREEZER_NOSIG);
+	if (likely(!atomic_read(&system_freezing_cnt)))
+		return false;
+	return freezing_slow_path(p);
 }
 
-/* Takes and releases task alloc lock using task_lock() */
-extern int thaw_process(struct task_struct *p);
+extern void __thaw_task(struct task_struct *t);
 
-extern void refrigerator(void);
+extern bool __refrigerator(bool check_kthr_stop);
 extern int freeze_processes(void);
 extern int freeze_kernel_threads(void);
 extern void thaw_processes(void);
 
-static inline int try_to_freeze(void)
+static inline bool try_to_freeze(void)
 {
-	if (freezing(current)) {
-		refrigerator();
-		return 1;
-	} else
-		return 0;
+	might_sleep();
+	if (likely(!freezing(current)))
+		return false;
+	return __refrigerator(false);
 }
 
-extern bool freeze_task(struct task_struct *p, bool sig_only);
-extern void cancel_freezing(struct task_struct *p);
+extern bool freeze_task(struct task_struct *p);
+extern bool set_freezable(void);
 
 #ifdef CONFIG_CGROUP_FREEZER
-extern int cgroup_freezing_or_frozen(struct task_struct *task);
+extern bool cgroup_freezing(struct task_struct *task);
 #else /* !CONFIG_CGROUP_FREEZER */
-static inline int cgroup_freezing_or_frozen(struct task_struct *task)
+static inline bool cgroup_freezing(struct task_struct *task)
 {
-	return 0;
+	return false;
 }
 #endif /* !CONFIG_CGROUP_FREEZER */
 
@@ -117,23 +104,6 @@ static inline int freezer_should_skip(struct task_struct *p)
	return !!(p->flags & PF_FREEZER_SKIP);
 }
 
-/*
- * Tell the freezer that the current task should be frozen by it
- */
-static inline void set_freezable(void)
-{
-	current->flags &= ~PF_NOFREEZE;
-}
-
-/*
- * Tell the freezer that the current task should be frozen by it and that it
- * should send a fake signal to the task to freeze it.
- */
-static inline void set_freezable_with_signal(void)
-{
-	current->flags &= ~(PF_NOFREEZE | PF_FREEZER_NOSIG);
-}
-
 /*
  * Freezer-friendly wrappers around wait_event_interruptible(),
  * wait_event_killable() and wait_event_interruptible_timeout(), originally
@@ -152,47 +122,45 @@ static inline void set_freezable_with_signal(void)
 #define wait_event_freezable(wq, condition)				\
 ({									\
	int __retval;							\
-	do {								\
+	for (;;) {							\
		__retval = wait_event_interruptible(wq,			\
				(condition) || freezing(current));	\
-		if (__retval && !freezing(current))			\
+		if (__retval || (condition))				\
			break;						\
-		else if (!(condition))					\
-			__retval = -ERESTARTSYS;			\
-	} while (try_to_freeze());					\
+		try_to_freeze();					\
+	}								\
	__retval;							\
 })
 
 #define wait_event_freezable_timeout(wq, condition, timeout)		\
 ({									\
	long __retval = timeout;					\
-	do {								\
+	for (;;) {							\
		__retval = wait_event_interruptible_timeout(wq,		\
				(condition) || freezing(current),	\
				__retval);				\
-	} while (try_to_freeze());					\
+		if (__retval <= 0 || (condition))			\
			break;						\
+		try_to_freeze();					\
+	}								\
	__retval;							\
 })
-#else /* !CONFIG_FREEZER */
-static inline int frozen(struct task_struct *p) { return 0; }
-static inline int freezing(struct task_struct *p) { return 0; }
-static inline void set_freeze_flag(struct task_struct *p) {}
-static inline void clear_freeze_flag(struct task_struct *p) {}
-static inline int thaw_process(struct task_struct *p) { return 1; }
-
-static inline void refrigerator(void) {}
+#else /* !CONFIG_FREEZER */
+static inline bool frozen(struct task_struct *p) { return false; }
+static inline bool freezing(struct task_struct *p) { return false; }
+
+static inline bool __refrigerator(bool check_kthr_stop) { return false; }
 static inline int freeze_processes(void) { return -ENOSYS; }
 static inline int freeze_kernel_threads(void) { return -ENOSYS; }
 static inline void thaw_processes(void) {}
 
-static inline int try_to_freeze(void) { return 0; }
+static inline bool try_to_freeze(void) { return false; }
 
 static inline void freezer_do_not_count(void) {}
 static inline void freezer_count(void) {}
 static inline int freezer_should_skip(struct task_struct *p) { return 0; }
 static inline void set_freezable(void) {}
-static inline void set_freezable_with_signal(void) {}
 
 #define wait_event_freezable(wq, condition)				\
		wait_event_interruptible(wq, condition)
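As the macro bodies above show, a freezable wait is just an interruptible wait whose wake-up condition also includes freezing(current), followed by try_to_freeze(). A hypothetical consumer (names invented; only the freezer API itself comes from this header) might look like:

	#include <linux/kthread.h>
	#include <linux/freezer.h>
	#include <linux/wait.h>

	static DECLARE_WAIT_QUEUE_HEAD(work_wait);
	static bool work_pending;

	static int worker_fn(void *unused)
	{
		set_freezable();

		while (!kthread_should_stop()) {
			/* returns -ERESTARTSYS on a signal; freezing is handled inside */
			if (wait_event_freezable(work_wait, work_pending))
				continue;

			work_pending = false;
			/* ... process the work ... */
		}
		return 0;
	}
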
@@ -35,6 +35,7 @@ struct task_struct *kthread_create_on_node(int (*threadfn)(void *data),
 void kthread_bind(struct task_struct *k, unsigned int cpu);
 int kthread_stop(struct task_struct *k);
 int kthread_should_stop(void);
+bool kthread_freezable_should_stop(bool *was_frozen);
 void *kthread_data(struct task_struct *k);
 
 int kthreadd(void *unused);
@@ -220,7 +220,7 @@ extern char ___assert_task_state[1 - 2*!!(
				((task->state & (__TASK_STOPPED | __TASK_TRACED)) != 0)
 #define task_contributes_to_load(task)	\
				((task->state & TASK_UNINTERRUPTIBLE) != 0 && \
-				 (task->flags & PF_FREEZING) == 0)
+				 (task->flags & PF_FROZEN) == 0)
 
 #define __set_task_state(tsk, state_value)		\
	do { (tsk)->state = (state_value); } while (0)
@@ -1772,7 +1772,6 @@ extern void thread_group_times(struct task_struct *p, cputime_t *ut, cputime_t *
 #define PF_MEMALLOC	0x00000800	/* Allocating memory */
 #define PF_NPROC_EXCEEDED 0x00001000	/* set_user noticed that RLIMIT_NPROC was exceeded */
 #define PF_USED_MATH	0x00002000	/* if unset the fpu must be initialized before use */
-#define PF_FREEZING	0x00004000	/* freeze in progress. do not account to load */
 #define PF_NOFREEZE	0x00008000	/* this thread should not be frozen */
 #define PF_FROZEN	0x00010000	/* frozen for system suspend */
 #define PF_FSTRANS	0x00020000	/* inside a filesystem transaction */
@@ -1788,7 +1787,6 @@ extern void thread_group_times(struct task_struct *p, cputime_t *ut, cputime_t *
 #define PF_MEMPOLICY	0x10000000	/* Non-default NUMA mempolicy */
 #define PF_MUTEX_TESTER	0x20000000	/* Thread belongs to the rt mutex tester */
 #define PF_FREEZER_SKIP	0x40000000	/* Freezer should not count it as freezable */
-#define PF_FREEZER_NOSIG 0x80000000	/* Freezer won't send signals to it */
 
 /*
  * Only the _current_ task can read/write to tsk->flags, but other
@@ -48,19 +48,17 @@ static inline struct freezer *task_freezer(struct task_struct *task)
			    struct freezer, css);
 }
 
-static inline int __cgroup_freezing_or_frozen(struct task_struct *task)
+bool cgroup_freezing(struct task_struct *task)
 {
-	enum freezer_state state = task_freezer(task)->state;
-	return (state == CGROUP_FREEZING) || (state == CGROUP_FROZEN);
-}
+	enum freezer_state state;
+	bool ret;
 
-int cgroup_freezing_or_frozen(struct task_struct *task)
-{
-	int result;
-	task_lock(task);
-	result = __cgroup_freezing_or_frozen(task);
-	task_unlock(task);
-	return result;
+	rcu_read_lock();
+	state = task_freezer(task)->state;
+	ret = state == CGROUP_FREEZING || state == CGROUP_FROZEN;
+	rcu_read_unlock();
+
+	return ret;
 }
 
 /*
@@ -102,9 +100,6 @@ struct cgroup_subsys freezer_subsys;
  * freezer_can_attach():
  * cgroup_mutex (held by caller of can_attach)
  *
- * cgroup_freezing_or_frozen():
- * task->alloc_lock (to get task's cgroup)
- *
  * freezer_fork() (preserving fork() performance means can't take cgroup_mutex):
  * freezer->lock
  *  sighand->siglock (if the cgroup is freezing)
@@ -130,7 +125,7 @@ struct cgroup_subsys freezer_subsys;
  *   write_lock css_set_lock (cgroup iterator start)
  *    task->alloc_lock
  *   read_lock css_set_lock (cgroup iterator start)
- *    task->alloc_lock (inside thaw_process(), prevents race with refrigerator())
+ *    task->alloc_lock (inside __thaw_task(), prevents race with refrigerator())
  *     sighand->siglock
  */
 static struct cgroup_subsys_state *freezer_create(struct cgroup_subsys *ss,
@@ -150,7 +145,11 @@ static struct cgroup_subsys_state *freezer_create(struct cgroup_subsys *ss,
 static void freezer_destroy(struct cgroup_subsys *ss,
			    struct cgroup *cgroup)
 {
-	kfree(cgroup_freezer(cgroup));
+	struct freezer *freezer = cgroup_freezer(cgroup);
+
+	if (freezer->state != CGROUP_THAWED)
+		atomic_dec(&system_freezing_cnt);
+	kfree(freezer);
 }
 
 /*
@@ -177,13 +176,7 @@ static int freezer_can_attach(struct cgroup_subsys *ss,
 
 static int freezer_can_attach_task(struct cgroup *cgrp, struct task_struct *tsk)
 {
-	rcu_read_lock();
-	if (__cgroup_freezing_or_frozen(tsk)) {
-		rcu_read_unlock();
-		return -EBUSY;
-	}
-	rcu_read_unlock();
-	return 0;
+	return cgroup_freezing(tsk) ? -EBUSY : 0;
 }
 
 static void freezer_fork(struct cgroup_subsys *ss, struct task_struct *task)
@@ -213,7 +206,7 @@ static void freezer_fork(struct cgroup_subsys *ss, struct task_struct *task)
 
	/* Locking avoids race with FREEZING -> THAWED transitions. */
	if (freezer->state == CGROUP_FREEZING)
-		freeze_task(task, true);
+		freeze_task(task);
	spin_unlock_irq(&freezer->lock);
 }
 
@@ -231,7 +224,7 @@ static void update_if_frozen(struct cgroup *cgroup,
	cgroup_iter_start(cgroup, &it);
	while ((task = cgroup_iter_next(cgroup, &it))) {
		ntotal++;
-		if (frozen(task))
+		if (freezing(task) && frozen(task))
			nfrozen++;
	}
 
@@ -279,10 +272,9 @@ static int try_to_freeze_cgroup(struct cgroup *cgroup, struct freezer *freezer)
	struct task_struct *task;
	unsigned int num_cant_freeze_now = 0;
 
-	freezer->state = CGROUP_FREEZING;
	cgroup_iter_start(cgroup, &it);
	while ((task = cgroup_iter_next(cgroup, &it))) {
-		if (!freeze_task(task, true))
+		if (!freeze_task(task))
			continue;
		if (frozen(task))
			continue;
@@ -300,12 +292,9 @@ static void unfreeze_cgroup(struct cgroup *cgroup, struct freezer *freezer)
	struct task_struct *task;
 
	cgroup_iter_start(cgroup, &it);
-	while ((task = cgroup_iter_next(cgroup, &it))) {
-		thaw_process(task);
-	}
+	while ((task = cgroup_iter_next(cgroup, &it)))
+		__thaw_task(task);
	cgroup_iter_end(cgroup, &it);
-
-	freezer->state = CGROUP_THAWED;
 }
 
 static int freezer_change_state(struct cgroup *cgroup,
@@ -319,20 +308,24 @@ static int freezer_change_state(struct cgroup *cgroup,
	spin_lock_irq(&freezer->lock);
 
	update_if_frozen(cgroup, freezer);
-	if (goal_state == freezer->state)
-		goto out;
 
	switch (goal_state) {
	case CGROUP_THAWED:
+		if (freezer->state != CGROUP_THAWED)
+			atomic_dec(&system_freezing_cnt);
+		freezer->state = CGROUP_THAWED;
		unfreeze_cgroup(cgroup, freezer);
		break;
	case CGROUP_FROZEN:
+		if (freezer->state == CGROUP_THAWED)
+			atomic_inc(&system_freezing_cnt);
+		freezer->state = CGROUP_FREEZING;
		retval = try_to_freeze_cgroup(cgroup, freezer);
		break;
	default:
		BUG();
	}
-out:
+
	spin_unlock_irq(&freezer->lock);
 
	return retval;
@@ -679,8 +679,6 @@ static void exit_mm(struct task_struct * tsk)
	tsk->mm = NULL;
	up_read(&mm->mmap_sem);
	enter_lazy_tlb(mm, current);
-	/* We don't want this task to be frozen prematurely */
-	clear_freeze_flag(tsk);
	task_unlock(tsk);
	mm_update_next_owner(mm);
	mmput(mm);
@@ -1040,6 +1038,7 @@ NORET_TYPE void do_exit(long code)
	exit_rcu();
	/* causes final put_task_struct in finish_task_switch(). */
	tsk->state = TASK_DEAD;
+	tsk->flags |= PF_NOFREEZE;	/* tell freezer to ignore us */
	schedule();
	BUG();
	/* Avoid "noreturn function does return".  */
@@ -992,7 +992,6 @@ static void copy_flags(unsigned long clone_flags, struct task_struct *p)
	new_flags |= PF_FORKNOEXEC;
	new_flags |= PF_STARTING;
	p->flags = new_flags;
-	clear_freeze_flag(p);
 }
 
 SYSCALL_DEFINE1(set_tid_address, int __user *, tidptr)
kernel/freezer.c (209 lines changed)
@ -9,101 +9,114 @@
|
||||
#include <linux/export.h>
|
||||
#include <linux/syscalls.h>
|
||||
#include <linux/freezer.h>
|
||||
#include <linux/kthread.h>
|
||||
|
||||
/*
|
||||
* freezing is complete, mark current process as frozen
|
||||
/* total number of freezing conditions in effect */
|
||||
atomic_t system_freezing_cnt = ATOMIC_INIT(0);
|
||||
EXPORT_SYMBOL(system_freezing_cnt);
|
||||
|
||||
/* indicate whether PM freezing is in effect, protected by pm_mutex */
|
||||
bool pm_freezing;
|
||||
bool pm_nosig_freezing;
|
||||
|
||||
/* protects freezing and frozen transitions */
|
||||
static DEFINE_SPINLOCK(freezer_lock);
|
||||
|
||||
/**
|
||||
* freezing_slow_path - slow path for testing whether a task needs to be frozen
|
||||
* @p: task to be tested
|
||||
*
|
||||
* This function is called by freezing() if system_freezing_cnt isn't zero
|
||||
* and tests whether @p needs to enter and stay in frozen state. Can be
|
||||
* called under any context. The freezers are responsible for ensuring the
|
||||
* target tasks see the updated state.
|
||||
*/
|
||||
static inline void frozen_process(void)
|
||||
bool freezing_slow_path(struct task_struct *p)
|
||||
{
|
||||
if (!unlikely(current->flags & PF_NOFREEZE)) {
|
||||
current->flags |= PF_FROZEN;
|
||||
smp_wmb();
|
||||
}
|
||||
clear_freeze_flag(current);
|
||||
if (p->flags & PF_NOFREEZE)
|
||||
return false;
|
||||
|
||||
if (pm_nosig_freezing || cgroup_freezing(p))
|
||||
return true;
|
||||
|
||||
if (pm_freezing && !(p->flags & PF_KTHREAD))
|
||||
return true;
|
||||
|
||||
return false;
|
||||
}
|
||||
EXPORT_SYMBOL(freezing_slow_path);
|
||||
|
||||
/* Refrigerator is place where frozen processes are stored :-). */
|
||||
void refrigerator(void)
|
||||
bool __refrigerator(bool check_kthr_stop)
|
||||
{
|
||||
/* Hmm, should we be allowed to suspend when there are realtime
|
||||
processes around? */
|
||||
long save;
|
||||
bool was_frozen = false;
|
||||
long save = current->state;
|
||||
|
||||
task_lock(current);
|
||||
if (freezing(current)) {
|
||||
frozen_process();
|
||||
task_unlock(current);
|
||||
} else {
|
||||
task_unlock(current);
|
||||
return;
|
||||
}
|
||||
save = current->state;
|
||||
pr_debug("%s entered refrigerator\n", current->comm);
|
||||
|
||||
spin_lock_irq(¤t->sighand->siglock);
|
||||
recalc_sigpending(); /* We sent fake signal, clean it up */
|
||||
spin_unlock_irq(¤t->sighand->siglock);
|
||||
|
||||
/* prevent accounting of that task to load */
|
||||
current->flags |= PF_FREEZING;
|
||||
|
||||
for (;;) {
|
||||
set_current_state(TASK_UNINTERRUPTIBLE);
|
||||
if (!frozen(current))
|
||||
|
||||
spin_lock_irq(&freezer_lock);
|
||||
current->flags |= PF_FROZEN;
|
||||
if (!freezing(current) ||
|
||||
(check_kthr_stop && kthread_should_stop()))
|
||||
current->flags &= ~PF_FROZEN;
|
||||
spin_unlock_irq(&freezer_lock);
|
||||
|
||||
if (!(current->flags & PF_FROZEN))
|
||||
break;
|
||||
was_frozen = true;
|
||||
schedule();
|
||||
}
|
||||
|
||||
/* Remove the accounting blocker */
|
||||
current->flags &= ~PF_FREEZING;
|
||||
|
||||
pr_debug("%s left refrigerator\n", current->comm);
|
||||
__set_current_state(save);
|
||||
|
||||
/*
|
||||
* Restore saved task state before returning. The mb'd version
|
||||
* needs to be used; otherwise, it might silently break
|
||||
* synchronization which depends on ordered task state change.
|
||||
*/
|
||||
set_current_state(save);
|
||||
|
||||
return was_frozen;
|
||||
}
|
||||
EXPORT_SYMBOL(refrigerator);
|
||||
EXPORT_SYMBOL(__refrigerator);
|
||||
|
||||
static void fake_signal_wake_up(struct task_struct *p)
|
||||
{
|
||||
unsigned long flags;
|
||||
|
||||
spin_lock_irqsave(&p->sighand->siglock, flags);
|
||||
signal_wake_up(p, 0);
|
||||
spin_unlock_irqrestore(&p->sighand->siglock, flags);
|
||||
if (lock_task_sighand(p, &flags)) {
|
||||
signal_wake_up(p, 0);
|
||||
unlock_task_sighand(p, &flags);
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* freeze_task - send a freeze request to given task
|
||||
* @p: task to send the request to
|
||||
* @sig_only: if set, the request will only be sent if the task has the
|
||||
* PF_FREEZER_NOSIG flag unset
|
||||
* Return value: 'false', if @sig_only is set and the task has
|
||||
* PF_FREEZER_NOSIG set or the task is frozen, 'true', otherwise
|
||||
* freeze_task - send a freeze request to given task
|
||||
* @p: task to send the request to
|
||||
*
|
||||
* The freeze request is sent by setting the tasks's TIF_FREEZE flag and
|
||||
* either sending a fake signal to it or waking it up, depending on whether
|
||||
* or not it has PF_FREEZER_NOSIG set. If @sig_only is set and the task
|
||||
* has PF_FREEZER_NOSIG set (ie. it is a typical kernel thread), its
|
||||
* TIF_FREEZE flag will not be set.
|
||||
* If @p is freezing, the freeze request is sent by setting %TIF_FREEZE
|
||||
* flag and either sending a fake signal to it or waking it up, depending
|
||||
* on whether it has %PF_FREEZER_NOSIG set.
|
||||
*
|
||||
* RETURNS:
|
||||
* %false, if @p is not freezing or already frozen; %true, otherwise
|
||||
*/
|
||||
bool freeze_task(struct task_struct *p, bool sig_only)
|
||||
bool freeze_task(struct task_struct *p)
|
||||
{
|
||||
/*
|
||||
* We first check if the task is freezing and next if it has already
|
||||
* been frozen to avoid the race with frozen_process() which first marks
|
||||
* the task as frozen and next clears its TIF_FREEZE.
|
||||
*/
|
||||
if (!freezing(p)) {
|
||||
smp_rmb();
|
||||
if (frozen(p))
|
||||
return false;
|
||||
unsigned long flags;
|
||||
|
||||
if (!sig_only || should_send_signal(p))
|
||||
set_freeze_flag(p);
|
||||
else
|
||||
return false;
|
||||
spin_lock_irqsave(&freezer_lock, flags);
|
||||
if (!freezing(p) || frozen(p)) {
|
||||
spin_unlock_irqrestore(&freezer_lock, flags);
|
||||
return false;
|
||||
}
|
||||
|
||||
if (should_send_signal(p)) {
|
||||
if (!(p->flags & PF_KTHREAD)) {
|
||||
fake_signal_wake_up(p);
|
||||
/*
|
||||
* fake_signal_wake_up() goes through p's scheduler
|
||||
@ -111,56 +124,48 @@ bool freeze_task(struct task_struct *p, bool sig_only)
|
||||
* TASK_RUNNING transition can't race with task state
|
||||
* testing in try_to_freeze_tasks().
|
||||
*/
|
||||
} else if (sig_only) {
|
||||
return false;
|
||||
} else {
|
||||
wake_up_state(p, TASK_INTERRUPTIBLE);
|
||||
}
|
||||
|
||||
spin_unlock_irqrestore(&freezer_lock, flags);
|
||||
return true;
|
||||
}
|
||||
|
||||
-void cancel_freezing(struct task_struct *p)
+void __thaw_task(struct task_struct *p)
 {
         unsigned long flags;

-        if (freezing(p)) {
-                pr_debug(" clean up: %s\n", p->comm);
-                clear_freeze_flag(p);
-                spin_lock_irqsave(&p->sighand->siglock, flags);
-                recalc_sigpending_and_wake(p);
-                spin_unlock_irqrestore(&p->sighand->siglock, flags);
-        }
-}
-
-static int __thaw_process(struct task_struct *p)
-{
-        if (frozen(p)) {
-                p->flags &= ~PF_FROZEN;
-                return 1;
-        }
-        clear_freeze_flag(p);
-        return 0;
-}
-
-/*
- * Wake up a frozen process
- *
- * task_lock() is needed to prevent the race with refrigerator() which may
- * occur if the freezing of tasks fails. Namely, without the lock, if the
- * freezing of tasks failed, thaw_tasks() might have run before a task in
- * refrigerator() could call frozen_process(), in which case the task would be
- * frozen and no one would thaw it.
- */
-int thaw_process(struct task_struct *p)
-{
-        task_lock(p);
-        if (__thaw_process(p) == 1) {
-                task_unlock(p);
+        /*
+         * Clear freezing and kick @p if FROZEN. Clearing is guaranteed to
+         * be visible to @p as waking up implies wmb. Waking up inside
+         * freezer_lock also prevents wakeups from leaking outside
+         * refrigerator.
+         */
+        spin_lock_irqsave(&freezer_lock, flags);
+        if (frozen(p))
                 wake_up_process(p);
-                return 1;
-        }
-        task_unlock(p);
-        return 0;
+        spin_unlock_irqrestore(&freezer_lock, flags);
 }
-EXPORT_SYMBOL(thaw_process);

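The comment above is a lost-wakeup argument: the freeze condition is cleared and the wakeup issued under the same freezer_lock that guards the frozen task's state checks. A sketch of the sleeping side this pairs with (illustrative shape only, not the verbatim __refrigerator() body):

        for (;;) {
                set_current_state(TASK_UNINTERRUPTIBLE);
                if (!freezing(current))
                        break;          /* thaw cleared the condition before waking us */
                schedule();             /* remain frozen until __thaw_task() wakes us */
        }
        __set_current_state(TASK_RUNNING);
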
+/**
+ * set_freezable - make %current freezable
+ *
+ * Mark %current freezable and enter refrigerator if necessary.
+ */
+bool set_freezable(void)
+{
+        might_sleep();
+
+        /*
+         * Modify flags while holding freezer_lock. This ensures the
+         * freezer notices that we aren't frozen yet or the freezing
+         * condition is visible to try_to_freeze() below.
+         */
+        spin_lock_irq(&freezer_lock);
+        current->flags &= ~PF_NOFREEZE;
+        spin_unlock_irq(&freezer_lock);
+
+        return try_to_freeze();
+}
+EXPORT_SYMBOL(set_freezable);

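Because set_freezable() now returns try_to_freeze(), the caller may be frozen on the spot, so it should only be called once the thread is ready for that. A usage sketch; example_thread() and do_work() are hypothetical:

        static void do_work(void) { /* hypothetical unit of work */ }

        static int example_thread(void *unused)
        {
                set_freezable();        /* clears PF_NOFREEZE; may freeze right here */

                while (!kthread_should_stop()) {
                        try_to_freeze();        /* parks in __refrigerator() if freezing */
                        do_work();
                }
                return 0;
        }
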
@@ -58,6 +58,31 @@ int kthread_should_stop(void)
 }
 EXPORT_SYMBOL(kthread_should_stop);

+/**
+ * kthread_freezable_should_stop - should this freezable kthread return now?
+ * @was_frozen: optional out parameter, indicates whether %current was frozen
+ *
+ * kthread_should_stop() for freezable kthreads, which will enter
+ * refrigerator if necessary. This function is safe from kthread_stop() /
+ * freezer deadlock and freezable kthreads should use this function instead
+ * of calling try_to_freeze() directly.
+ */
+bool kthread_freezable_should_stop(bool *was_frozen)
+{
+        bool frozen = false;
+
+        might_sleep();
+
+        if (unlikely(freezing(current)))
+                frozen = __refrigerator(true);
+
+        if (was_frozen)
+                *was_frozen = frozen;
+
+        return kthread_should_stop();
+}
+EXPORT_SYMBOL_GPL(kthread_freezable_should_stop);
+
 /**
  * kthread_data - return data value specified on kthread creation
  * @task: kthread task in question
@@ -257,7 +282,7 @@ int kthreadd(void *unused)
         set_cpus_allowed_ptr(tsk, cpu_all_mask);
         set_mems_allowed(node_states[N_HIGH_MEMORY]);

-        current->flags |= PF_NOFREEZE | PF_FREEZER_NOSIG;
+        current->flags |= PF_NOFREEZE;

         for (;;) {
                 set_current_state(TASK_INTERRUPTIBLE);

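Per the kernel-doc above, freezable kthreads should prefer the new helper to a bare try_to_freeze(), since it also honors kthread_stop() while frozen. A sketch of the intended loop shape; example_kthread(), resync_state() and do_work() are hypothetical placeholders:

        static void resync_state(void) { /* hypothetical: revalidate cached state after thaw */ }
        static void do_work(void)      { /* hypothetical unit of work */ }

        static int example_kthread(void *unused)
        {
                bool was_frozen;

                set_freezable();
                while (!kthread_freezable_should_stop(&was_frozen)) {
                        if (was_frozen)
                                resync_state();
                        do_work();
                }
                return 0;
        }
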
@@ -611,17 +611,6 @@ static void power_down(void)
         while(1);
 }

-static int prepare_processes(void)
-{
-        int error = 0;
-
-        if (freeze_processes()) {
-                error = -EBUSY;
-                thaw_processes();
-        }
-        return error;
-}
-
 /**
  * hibernate - Carry out system hibernation, including saving the image.
  */
@@ -654,7 +643,7 @@ int hibernate(void)
         sys_sync();
         printk("done.\n");

-        error = prepare_processes();
+        error = freeze_processes();
         if (error)
                 goto Finish;

@@ -815,7 +804,7 @@ static int software_resume(void)
                 goto close_finish;

         pr_debug("PM: Preparing processes for restore.\n");
-        error = prepare_processes();
+        error = freeze_processes();
         if (error) {
                 swsusp_close(FMODE_READ);
                 goto Done;

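Both hunks above shrink their call sites because the cleanup that prepare_processes() used to wrap now lives inside freeze_processes() itself (see the `if (error) thaw_processes();` tail added to it further down), leaving hibernate() and software_resume() with just:

        error = freeze_processes();     /* thaws everything itself on failure */
        if (error)
                goto Finish;            /* no explicit thaw at the call site */
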
@@ -22,16 +22,7 @@
  */
 #define TIMEOUT (20 * HZ)

-static inline int freezable(struct task_struct * p)
-{
-        if ((p == current) ||
-            (p->flags & PF_NOFREEZE) ||
-            (p->exit_state != 0))
-                return 0;
-        return 1;
-}
-
-static int try_to_freeze_tasks(bool sig_only)
+static int try_to_freeze_tasks(bool user_only)
 {
         struct task_struct *g, *p;
         unsigned long end_time;
@@ -46,17 +37,14 @@ static int try_to_freeze_tasks(bool sig_only)

         end_time = jiffies + TIMEOUT;

-        if (!sig_only)
+        if (!user_only)
                 freeze_workqueues_begin();

         while (true) {
                 todo = 0;
                 read_lock(&tasklist_lock);
                 do_each_thread(g, p) {
-                        if (frozen(p) || !freezable(p))
-                                continue;
-
-                        if (!freeze_task(p, sig_only))
+                        if (p == current || !freeze_task(p))
                                 continue;

                         /*
@@ -77,7 +65,7 @@ static int try_to_freeze_tasks(bool sig_only)
                 } while_each_thread(g, p);
                 read_unlock(&tasklist_lock);

-                if (!sig_only) {
+                if (!user_only) {
                         wq_busy = freeze_workqueues_busy();
                         todo += wq_busy;
                 }
@@ -103,11 +91,6 @@ static int try_to_freeze_tasks(bool sig_only)
         elapsed_csecs = elapsed_csecs64;

         if (todo) {
-                /* This does not unfreeze processes that are already frozen
-                 * (we have slightly ugly calling convention in that respect,
-                 * and caller must call thaw_processes() if something fails),
-                 * but it cleans up leftover PF_FREEZE requests.
-                 */
                 printk("\n");
                 printk(KERN_ERR "Freezing of tasks %s after %d.%02d seconds "
                        "(%d tasks refusing to freeze, wq_busy=%d):\n",
@@ -115,15 +98,11 @@ static int try_to_freeze_tasks(bool sig_only)
                        elapsed_csecs / 100, elapsed_csecs % 100,
                        todo - wq_busy, wq_busy);

-                thaw_workqueues();
-
                 read_lock(&tasklist_lock);
                 do_each_thread(g, p) {
-                        task_lock(p);
-                        if (!wakeup && freezing(p) && !freezer_should_skip(p))
+                        if (!wakeup && !freezer_should_skip(p) &&
+                            p != current && freezing(p) && !frozen(p))
                                 sched_show_task(p);
-                        cancel_freezing(p);
-                        task_unlock(p);
                 } while_each_thread(g, p);
                 read_unlock(&tasklist_lock);
         } else {
@@ -136,12 +115,18 @@ static int try_to_freeze_tasks(bool sig_only)

 /**
  * freeze_processes - Signal user space processes to enter the refrigerator.
+ *
+ * On success, returns 0. On failure, -errno and system is fully thawed.
  */
 int freeze_processes(void)
 {
         int error;

+        if (!pm_freezing)
+                atomic_inc(&system_freezing_cnt);
+
         printk("Freezing user space processes ... ");
+        pm_freezing = true;
         error = try_to_freeze_tasks(true);
         if (!error) {
                 printk("done.");
@@ -150,17 +135,22 @@ int freeze_processes(void)
         printk("\n");
         BUG_ON(in_atomic());

+        if (error)
+                thaw_processes();
         return error;
 }

 /**
  * freeze_kernel_threads - Make freezable kernel threads go to the refrigerator.
+ *
+ * On success, returns 0. On failure, -errno and system is fully thawed.
  */
 int freeze_kernel_threads(void)
 {
         int error;

         printk("Freezing remaining freezable tasks ... ");
+        pm_nosig_freezing = true;
         error = try_to_freeze_tasks(false);
         if (!error)
                 printk("done.");
@@ -168,37 +158,32 @@ int freeze_kernel_threads(void)
         printk("\n");
         BUG_ON(in_atomic());

+        if (error)
+                thaw_processes();
         return error;
 }

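Taken together, the two entry points implement the two-phase freeze (user space first, then the remaining freezable kernel threads) used, e.g., by the hibernation path, and both now thaw the system themselves on failure; roughly:

        int error;

        error = freeze_processes();              /* phase 1: user space */
        if (!error)
                error = freeze_kernel_threads(); /* phase 2: freezable kthreads */
        /* on failure, either phase has already called thaw_processes() */
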
-static void thaw_tasks(bool nosig_only)
-{
-        struct task_struct *g, *p;
-
-        read_lock(&tasklist_lock);
-        do_each_thread(g, p) {
-                if (!freezable(p))
-                        continue;
-
-                if (nosig_only && should_send_signal(p))
-                        continue;
-
-                if (cgroup_freezing_or_frozen(p))
-                        continue;
-
-                thaw_process(p);
-        } while_each_thread(g, p);
-        read_unlock(&tasklist_lock);
-}
-
 void thaw_processes(void)
 {
+        struct task_struct *g, *p;
+
+        if (pm_freezing)
+                atomic_dec(&system_freezing_cnt);
+        pm_freezing = false;
+        pm_nosig_freezing = false;
+
         oom_killer_enable();

         printk("Restarting tasks ... ");

         thaw_workqueues();
-        thaw_tasks(true);
-        thaw_tasks(false);

+        read_lock(&tasklist_lock);
+        do_each_thread(g, p) {
+                __thaw_task(p);
+        } while_each_thread(g, p);
+        read_unlock(&tasklist_lock);

         schedule();
         printk("done.\n");
 }

@@ -106,13 +106,11 @@ static int suspend_prepare(void)
         goto Finish;

         error = suspend_freeze_processes();
-        if (error) {
-                suspend_stats.failed_freeze++;
-                dpm_save_failed_step(SUSPEND_FREEZE);
-        } else
+        if (!error)
                 return 0;

-        suspend_thaw_processes();
+        suspend_stats.failed_freeze++;
+        dpm_save_failed_step(SUSPEND_FREEZE);
         usermodehelper_enable();
 Finish:
         pm_notifier_call_chain(PM_POST_SUSPEND);

@@ -257,10 +257,8 @@ static long snapshot_ioctl(struct file *filp, unsigned int cmd,
                 break;

                 error = freeze_processes();
-                if (error) {
-                        thaw_processes();
+                if (error)
                         usermodehelper_enable();
-                }
                 if (!error)
                         data->frozen = 1;
                 break;

@@ -600,14 +600,10 @@ static void bdi_wb_shutdown(struct backing_dev_info *bdi)

         /*
          * Finally, kill the kernel thread. We don't need to be RCU
-         * safe anymore, since the bdi is gone from visibility. Force
-         * unfreeze of the thread before calling kthread_stop(), otherwise
-         * it would never exet if it is currently stuck in the refrigerator.
+         * safe anymore, since the bdi is gone from visibility.
          */
-        if (bdi->wb.task) {
-                thaw_process(bdi->wb.task);
+        if (bdi->wb.task)
                 kthread_stop(bdi->wb.task);
-        }
 }

 /*

@@ -328,7 +328,7 @@ static struct task_struct *select_bad_process(unsigned int *ppoints,
          */
         if (test_tsk_thread_flag(p, TIF_MEMDIE)) {
                 if (unlikely(frozen(p)))
-                        thaw_process(p);
+                        __thaw_task(p);
                 return ERR_PTR(-1UL);
         }
         if (!p->mm)