forked from luck/tmp_suning_uos_patched
tick: Rename tick_check_idle() to tick_irq_enter()
This makes the code more symmetric against the existing tick functions called on irq exit: tick_irq_exit() and tick_nohz_irq_exit(). These functions are also symmetric as they mirror each other's action: we start to account idle time on irq exit and we stop this accounting on irq entry. Also the tick is stopped on irq exit and timekeeping catches up with the tickless time elapsed until we reach irq entry. This rename was suggested by Peter Zijlstra a long while ago but it got forgotten in the mass. Cc: Thomas Gleixner <tglx@linutronix.de> Cc: Ingo Molnar <mingo@kernel.org> Cc: Peter Zijlstra <peterz@infradead.org> Cc: Alex Shi <alex.shi@linaro.org> Cc: Steven Rostedt <rostedt@goodmis.org> Cc: Paul E. McKenney <paulmck@linux.vnet.ibm.com> Cc: John Stultz <john.stultz@linaro.org> Cc: Kevin Hilman <khilman@linaro.org> Link: http://lkml.kernel.org/r/1387320692-28460-2-git-send-email-fweisbec@gmail.com Signed-off-by: Frederic Weisbecker <fweisbec@gmail.com>
This commit is contained in:
parent
1b3f828760
commit
5acac1be49
|
@ -104,7 +104,7 @@ extern struct cpumask *tick_get_broadcast_oneshot_mask(void);
|
|||
extern void tick_clock_notify(void);
|
||||
extern int tick_check_oneshot_change(int allow_nohz);
|
||||
extern struct tick_sched *tick_get_tick_sched(int cpu);
|
||||
extern void tick_check_idle(void);
|
||||
extern void tick_irq_enter(void);
|
||||
extern int tick_oneshot_mode_active(void);
|
||||
# ifndef arch_needs_cpu
|
||||
# define arch_needs_cpu(cpu) (0)
|
||||
|
@ -112,7 +112,7 @@ extern int tick_oneshot_mode_active(void);
|
|||
# else
|
||||
static inline void tick_clock_notify(void) { }
|
||||
static inline int tick_check_oneshot_change(int allow_nohz) { return 0; }
|
||||
static inline void tick_check_idle(void) { }
|
||||
static inline void tick_irq_enter(void) { }
|
||||
static inline int tick_oneshot_mode_active(void) { return 0; }
|
||||
# endif
|
||||
|
||||
|
@ -121,7 +121,7 @@ static inline void tick_init(void) { }
|
|||
static inline void tick_cancel_sched_timer(int cpu) { }
|
||||
static inline void tick_clock_notify(void) { }
|
||||
static inline int tick_check_oneshot_change(int allow_nohz) { return 0; }
|
||||
static inline void tick_check_idle(void) { }
|
||||
static inline void tick_irq_enter(void) { }
|
||||
static inline int tick_oneshot_mode_active(void) { return 0; }
|
||||
#endif /* !CONFIG_GENERIC_CLOCKEVENTS */
|
||||
|
||||
|
|
|
@ -318,7 +318,7 @@ void irq_enter(void)
|
|||
* here, as softirq will be serviced on return from interrupt.
|
||||
*/
|
||||
local_bh_disable();
|
||||
tick_check_idle();
|
||||
tick_irq_enter();
|
||||
_local_bh_enable();
|
||||
}
|
||||
|
||||
|
|
|
@ -1023,7 +1023,7 @@ static void tick_nohz_kick_tick(struct tick_sched *ts, ktime_t now)
|
|||
#endif
|
||||
}
|
||||
|
||||
static inline void tick_check_nohz_this_cpu(void)
|
||||
static inline void tick_nohz_irq_enter(void)
|
||||
{
|
||||
struct tick_sched *ts = &__get_cpu_var(tick_cpu_sched);
|
||||
ktime_t now;
|
||||
|
@ -1042,17 +1042,17 @@ static inline void tick_check_nohz_this_cpu(void)
|
|||
#else
|
||||
|
||||
static inline void tick_nohz_switch_to_nohz(void) { }
|
||||
static inline void tick_check_nohz_this_cpu(void) { }
|
||||
static inline void tick_nohz_irq_enter(void) { }
|
||||
|
||||
#endif /* CONFIG_NO_HZ_COMMON */
|
||||
|
||||
/*
|
||||
* Called from irq_enter to notify about the possible interruption of idle()
|
||||
*/
|
||||
void tick_check_idle(void)
|
||||
void tick_irq_enter(void)
|
||||
{
|
||||
tick_check_oneshot_broadcast_this_cpu();
|
||||
tick_check_nohz_this_cpu();
|
||||
tick_nohz_irq_enter();
|
||||
}
|
||||
|
||||
/*
|
||||
|
|
Loading…
Reference in New Issue
Block a user