PM-runtime: Fix deadlock with ktime_get()
A deadlock has been seen when switching clocksources which use
PM-runtime. The call path is:
change_clocksource
    ...
    write_seqcount_begin
    ...
    timekeeping_update
    ...
    sh_cmt_clocksource_enable
    ...
    rpm_resume
    pm_runtime_mark_last_busy
    ktime_get
    do
        read_seqcount_begin
    while read_seqcount_retry
    ....
    write_seqcount_end
Reading the time should still be safe at that point, because the
clocksource has not actually been changed yet, but ktime_get() cannot
complete: change_clocksource() has already entered the seqcount write
section, so the read_seqcount_retry() loop in ktime_get() spins
forever.

Use ktime_get_mono_fast_ns() instead, which is lockless (NMI-safe) and
therefore safe to call from such a context.
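Reduced to its essentials, the deadlock pattern looks like the sketch
below. This is illustrative only, not code from this commit:
read_time() and change_source() are hypothetical stand-ins for
ktime_get() and the change_clocksource() path; only the seqlock.h API
is real.

    /* Illustrative sketch of the self-deadlock, not code from this commit. */
    #include <linux/seqlock.h>
    #include <linux/types.h>

    static seqcount_t tk_seq = SEQCNT_ZERO(tk_seq);

    /* Stand-in for ktime_get(): loops until the sequence is stable. */
    static u64 read_time(void)
    {
            unsigned int seq;
            u64 ns;

            do {
                    /* read_seqcount_begin() itself waits for an even count */
                    seq = read_seqcount_begin(&tk_seq);
                    ns = 0; /* sample the timekeeper here */
            } while (read_seqcount_retry(&tk_seq, seq));

            return ns;
    }

    /* Stand-in for the change_clocksource()/timekeeping_update() path. */
    static void change_source(void)
    {
            write_seqcount_begin(&tk_seq);  /* count becomes odd */
            read_time();                    /* never returns: self-deadlock */
            write_seqcount_end(&tk_seq);
    }

ktime_get_mono_fast_ns() avoids this because it reads a latch-based
copy of the timekeeper instead of spinning on the seqcount.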
With ktime_get_mono_fast_ns(), the timestamp is not guaranteed to be
monotonic across an update and as a result can go backward. According
to the update_fast_timekeeper() description: "In the worst case, this
can result in a slightly wrong timestamp (a few nanoseconds)". For
PM-runtime autosuspend, this means only that the suspend decision may
be slightly suboptimal.
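As a rough worked example of that worst case (all numbers
hypothetical):

    /* Illustrative only: last_busy was just stored, then the fast clock
     * is read 10 ns in the past across a timekeeping update. */
    u64 last_busy = 1000000000ULL;                 /* pm_runtime_mark_last_busy() */
    u64 now = last_busy - 10;                      /* timestamp went backward */
    u64 expires = last_busy + 250 * NSEC_PER_MSEC; /* 250 ms autosuspend delay */

    /* now < expires, so autosuspend is merely deferred ~10 ns longer than
     * ideal; both values come from the same clock, so nothing misfires. */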
Fixes: 8234f6734c ("PM-runtime: Switch autosuspend over to using hrtimers")
Reported-by: Biju Das <biju.das@bp.renesas.com>
Signed-off-by: Vincent Guittot <vincent.guittot@linaro.org>
Reviewed-by: Ulf Hansson <ulf.hansson@linaro.org>
Signed-off-by: Rafael J. Wysocki <rafael.j.wysocki@intel.com>
parent f17b5f06cb
commit 15efb47dc5
--- a/drivers/base/power/runtime.c
+++ b/drivers/base/power/runtime.c
@@ -130,7 +130,7 @@ u64 pm_runtime_autosuspend_expiration(struct device *dev)
 {
 	int autosuspend_delay;
 	u64 last_busy, expires = 0;
-	u64 now = ktime_to_ns(ktime_get());
+	u64 now = ktime_get_mono_fast_ns();
 
 	if (!dev->power.use_autosuspend)
 		goto out;
@@ -909,7 +909,7 @@ static enum hrtimer_restart pm_suspend_timer_fn(struct hrtimer *timer)
 	 * If 'expires' is after the current time, we've been called
 	 * too early.
 	 */
-	if (expires > 0 && expires < ktime_to_ns(ktime_get())) {
+	if (expires > 0 && expires < ktime_get_mono_fast_ns()) {
 		dev->power.timer_expires = 0;
 		rpm_suspend(dev, dev->power.timer_autosuspends ?
 		    (RPM_ASYNC | RPM_AUTO) : RPM_ASYNC);
@@ -928,7 +928,7 @@ static enum hrtimer_restart pm_suspend_timer_fn(struct hrtimer *timer)
 int pm_schedule_suspend(struct device *dev, unsigned int delay)
 {
 	unsigned long flags;
-	ktime_t expires;
+	u64 expires;
 	int retval;
 
 	spin_lock_irqsave(&dev->power.lock, flags);
@@ -945,8 +945,8 @@ int pm_schedule_suspend(struct device *dev, unsigned int delay)
 	/* Other scheduled or pending requests need to be canceled. */
 	pm_runtime_cancel_pending(dev);
 
-	expires = ktime_add(ktime_get(), ms_to_ktime(delay));
-	dev->power.timer_expires = ktime_to_ns(expires);
+	expires = ktime_get_mono_fast_ns() + (u64)delay * NSEC_PER_MSEC;
+	dev->power.timer_expires = expires;
 	dev->power.timer_autosuspends = 0;
 	hrtimer_start(&dev->power.suspend_timer, expires, HRTIMER_MODE_ABS);
 
--- a/include/linux/pm_runtime.h
+++ b/include/linux/pm_runtime.h
@@ -105,7 +105,7 @@ static inline bool pm_runtime_callbacks_present(struct device *dev)
 
 static inline void pm_runtime_mark_last_busy(struct device *dev)
 {
-	WRITE_ONCE(dev->power.last_busy, ktime_to_ns(ktime_get()));
+	WRITE_ONCE(dev->power.last_busy, ktime_get_mono_fast_ns());
 }
 
 static inline bool pm_runtime_is_irq_safe(struct device *dev)