Merge tag 'powerpc-5.9-1' of git://git.kernel.org/pub/scm/linux/kernel/git/powerpc/linux

Pull powerpc updates from Michael Ellerman:

 - Add support for (optionally) using queued spinlocks & rwlocks (a user-space sketch of the idea follows this message).

 - Support for a new faster system call ABI using the scv instruction on Power9 or later.

 - Drop support for the PROT_SAO mmap/mprotect flag as it will be unsupported on Power10 and future processors, leaving us with no way to implement the functionality it requests. This risks breaking userspace, though we believe it is unused in practice.

 - A bug fix for, and then the removal of, our custom stack expansion checking. We now allow stack expansion up to the rlimit, like other architectures.

 - Remove the remnants of our (previously disabled) topology update code, which tried to react to NUMA layout changes on virtualised systems, but was prone to crashes and other problems.

 - Add PMU support for Power10 CPUs.

 - A change to our signal trampoline so that we don't unbalance the link stack (branch return predictor) in the signal delivery path.

 - Lots of other cleanups, refactorings, smaller features and so on as usual.

Thanks to: Abhishek Goel, Alastair D'Silva, Alexander A. Klimov, Alexey Kardashevskiy, Alistair Popple, Andrew Donnellan, Aneesh Kumar K.V, Anju T Sudhakar, Anton Blanchard, Arnd Bergmann, Athira Rajeev, Balamuruhan S, Bharata B Rao, Bill Wendling, Bin Meng, Cédric Le Goater, Chris Packham, Christophe Leroy, Christoph Hellwig, Daniel Axtens, Dan Williams, David Lamparter, Desnes A. Nunes do Rosario, Erhard F., Finn Thain, Frederic Barrat, Ganesh Goudar, Gautham R. Shenoy, Geoff Levand, Greg Kurz, Gustavo A. R. Silva, Hari Bathini, Harish, Imre Kaloz, Joel Stanley, Joe Perches, John Crispin, Jordan Niethe, Kajol Jain, Kamalesh Babulal, Kees Cook, Laurent Dufour, Leonardo Bras, Li RongQing, Madhavan Srinivasan, Mahesh Salgaonkar, Mark Cave-Ayland, Michal Suchanek, Milton Miller, Mimi Zohar, Murilo Opsfelder Araujo, Nathan Chancellor, Nathan Lynch, Naveen N. Rao, Nayna Jain, Nicholas Piggin, Oliver O'Halloran, Palmer Dabbelt, Pedro Miraglia Franco de Carvalho, Philippe Bergheaud, Pingfan Liu, Pratik Rajesh Sampat, Qian Cai, Qinglang Miao, Randy Dunlap, Ravi Bangoria, Sachin Sant, Sam Bobroff, Sandipan Das, Santosh Sivaraj, Satheesh Rajendran, Shirisha Ganta, Sourabh Jain, Srikar Dronamraju, Stan Johnson, Stephen Rothwell, Thadeu Lima de Souza Cascardo, Thiago Jung Bauermann, Tom Lane, Vaibhav Jain, Vladis Dronov, Wei Yongjun, Wen Xiong, YueHaibing.

* tag 'powerpc-5.9-1' of git://git.kernel.org/pub/scm/linux/kernel/git/powerpc/linux: (337 commits)
  selftests/powerpc: Fix pkey syscall redefinitions
  powerpc: Fix circular dependency between percpu.h and mmu.h
  powerpc/powernv/sriov: Fix use of uninitialised variable
  selftests/powerpc: Skip vmx/vsx/tar/etc tests on older CPUs
  powerpc/40x: Fix assembler warning about r0
  powerpc/papr_scm: Add support for fetching nvdimm 'fuel-gauge' metric
  powerpc/papr_scm: Fetch nvdimm performance stats from PHYP
  cpuidle: pseries: Fixup exit latency for CEDE(0)
  cpuidle: pseries: Add function to parse extended CEDE records
  cpuidle: pseries: Set the latency-hint before entering CEDE
  selftests/powerpc: Fix online CPU selection
  powerpc/perf: Consolidate perf_callchain_user_[64|32]()
  powerpc/pseries/hotplug-cpu: Remove double free in error path
  powerpc/pseries/mobility: Add pr_debug() for device tree changes
  powerpc/pseries/mobility: Set pr_fmt()
  powerpc/cacheinfo: Warn if cache object chain becomes unordered
  powerpc/cacheinfo: Improve diagnostics about malformed cache lists
  powerpc/cacheinfo: Use name@unit instead of full DT path in debug messages
  powerpc/cacheinfo: Set pr_fmt()
  powerpc: fix function annotations to avoid section mismatch warnings with gcc-10
  ...
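The queued spinlock support mentioned in the first bullet is what the asm-generic header shown below provides the common fast paths for. As a rough orientation only, here is a minimal user-space analogue of that fast path written with C11 atomics. Everything in it (toy_qspinlock, toy_lock, TOY_LOCKED) is a made-up illustration, not the kernel API, and it deliberately omits the real MCS-queue slow path that lives in kernel/locking/qspinlock.c.

/*
 * Toy user-space analogue of the qspinlock fast path shown in the header
 * below. Names (toy_qspinlock, toy_*, TOY_LOCKED) are illustrative only.
 */
#include <stdatomic.h>
#include <stdbool.h>
#include <stdint.h>

#define TOY_LOCKED	1u

struct toy_qspinlock {
	_Atomic uint32_t val;	/* 0 = unlocked, non-zero = held/contended */
};

static bool toy_trylock(struct toy_qspinlock *lock)
{
	uint32_t expected = 0;

	/*
	 * Take the lock only if nobody holds it and nobody is queued,
	 * mirroring queued_spin_trylock()'s cmpxchg-acquire fast path.
	 */
	return atomic_compare_exchange_strong_explicit(&lock->val, &expected,
						       TOY_LOCKED,
						       memory_order_acquire,
						       memory_order_relaxed);
}

static void toy_lock(struct toy_qspinlock *lock)
{
	/*
	 * Fast path first; a real qspinlock falls back to an MCS queue
	 * (queued_spin_lock_slowpath) instead of naively spinning here.
	 */
	while (!toy_trylock(lock))
		;
}

static void toy_unlock(struct toy_qspinlock *lock)
{
	/* Release store, analogous to smp_store_release(&lock->locked, 0). */
	atomic_store_explicit(&lock->val, 0, memory_order_release);
}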
122 lines · 3.3 KiB · C
/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
 * Queued spinlock
 *
 * (C) Copyright 2013-2015 Hewlett-Packard Development Company, L.P.
 * (C) Copyright 2015 Hewlett-Packard Enterprise Development LP
 *
 * Authors: Waiman Long <waiman.long@hpe.com>
 */
#ifndef __ASM_GENERIC_QSPINLOCK_H
#define __ASM_GENERIC_QSPINLOCK_H

#include <asm-generic/qspinlock_types.h>
#include <linux/atomic.h>

#ifndef queued_spin_is_locked
/**
 * queued_spin_is_locked - is the spinlock locked?
 * @lock: Pointer to queued spinlock structure
 * Return: 1 if it is locked, 0 otherwise
 */
static __always_inline int queued_spin_is_locked(struct qspinlock *lock)
{
	/*
	 * Any !0 state indicates it is locked, even if _Q_LOCKED_VAL
	 * isn't immediately observable.
	 */
	return atomic_read(&lock->val);
}
#endif

/**
 * queued_spin_value_unlocked - is the spinlock structure unlocked?
 * @lock: queued spinlock structure
 * Return: 1 if it is unlocked, 0 otherwise
 *
 * N.B. Whenever there are tasks waiting for the lock, it is considered
 * locked wrt the lockref code to avoid lock stealing by the lockref
 * code and change things underneath the lock. This also allows some
 * optimizations to be applied without conflict with lockref.
 */
static __always_inline int queued_spin_value_unlocked(struct qspinlock lock)
{
	return !atomic_read(&lock.val);
}

/**
 * queued_spin_is_contended - check if the lock is contended
 * @lock : Pointer to queued spinlock structure
 * Return: 1 if lock contended, 0 otherwise
 */
static __always_inline int queued_spin_is_contended(struct qspinlock *lock)
{
	return atomic_read(&lock->val) & ~_Q_LOCKED_MASK;
}

/**
 * queued_spin_trylock - try to acquire the queued spinlock
 * @lock : Pointer to queued spinlock structure
 * Return: 1 if lock acquired, 0 if failed
 */
static __always_inline int queued_spin_trylock(struct qspinlock *lock)
{
	u32 val = atomic_read(&lock->val);

	if (unlikely(val))
		return 0;

	return likely(atomic_try_cmpxchg_acquire(&lock->val, &val, _Q_LOCKED_VAL));
}

extern void queued_spin_lock_slowpath(struct qspinlock *lock, u32 val);

#ifndef queued_spin_lock
/**
 * queued_spin_lock - acquire a queued spinlock
 * @lock: Pointer to queued spinlock structure
 */
static __always_inline void queued_spin_lock(struct qspinlock *lock)
{
	u32 val = 0;

	if (likely(atomic_try_cmpxchg_acquire(&lock->val, &val, _Q_LOCKED_VAL)))
		return;

	queued_spin_lock_slowpath(lock, val);
}
#endif

#ifndef queued_spin_unlock
/**
 * queued_spin_unlock - release a queued spinlock
 * @lock : Pointer to queued spinlock structure
 */
static __always_inline void queued_spin_unlock(struct qspinlock *lock)
{
	/*
	 * unlock() needs release semantics:
	 */
	smp_store_release(&lock->locked, 0);
}
#endif

#ifndef virt_spin_lock
static __always_inline bool virt_spin_lock(struct qspinlock *lock)
{
	return false;
}
#endif

/*
 * Remapping spinlock architecture specific functions to the corresponding
 * queued spinlock functions.
 */
#define arch_spin_is_locked(l)		queued_spin_is_locked(l)
#define arch_spin_is_contended(l)	queued_spin_is_contended(l)
#define arch_spin_value_unlocked(l)	queued_spin_value_unlocked(l)
#define arch_spin_lock(l)		queued_spin_lock(l)
#define arch_spin_trylock(l)		queued_spin_trylock(l)
#define arch_spin_unlock(l)		queued_spin_unlock(l)

#endif /* __ASM_GENERIC_QSPINLOCK_H */
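To make the acquire/release pairing behind arch_spin_lock()/arch_spin_unlock() concrete, here is a small, purely illustrative driver for the user-space toy_* helpers sketched earlier on this page. It assumes those made-up helpers, not the kernel header itself, and exists only to show the mutual-exclusion property the real lock provides.

/*
 * Illustrative-only driver for the toy_qspinlock sketch shown earlier on
 * this page (assumes toy_qspinlock, toy_lock, toy_unlock from that sketch).
 * It is not kernel code; it just demonstrates the acquire/release pairing
 * that the queued spinlock API relies on.
 */
#include <pthread.h>
#include <stdio.h>

static struct toy_qspinlock counter_lock;	/* from the earlier sketch */
static long counter;

static void *worker(void *arg)
{
	(void)arg;

	for (int i = 0; i < 100000; i++) {
		toy_lock(&counter_lock);
		counter++;		/* protected by the acquire/release pair */
		toy_unlock(&counter_lock);
	}
	return NULL;
}

int main(void)
{
	pthread_t threads[4];

	for (int i = 0; i < 4; i++)
		pthread_create(&threads[i], NULL, worker, NULL);
	for (int i = 0; i < 4; i++)
		pthread_join(threads[i], NULL);

	printf("counter = %ld (expected 400000)\n", counter);
	return 0;
}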