locking/atomic: Use s64 for atomic64

As a step towards making the atomic64 API use consistent types treewide,
let's have the generic atomic64 implementation use s64 as the underlying
type for atomic64_t, rather than long long, matching the generated
headers.

Otherwise, there should be no functional change as a result of this
patch.

Signed-off-by: Mark Rutland <mark.rutland@arm.com>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Acked-by: Arnd Bergmann <arnd@arndb.de>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Will Deacon <will.deacon@arm.com>
Cc: aou@eecs.berkeley.edu
Cc: bp@alien8.de
Cc: catalin.marinas@arm.com
Cc: davem@davemloft.net
Cc: fenghua.yu@intel.com
Cc: heiko.carstens@de.ibm.com
Cc: herbert@gondor.apana.org.au
Cc: ink@jurassic.park.msu.ru
Cc: jhogan@kernel.org
Cc: linux@armlinux.org.uk
Cc: mattst88@gmail.com
Cc: mpe@ellerman.id.au
Cc: palmer@sifive.com
Cc: paul.burton@mips.com
Cc: paulus@samba.org
Cc: ralf@linux-mips.org
Cc: rth@twiddle.net
Cc: tony.luck@intel.com
Cc: vgupta@synopsys.com
Link: https://lkml.kernel.org/r/20190522132250.26499-4-mark.rutland@arm.com
Signed-off-by: Ingo Molnar <mingo@kernel.org>
commit 9255813d58
parent 982164d62a
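A note on why "no functional change" holds: s64 is the kernel's fixed-width signed 64-bit type, and long long is also 64 bits wide on every configuration that uses the generic atomic64 implementation, so only the spelling of the type changes, not the size or layout of atomic64_t. A hypothetical stand-alone sanity check (not part of the patch; int64_t stands in for the kernel's s64):

#include <stdint.h>

/* Hypothetical compile-time check, not part of the patch: the
 * long long -> s64 conversion cannot change atomic64_t's ABI,
 * because both are 64-bit signed integer types here. */
_Static_assert(sizeof(long long) == sizeof(int64_t),
	       "long long and s64 must match in size");
_Static_assert(sizeof(int64_t) * 8 == 64, "s64 must be 64 bits");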
--- a/include/asm-generic/atomic64.h
+++ b/include/asm-generic/atomic64.h
@@ -10,24 +10,24 @@
 #include <linux/types.h>
 
 typedef struct {
-	long long counter;
+	s64 counter;
 } atomic64_t;
 
 #define ATOMIC64_INIT(i)	{ (i) }
 
-extern long long atomic64_read(const atomic64_t *v);
-extern void	 atomic64_set(atomic64_t *v, long long i);
+extern s64 atomic64_read(const atomic64_t *v);
+extern void	 atomic64_set(atomic64_t *v, s64 i);
 
 #define atomic64_set_release(v, i)	atomic64_set((v), (i))
 
 #define ATOMIC64_OP(op)							\
-extern void	 atomic64_##op(long long a, atomic64_t *v);
+extern void	 atomic64_##op(s64 a, atomic64_t *v);
 
 #define ATOMIC64_OP_RETURN(op)						\
-extern long long atomic64_##op##_return(long long a, atomic64_t *v);
+extern s64 atomic64_##op##_return(s64 a, atomic64_t *v);
 
 #define ATOMIC64_FETCH_OP(op)						\
-extern long long atomic64_fetch_##op(long long a, atomic64_t *v);
+extern s64 atomic64_fetch_##op(s64 a, atomic64_t *v);
 
 #define ATOMIC64_OPS(op)	ATOMIC64_OP(op) ATOMIC64_OP_RETURN(op) ATOMIC64_FETCH_OP(op)
 
@@ -46,11 +46,11 @@ ATOMIC64_OPS(xor)
 #undef ATOMIC64_OP_RETURN
 #undef ATOMIC64_OP
 
-extern long long atomic64_dec_if_positive(atomic64_t *v);
+extern s64 atomic64_dec_if_positive(atomic64_t *v);
 #define atomic64_dec_if_positive atomic64_dec_if_positive
-extern long long atomic64_cmpxchg(atomic64_t *v, long long o, long long n);
-extern long long atomic64_xchg(atomic64_t *v, long long new);
-extern long long atomic64_fetch_add_unless(atomic64_t *v, long long a, long long u);
+extern s64 atomic64_cmpxchg(atomic64_t *v, s64 o, s64 n);
+extern s64 atomic64_xchg(atomic64_t *v, s64 new);
+extern s64 atomic64_fetch_add_unless(atomic64_t *v, s64 a, s64 u);
 #define atomic64_fetch_add_unless atomic64_fetch_add_unless
 
 #endif  /*  _ASM_GENERIC_ATOMIC64_H  */
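For readers unfamiliar with this header: these declarations are the 64-bit atomic API that 32-bit architectures fall back on. A brief sketch of a consumer, in kernel style; the atomic64_*() calls are the API declared above (atomic64_add() comes from the ATOMIC64_OP template), while the counter and the two helper functions are invented for illustration:

/* Illustrative consumer, not part of the patch: a 64-bit byte counter
 * that stays correct on 32-bit SMP machines via the spinlock-based
 * fallback implemented in lib/atomic64.c. In real kernel code this
 * API is reached through <linux/atomic.h>. */
static atomic64_t bytes_transferred = ATOMIC64_INIT(0);

static void account_bytes(s64 nbytes)
{
	atomic64_add(nbytes, &bytes_transferred);
}

static s64 total_bytes(void)
{
	return atomic64_read(&bytes_transferred);
}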
--- a/lib/atomic64.c
+++ b/lib/atomic64.c
@@ -42,11 +42,11 @@ static inline raw_spinlock_t *lock_addr(const atomic64_t *v)
 	return &atomic64_lock[addr & (NR_LOCKS - 1)].lock;
 }
 
-long long atomic64_read(const atomic64_t *v)
+s64 atomic64_read(const atomic64_t *v)
 {
 	unsigned long flags;
 	raw_spinlock_t *lock = lock_addr(v);
-	long long val;
+	s64 val;
 
 	raw_spin_lock_irqsave(lock, flags);
 	val = v->counter;
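The hunk above also shows the design this file is built around: instead of one lock per atomic64_t, the generic implementation keeps a small fixed array of spinlocks and picks one by hashing the variable's address (lock_addr() in the context line), bounding memory overhead while keeping contention low. A rough user-space model of that technique, with pthread mutexes standing in for raw spinlocks and int64_t for s64; all names below are invented for illustration:

#include <stdint.h>
#include <stdio.h>
#include <pthread.h>

#define NR_LOCKS 16	/* must be a power of two for the mask below */

typedef struct { int64_t counter; } model_atomic64_t;

static pthread_mutex_t model_locks[NR_LOCKS] = {
	[0 ... NR_LOCKS - 1] = PTHREAD_MUTEX_INITIALIZER  /* GCC range init */
};

/* Hash the variable's address to one of the locks, discarding the low
 * bits so that variables sharing a cache line share a lock (the kernel
 * shifts by L1_CACHE_SHIFT; 6 is a common value). */
static pthread_mutex_t *model_lock_addr(const model_atomic64_t *v)
{
	unsigned long addr = (unsigned long)v;

	addr >>= 6;
	return &model_locks[addr & (NR_LOCKS - 1)];
}

static int64_t model_atomic64_add_return(int64_t a, model_atomic64_t *v)
{
	pthread_mutex_t *lock = model_lock_addr(v);
	int64_t val;

	pthread_mutex_lock(lock);
	val = (v->counter += a);
	pthread_mutex_unlock(lock);
	return val;
}

int main(void)
{
	model_atomic64_t v = { 0 };

	printf("%lld\n", (long long)model_atomic64_add_return(5, &v));
	return 0;
}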
@@ -55,7 +55,7 @@ long long atomic64_read(const atomic64_t *v)
 }
 EXPORT_SYMBOL(atomic64_read);
 
-void atomic64_set(atomic64_t *v, long long i)
+void atomic64_set(atomic64_t *v, s64 i)
 {
 	unsigned long flags;
 	raw_spinlock_t *lock = lock_addr(v);
@@ -67,7 +67,7 @@ void atomic64_set(atomic64_t *v, long long i)
 EXPORT_SYMBOL(atomic64_set);
 
 #define ATOMIC64_OP(op, c_op)						\
-void atomic64_##op(long long a, atomic64_t *v)				\
+void atomic64_##op(s64 a, atomic64_t *v)				\
 {									\
 	unsigned long flags;						\
 	raw_spinlock_t *lock = lock_addr(v);				\
@@ -79,11 +79,11 @@ void atomic64_##op(long long a, atomic64_t *v)			\
 EXPORT_SYMBOL(atomic64_##op);
 
 #define ATOMIC64_OP_RETURN(op, c_op)					\
-long long atomic64_##op##_return(long long a, atomic64_t *v)		\
+s64 atomic64_##op##_return(s64 a, atomic64_t *v)			\
 {									\
 	unsigned long flags;						\
 	raw_spinlock_t *lock = lock_addr(v);				\
-	long long val;							\
+	s64 val;							\
 									\
 	raw_spin_lock_irqsave(lock, flags);				\
 	val = (v->counter c_op a);					\
@@ -93,11 +93,11 @@ long long atomic64_##op##_return(long long a, atomic64_t *v)	\
 EXPORT_SYMBOL(atomic64_##op##_return);
 
 #define ATOMIC64_FETCH_OP(op, c_op)					\
-long long atomic64_fetch_##op(long long a, atomic64_t *v)		\
+s64 atomic64_fetch_##op(s64 a, atomic64_t *v)				\
 {									\
 	unsigned long flags;						\
 	raw_spinlock_t *lock = lock_addr(v);				\
-	long long val;							\
+	s64 val;							\
 									\
 	raw_spin_lock_irqsave(lock, flags);				\
 	val = v->counter;						\
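For readers tracing the macro layering: this file instantiates the templates above with lines such as ATOMIC64_OPS(add, +=), and the next hunk's context shows the xor instance. After preprocessing, the ATOMIC64_OP_RETURN template therefore yields roughly the following for op=add; the unlock and return lines fall outside the visible hunks but follow the same pattern as the open-coded functions in this file:

/* Approximate expansion of ATOMIC64_OP_RETURN(add, +=); illustrative
 * only, the real function is generated by the preprocessor. */
s64 atomic64_add_return(s64 a, atomic64_t *v)
{
	unsigned long flags;
	raw_spinlock_t *lock = lock_addr(v);
	s64 val;

	raw_spin_lock_irqsave(lock, flags);
	val = (v->counter += a);
	raw_spin_unlock_irqrestore(lock, flags);
	return val;
}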
@@ -130,11 +130,11 @@ ATOMIC64_OPS(xor, ^=)
 #undef ATOMIC64_OP_RETURN
 #undef ATOMIC64_OP
 
-long long atomic64_dec_if_positive(atomic64_t *v)
+s64 atomic64_dec_if_positive(atomic64_t *v)
 {
 	unsigned long flags;
 	raw_spinlock_t *lock = lock_addr(v);
-	long long val;
+	s64 val;
 
 	raw_spin_lock_irqsave(lock, flags);
 	val = v->counter - 1;
@@ -145,11 +145,11 @@ long long atomic64_dec_if_positive(atomic64_t *v)
 }
 EXPORT_SYMBOL(atomic64_dec_if_positive);
 
-long long atomic64_cmpxchg(atomic64_t *v, long long o, long long n)
+s64 atomic64_cmpxchg(atomic64_t *v, s64 o, s64 n)
 {
 	unsigned long flags;
 	raw_spinlock_t *lock = lock_addr(v);
-	long long val;
+	s64 val;
 
 	raw_spin_lock_irqsave(lock, flags);
 	val = v->counter;
@@ -160,11 +160,11 @@ long long atomic64_cmpxchg(atomic64_t *v, long long o, long long n)
 }
 EXPORT_SYMBOL(atomic64_cmpxchg);
 
-long long atomic64_xchg(atomic64_t *v, long long new)
+s64 atomic64_xchg(atomic64_t *v, s64 new)
 {
 	unsigned long flags;
 	raw_spinlock_t *lock = lock_addr(v);
-	long long val;
+	s64 val;
 
 	raw_spin_lock_irqsave(lock, flags);
 	val = v->counter;
@@ -174,11 +174,11 @@ long long atomic64_xchg(atomic64_t *v, long long new)
 }
 EXPORT_SYMBOL(atomic64_xchg);
 
-long long atomic64_fetch_add_unless(atomic64_t *v, long long a, long long u)
+s64 atomic64_fetch_add_unless(atomic64_t *v, s64 a, s64 u)
 {
 	unsigned long flags;
 	raw_spinlock_t *lock = lock_addr(v);
-	long long val;
+	s64 val;
 
 	raw_spin_lock_irqsave(lock, flags);
 	val = v->counter;