04fc8bbcf5
DECLARE_MUTEX_LOCKED was used for semaphores used as completions and we've got rid of them. Well, except for one in libusual that the maintainer explicitly wants to keep as a semaphore. So convert that usage to an explicit sema_init and kill off DECLARE_MUTEX_LOCKED so that new code is reminded to use a completion.
Signed-off-by: Christoph Hellwig <hch@lst.de>
Acked-by: "Satyam Sharma" <satyam.sharma@gmail.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
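As a rough illustration of the conversion the patch describes (the names below are hypothetical placeholders, not the actual libusual code): instead of declaring the semaphore pre-locked with DECLARE_MUTEX_LOCKED, the semaphore is declared normally and explicitly initialized to a count of 0 at init time, so the first down() on it blocks until a matching up().

#include <linux/init.h>
#include <asm/semaphore.h>

/*
 * Previously:  static DECLARE_MUTEX_LOCKED(example_done_sem);
 * Now the declaration and the "locked" (count == 0) initialization
 * are separate steps.  "example_done_sem" and "example_init" are
 * made-up names for illustration only.
 */
static struct semaphore example_done_sem;

static int __init example_init(void)
{
        /* count 0: the first down() will sleep until someone calls up() */
        sema_init(&example_done_sem, 0);
        return 0;
}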
108 lines
2.4 KiB
C
/*
 *  include/asm-s390/semaphore.h
 *
 *  S390 version
 *    Copyright (C) 1999 IBM Deutschland Entwicklung GmbH, IBM Corporation
 *
 *  Derived from "include/asm-i386/semaphore.h"
 *    (C) Copyright 1996 Linus Torvalds
 */

#ifndef _S390_SEMAPHORE_H
#define _S390_SEMAPHORE_H

#include <asm/system.h>
#include <asm/atomic.h>
#include <linux/wait.h>
#include <linux/rwsem.h>

struct semaphore {
        /*
         * Note that any negative value of count is equivalent to 0,
         * but additionally indicates that some process(es) might be
         * sleeping on `wait'.
         */
        atomic_t count;
        wait_queue_head_t wait;
};

#define __SEMAPHORE_INITIALIZER(name,count) \
        { ATOMIC_INIT(count), __WAIT_QUEUE_HEAD_INITIALIZER((name).wait) }

#define __DECLARE_SEMAPHORE_GENERIC(name,count) \
        struct semaphore name = __SEMAPHORE_INITIALIZER(name,count)

#define DECLARE_MUTEX(name) __DECLARE_SEMAPHORE_GENERIC(name,1)

static inline void sema_init (struct semaphore *sem, int val)
{
        atomic_set(&sem->count, val);
        init_waitqueue_head(&sem->wait);
}

static inline void init_MUTEX (struct semaphore *sem)
{
        sema_init(sem, 1);
}

static inline void init_MUTEX_LOCKED (struct semaphore *sem)
{
        sema_init(sem, 0);
}

asmlinkage void __down(struct semaphore * sem);
asmlinkage int __down_interruptible(struct semaphore * sem);
asmlinkage int __down_trylock(struct semaphore * sem);
asmlinkage void __up(struct semaphore * sem);

static inline void down(struct semaphore * sem)
{
        might_sleep();
        if (atomic_dec_return(&sem->count) < 0)
                __down(sem);
}

static inline int down_interruptible(struct semaphore * sem)
{
        int ret = 0;

        might_sleep();
        if (atomic_dec_return(&sem->count) < 0)
                ret = __down_interruptible(sem);
        return ret;
}

static inline int down_trylock(struct semaphore * sem)
{
        int old_val, new_val;

        /*
         * This inline assembly atomically implements the equivalent
         * to the following C code:
         *   old_val = sem->count.counter;
         *   if ((new_val = old_val) > 0)
         *     sem->count.counter = --new_val;
         * In the ppc code this is called atomic_dec_if_positive.
         */
        asm volatile(
                "   l    %0,0(%3)\n"
                "0: ltr  %1,%0\n"
                "   jle  1f\n"
                "   ahi  %1,-1\n"
                "   cs   %0,%1,0(%3)\n"
                "   jl   0b\n"
                "1:"
                : "=&d" (old_val), "=&d" (new_val), "=m" (sem->count.counter)
                : "a" (&sem->count.counter), "m" (sem->count.counter)
                : "cc", "memory");
        return old_val <= 0;
}

static inline void up(struct semaphore * sem)
{
        if (atomic_inc_return(&sem->count) <= 0)
                __up(sem);
}

#endif
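For context, a minimal usage sketch of the interface above (all names and the shared state are hypothetical): a semaphore initialized to 1 via init_MUTEX() acts as a mutex; down_interruptible() returns 0 on success or -EINTR if the sleep is interrupted by a signal, and down_trylock() returns non-zero when the semaphore could not be taken without sleeping.

#include <linux/errno.h>
#include <asm/semaphore.h>

static struct semaphore example_lock;   /* hypothetical */
static int example_resource;            /* hypothetical shared state */

static void example_setup(void)
{
        init_MUTEX(&example_lock);      /* equivalent to sema_init(&example_lock, 1) */
}

static int example_update(int val)
{
        /* May sleep; bails out with -EINTR if a signal arrives first. */
        if (down_interruptible(&example_lock))
                return -EINTR;
        example_resource = val;
        up(&example_lock);
        return 0;
}

static int example_try_update(int val)
{
        /* Non-zero return means the semaphore was NOT acquired. */
        if (down_trylock(&example_lock))
                return -EBUSY;
        example_resource = val;
        up(&example_lock);
        return 0;
}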