/* SPDX-License-Identifier: GPL-2.0
 *
 * include/asm-sh/spinlock-cas.h
 *
 * Copyright (C) 2015 SEI
 */
#ifndef __ASM_SH_SPINLOCK_CAS_H
#define __ASM_SH_SPINLOCK_CAS_H

#include <asm/barrier.h>
#include <asm/processor.h>

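/*
 * "cas.l %1,%0,@r0" compares the word at p (the "z" constraint pins p
 * to r0) with old and, if they match, stores new in its place.  Either
 * way the "+r"(new) operand is overwritten with the value that was in
 * memory, so the swap succeeded iff the return value equals old.  The
 * instruction also sets the T flag, hence the "t" clobber.
 */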
static inline unsigned __sl_cas(volatile unsigned *p, unsigned old, unsigned new)
{
	__asm__ __volatile__("cas.l %1,%0,@r0"
		: "+r"(new)
		: "r"(old), "z"(p)
		: "t", "memory" );
	return new;
}

/*
 * Your basic SMP spinlocks, allowing only a single CPU anywhere
 */

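/*
 * The lock word holds 1 while the lock is free and 0 once a CPU has
 * taken it, so any non-positive value below counts as locked.
 */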
#define arch_spin_is_locked(x)		((x)->lock <= 0)

static inline void arch_spin_lock(arch_spinlock_t *lock)
{
	while (!__sl_cas(&lock->lock, 1, 0));
}

static inline void arch_spin_unlock(arch_spinlock_t *lock)
{
	__sl_cas(&lock->lock, 0, 1);
}

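/*
 * Returns the previous lock value: nonzero (1) exactly when the 1->0
 * swap succeeded and the caller now owns the lock.
 */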
static inline int arch_spin_trylock(arch_spinlock_t *lock)
{
	return __sl_cas(&lock->lock, 1, 0);
}

/*
 * Read-write spinlocks, allowing multiple readers but only one writer.
 *
 * NOTE! it is quite common to have readers in interrupts but no interrupt
 * writers. For those circumstances we can "mix" irq-safe locks - any writer
 * needs to get an irq-safe write-lock, but readers can get non-irqsafe
 * read-locks.
 */

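/*
 * rw->lock counts the reader slots still available: RW_LOCK_BIAS when
 * the lock is free, one less per active reader, and 0 while a writer
 * holds it.  Readers take a slot by decrementing; a writer claims all
 * the slots at once.
 */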
static inline void arch_read_lock(arch_rwlock_t *rw)
{
	unsigned old;
	do old = rw->lock;
	while (!old || __sl_cas(&rw->lock, old, old-1) != old);
}

static inline void arch_read_unlock(arch_rwlock_t *rw)
{
	unsigned old;
	do old = rw->lock;
	while (__sl_cas(&rw->lock, old, old+1) != old);
}

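/*
 * A writer acquires the lock only by swapping the full bias for 0,
 * which can succeed just when no reader or writer is present; unlock
 * puts the whole bias back.
 */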
static inline void arch_write_lock(arch_rwlock_t *rw)
{
	while (__sl_cas(&rw->lock, RW_LOCK_BIAS, 0) != RW_LOCK_BIAS);
}

static inline void arch_write_unlock(arch_rwlock_t *rw)
{
	__sl_cas(&rw->lock, 0, RW_LOCK_BIAS);
}

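/*
 * Fails only on observing 0 (a writer holds the lock); mere CAS
 * contention with other readers is retried.
 */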
static inline int arch_read_trylock(arch_rwlock_t *rw)
{
	unsigned old;
	do old = rw->lock;
	while (old && __sl_cas(&rw->lock, old, old-1) != old);
	return !!old;
}

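/*
 * Succeeds only if the full bias is observed and claimed in a single
 * CAS; any active reader or writer makes the attempt fail.
 */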
static inline int arch_write_trylock(arch_rwlock_t *rw)
{
	return __sl_cas(&rw->lock, RW_LOCK_BIAS, 0) == RW_LOCK_BIAS;
}

#endif /* __ASM_SH_SPINLOCK_CAS_H */