android_kernel_xiaomi_sm8350/include/asm-generic/atomic.h
Peter Zijlstra febdbfe8a9 arch: Prepare for smp_mb__{before,after}_atomic()
Since the smp_mb__{before,after}*() ops are fundamentally dependent on
how an arch can implement atomics, it doesn't make sense to have 3
variants of them. They must all be the same.

Furthermore, the 3 variants suggest they're only valid for those 3
atomic ops, while we have many more where they could be applied.

So move away from
smp_mb__{before,after}_{atomic,clear}_{dec,inc,bit}() and reduce the
interface to just the two: smp_mb__{before,after}_atomic().

This patch prepares the way by introducing default implementations in
asm-generic/barrier.h that default to a full barrier and providing
__deprecated inlines for the previous 6 barriers if they're not
provided by the arch.

This should allow for a mostly painless transition (lots of
deprecation warnings in the interim).
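
A sketch of the intended conversion (obj->refs is a hypothetical
example field; the call on the left is one of the six deprecated
barriers):

	smp_mb__before_atomic_dec();	/* old */
	atomic_dec(&obj->refs);

becomes:

	smp_mb__before_atomic();	/* new; one name for all atomic ops */
	atomic_dec(&obj->refs);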

Signed-off-by: Peter Zijlstra <peterz@infradead.org>
Acked-by: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
Link: http://lkml.kernel.org/n/tip-wr59327qdyi9mbzn6x937s4e@git.kernel.org
Cc: Arnd Bergmann <arnd@arndb.de>
Cc: "Chen, Gong" <gong.chen@linux.intel.com>
Cc: John Sullivan <jsrhbz@kanargh.force9.co.uk>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Mauro Carvalho Chehab <m.chehab@samsung.com>
Cc: Srinivas Pandruvada <srinivas.pandruvada@linux.intel.com>
Cc: "Theodore Ts'o" <tytso@mit.edu>
Cc: linux-arch@vger.kernel.org
Cc: linux-kernel@vger.kernel.org
Signed-off-by: Ingo Molnar <mingo@kernel.org>
2014-04-18 11:40:30 +02:00

/*
 * Generic C implementation of atomic counter operations. Usable on
 * UP systems only. Do not include in machine independent code.
 *
 * Originally implemented for MN10300.
 *
 * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public Licence
 * as published by the Free Software Foundation; either version
 * 2 of the Licence, or (at your option) any later version.
 */
#ifndef __ASM_GENERIC_ATOMIC_H
#define __ASM_GENERIC_ATOMIC_H

#include <asm/cmpxchg.h>
#include <asm/barrier.h>

#ifdef CONFIG_SMP
/* Force people to define core atomics */
# if !defined(atomic_add_return) || !defined(atomic_sub_return) || \
     !defined(atomic_clear_mask) || !defined(atomic_set_mask)
#  error "SMP requires a little arch-specific magic"
# endif
#endif
/*
 * Atomic operations that C can't guarantee us.  Useful for
 * resource counting etc..
 */

#define ATOMIC_INIT(i)	{ (i) }

#ifdef __KERNEL__
/**
 * atomic_read - read atomic variable
 * @v: pointer of type atomic_t
 *
 * Atomically reads the value of @v.
 */
#ifndef atomic_read
#define atomic_read(v)	(*(volatile int *)&(v)->counter)
#endif
/**
 * atomic_set - set atomic variable
 * @v: pointer of type atomic_t
 * @i: required value
 *
 * Atomically sets the value of @v to @i.
 */
#define atomic_set(v, i) (((v)->counter) = (i))

#include <linux/irqflags.h>
/**
 * atomic_add_return - add integer to atomic variable
 * @i: integer value to add
 * @v: pointer of type atomic_t
 *
 * Atomically adds @i to @v and returns the result
 */
#ifndef atomic_add_return
static inline int atomic_add_return(int i, atomic_t *v)
{
	unsigned long flags;
	int temp;

	raw_local_irq_save(flags); /* Don't trace it in an irqsoff handler */
	temp = v->counter;
	temp += i;
	v->counter = temp;
	raw_local_irq_restore(flags);

	return temp;
}
#endif
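
/*
 * Usage sketch (hypothetical counter; UP only): with interrupts as the
 * sole source of concurrency, the irq-save/restore window above is
 * enough to make the read-modify-write appear atomic.
 *
 *	atomic_t refs = ATOMIC_INIT(0);
 *
 *	if (atomic_add_return(1, &refs) == 1)
 *		first_user_init();	// hypothetical helper
 */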
/**
 * atomic_sub_return - subtract integer from atomic variable
 * @i: integer value to subtract
 * @v: pointer of type atomic_t
 *
 * Atomically subtracts @i from @v and returns the result
 */
#ifndef atomic_sub_return
static inline int atomic_sub_return(int i, atomic_t *v)
{
	unsigned long flags;
	int temp;

	raw_local_irq_save(flags); /* Don't trace it in an irqsoff handler */
	temp = v->counter;
	temp -= i;
	v->counter = temp;
	raw_local_irq_restore(flags);

	return temp;
}
#endif
static inline int atomic_add_negative(int i, atomic_t *v)
{
	return atomic_add_return(i, v) < 0;
}

static inline void atomic_add(int i, atomic_t *v)
{
	atomic_add_return(i, v);
}

static inline void atomic_sub(int i, atomic_t *v)
{
	atomic_sub_return(i, v);
}

static inline void atomic_inc(atomic_t *v)
{
	atomic_add_return(1, v);
}

static inline void atomic_dec(atomic_t *v)
{
	atomic_sub_return(1, v);
}
#define atomic_dec_return(v)		atomic_sub_return(1, (v))
#define atomic_inc_return(v)		atomic_add_return(1, (v))

#define atomic_sub_and_test(i, v)	(atomic_sub_return((i), (v)) == 0)
#define atomic_dec_and_test(v)		(atomic_dec_return(v) == 0)
#define atomic_inc_and_test(v)		(atomic_inc_return(v) == 0)

#define atomic_xchg(ptr, v)		(xchg(&(ptr)->counter, (v)))
#define atomic_cmpxchg(v, old, new)	(cmpxchg(&((v)->counter), (old), (new)))
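
/**
 * __atomic_add_unless - add unless the number is already a given value
 * @v: pointer of type atomic_t
 * @a: the amount to add to @v...
 * @u: ...unless @v is equal to @u
 *
 * Atomically adds @a to @v, so long as @v was not already @u.
 * Returns the old value of @v.
 */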
static inline int __atomic_add_unless(atomic_t *v, int a, int u)
{
	int c, old;
	c = atomic_read(v);
	while (c != u && (old = atomic_cmpxchg(v, c, c + a)) != c)
		c = old;
	return c;
}
/**
 * atomic_clear_mask - Atomically clear bits in atomic variable
 * @mask: Mask of the bits to be cleared
 * @v: pointer of type atomic_t
 *
 * Atomically clears the bits set in @mask from @v
 */
#ifndef atomic_clear_mask
static inline void atomic_clear_mask(unsigned long mask, atomic_t *v)
{
	unsigned long flags;

	mask = ~mask;
	raw_local_irq_save(flags); /* Don't trace it in an irqsoff handler */
	v->counter &= mask;
	raw_local_irq_restore(flags);
}
#endif
/**
 * atomic_set_mask - Atomically set bits in atomic variable
 * @mask: Mask of the bits to be set
 * @v: pointer of type atomic_t
 *
 * Atomically sets the bits set in @mask in @v
 */
#ifndef atomic_set_mask
static inline void atomic_set_mask(unsigned int mask, atomic_t *v)
{
	unsigned long flags;

	raw_local_irq_save(flags); /* Don't trace it in an irqsoff handler */
	v->counter |= mask;
	raw_local_irq_restore(flags);
}
#endif
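
/*
 * Usage sketch (hypothetical flags word) for the two mask ops above:
 *
 *	atomic_t flags = ATOMIC_INIT(0xff);
 *
 *	atomic_clear_mask(0x0f, &flags);	// counter is now 0xf0
 *	atomic_set_mask(0x01, &flags);		// counter is now 0xf1
 */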
#endif /* __KERNEL__ */
#endif /* __ASM_GENERIC_ATOMIC_H */