/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Copyright IBM Corp. 1999, 2016
 * Author(s): Martin Schwidefsky <schwidefsky@de.ibm.com>,
 *	      Denis Joseph Barrow,
 *	      Arnd Bergmann,
 */

#ifndef __ARCH_S390_ATOMIC__
#define __ARCH_S390_ATOMIC__

#include <linux/compiler.h>
#include <linux/types.h>
#include <asm/atomic_ops.h>
#include <asm/barrier.h>
#include <asm/cmpxchg.h>

#define ATOMIC_INIT(i)  { (i) }

static inline int atomic_read(const atomic_t *v)
{
	int c;

	asm volatile(
		"	l	%0,%1\n"
		: "=d" (c) : "Q" (v->counter));
	return c;
}

static inline void atomic_set(atomic_t *v, int i)
{
	asm volatile(
		"	st	%1,%0\n"
		: "=Q" (v->counter) : "d" (i));
}

static inline int atomic_add_return(int i, atomic_t *v)
{
	return __atomic_add_barrier(i, &v->counter) + i;
}

static inline int atomic_fetch_add(int i, atomic_t *v)
{
	return __atomic_add_barrier(i, &v->counter);
}

static inline void atomic_add(int i, atomic_t *v)
{
#ifdef CONFIG_HAVE_MARCH_Z196_FEATURES
	if (__builtin_constant_p(i) && (i > -129) && (i < 128)) {
		__atomic_add_const(i, &v->counter);
		return;
	}
#endif
	__atomic_add(i, &v->counter);
}

#define atomic_sub(_i, _v)		atomic_add(-(int)(_i), _v)
#define atomic_sub_return(_i, _v)	atomic_add_return(-(int)(_i), _v)
#define atomic_fetch_sub(_i, _v)	atomic_fetch_add(-(int)(_i), _v)

#define ATOMIC_OPS(op)							\
static inline void atomic_##op(int i, atomic_t *v)			\
{									\
	__atomic_##op(i, &v->counter);					\
}									\
static inline int atomic_fetch_##op(int i, atomic_t *v)		\
{									\
	return __atomic_##op##_barrier(i, &v->counter);		\
}

ATOMIC_OPS(and)
ATOMIC_OPS(or)
ATOMIC_OPS(xor)

#undef ATOMIC_OPS

#define atomic_xchg(v, new) (xchg(&((v)->counter), new))

static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
{
	return __atomic_cmpxchg(&v->counter, old, new);
}

#define ATOMIC64_INIT(i)  { (i) }

static inline s64 atomic64_read(const atomic64_t *v)
{
	s64 c;

	asm volatile(
		"	lg	%0,%1\n"
		: "=d" (c) : "Q" (v->counter));
	return c;
}

static inline void atomic64_set(atomic64_t *v, s64 i)
{
	asm volatile(
		"	stg	%1,%0\n"
		: "=Q" (v->counter) : "d" (i));
}

static inline s64 atomic64_add_return(s64 i, atomic64_t *v)
{
	return __atomic64_add_barrier(i, (long *)&v->counter) + i;
}

static inline s64 atomic64_fetch_add(s64 i, atomic64_t *v)
{
	return __atomic64_add_barrier(i, (long *)&v->counter);
}

static inline void atomic64_add(s64 i, atomic64_t *v)
{
#ifdef CONFIG_HAVE_MARCH_Z196_FEATURES
	if (__builtin_constant_p(i) && (i > -129) && (i < 128)) {
		__atomic64_add_const(i, (long *)&v->counter);
		return;
	}
#endif
	__atomic64_add(i, (long *)&v->counter);
}

#define atomic64_xchg(v, new) (xchg(&((v)->counter), new))

static inline s64 atomic64_cmpxchg(atomic64_t *v, s64 old, s64 new)
{
	return __atomic64_cmpxchg((long *)&v->counter, old, new);
}

#define ATOMIC64_OPS(op)						\
static inline void atomic64_##op(s64 i, atomic64_t *v)			\
{									\
	__atomic64_##op(i, (long *)&v->counter);			\
}									\
static inline long atomic64_fetch_##op(s64 i, atomic64_t *v)		\
{									\
	return __atomic64_##op##_barrier(i, (long *)&v->counter);	\
}

ATOMIC64_OPS(and)
ATOMIC64_OPS(or)
ATOMIC64_OPS(xor)

#undef ATOMIC64_OPS

#define atomic64_sub_return(_i, _v)	atomic64_add_return(-(s64)(_i), _v)
#define atomic64_fetch_sub(_i, _v)	atomic64_fetch_add(-(s64)(_i), _v)
#define atomic64_sub(_i, _v)		atomic64_add(-(s64)(_i), _v)

#endif /* __ARCH_S390_ATOMIC__ */
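/*
 * A minimal usage sketch of the 32-bit primitives above (illustrative only,
 * not part of this header): a simple reference count. The names
 * example_refcount, example_get and example_put are hypothetical.
 *
 *	static atomic_t example_refcount = ATOMIC_INIT(1);
 *
 *	static void example_get(void)
 *	{
 *		atomic_add(1, &example_refcount);
 *	}
 *
 *	static bool example_put(void)
 *	{
 *		// atomic_sub_return() yields the value after the subtraction,
 *		// so a zero result means this caller dropped the last reference.
 *		return atomic_sub_return(1, &example_refcount) == 0;
 *	}
 */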