Lines Matching +full:3 +full:v

27 static __inline__ int arch_atomic_read(const atomic_t *v)  in arch_atomic_read()  argument
33 __asm__ __volatile__("lwz %0,0(%1)" : "=r"(t) : "b"(&v->counter)); in arch_atomic_read()
35 __asm__ __volatile__("lwz%U1%X1 %0,%1" : "=r"(t) : "m<>"(v->counter)); in arch_atomic_read()
40 static __inline__ void arch_atomic_set(atomic_t *v, int i) in arch_atomic_set() argument
44 __asm__ __volatile__("stw %1,0(%2)" : "=m"(v->counter) : "r"(i), "b"(&v->counter)); in arch_atomic_set()
46 __asm__ __volatile__("stw%U0%X0 %1,%0" : "=m<>"(v->counter) : "r"(i)); in arch_atomic_set()
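For orientation, a minimal caller-side sketch of how these primitives are used (assuming the generic <linux/atomic.h> wrappers; the counter and function below are illustrative, not from the file above). atomic_read()/atomic_set() are plain load/store operations with no implied barrier:

/* Usage sketch only: hypothetical counter, real atomic_read()/atomic_set() API. */
static atomic_t example_counter = ATOMIC_INIT(0);

static void example_reset_if_high(void)
{
	if (atomic_read(&example_counter) > 100)	/* a plain lwz on this architecture */
		atomic_set(&example_counter, 0);	/* a plain stw on this architecture */
}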
50 static __inline__ void arch_atomic_##op(int a, atomic_t *v) \
55 "1: lwarx %0,0,%3 # atomic_" #op "\n" \
57 " stwcx. %0,0,%3 \n" \
59 : "=&r" (t), "+m" (v->counter) \
60 : "r"#sign (a), "r" (&v->counter) \
65 static inline int arch_atomic_##op##_return_relaxed(int a, atomic_t *v) \
70 "1: lwarx %0,0,%3 # atomic_" #op "_return_relaxed\n" \
72 " stwcx. %0,0,%3\n" \
74 : "=&r" (t), "+m" (v->counter) \
75 : "r"#sign (a), "r" (&v->counter) \
82 static inline int arch_atomic_fetch_##op##_relaxed(int a, atomic_t *v) \
88 #asm_op "%I3" suffix " %1,%0,%3\n" \
91 : "=&r" (res), "=&r" (t), "+m" (v->counter) \
92 : "r"#sign (a), "r" (&v->counter) \
132 * @v: pointer of type atomic_t
133 * @a: the amount to add to v...
134 * @u: ...unless v is equal to u.
136 * Atomically adds @a to @v, so long as it was not @u.
137 * Returns the old value of @v.
139 static __inline__ int arch_atomic_fetch_add_unless(atomic_t *v, int a, int u) in arch_atomic_fetch_add_unless() argument
146 cmpw 0,%0,%3 \n\ in arch_atomic_fetch_add_unless()
155 : "r" (&v->counter), "rI" (a), "r" (u) in arch_atomic_fetch_add_unless()
163 * Atomically test *v and decrement if it is greater than 0.
164 * The function returns the old value of *v minus 1, even if
165 * the atomic variable, v, was not decremented.
167 static __inline__ int arch_atomic_dec_if_positive(atomic_t *v) in arch_atomic_dec_if_positive() argument
182 : "r" (&v->counter) in arch_atomic_dec_if_positive()
193 static __inline__ s64 arch_atomic64_read(const atomic64_t *v) in arch_atomic64_read() argument
199 __asm__ __volatile__("ld %0,0(%1)" : "=r"(t) : "b"(&v->counter)); in arch_atomic64_read()
201 __asm__ __volatile__("ld%U1%X1 %0,%1" : "=r"(t) : DS_FORM_CONSTRAINT (v->counter)); in arch_atomic64_read()
206 static __inline__ void arch_atomic64_set(atomic64_t *v, s64 i) in arch_atomic64_set() argument
210 __asm__ __volatile__("std %1,0(%2)" : "=m"(v->counter) : "r"(i), "b"(&v->counter)); in arch_atomic64_set()
212 __asm__ __volatile__("std%U0%X0 %1,%0" : "=" DS_FORM_CONSTRAINT (v->counter) : "r"(i)); in arch_atomic64_set()
216 static __inline__ void arch_atomic64_##op(s64 a, atomic64_t *v) \
221 "1: ldarx %0,0,%3 # atomic64_" #op "\n" \
223 " stdcx. %0,0,%3 \n" \
225 : "=&r" (t), "+m" (v->counter) \
226 : "r" (a), "r" (&v->counter) \
232 arch_atomic64_##op##_return_relaxed(s64 a, atomic64_t *v) \
237 "1: ldarx %0,0,%3 # atomic64_" #op "_return_relaxed\n" \
239 " stdcx. %0,0,%3\n" \
241 : "=&r" (t), "+m" (v->counter) \
242 : "r" (a), "r" (&v->counter) \
250 arch_atomic64_fetch_##op##_relaxed(s64 a, atomic64_t *v) \
256 #asm_op " %1,%3,%0\n" \
259 : "=&r" (res), "=&r" (t), "+m" (v->counter) \
260 : "r" (a), "r" (&v->counter) \
298 static __inline__ void arch_atomic64_inc(atomic64_t *v) in arch_atomic64_inc() argument
307 : "=&r" (t), "+m" (v->counter) in arch_atomic64_inc()
308 : "r" (&v->counter) in arch_atomic64_inc()
313 static __inline__ s64 arch_atomic64_inc_return_relaxed(atomic64_t *v) in arch_atomic64_inc_return_relaxed() argument
322 : "=&r" (t), "+m" (v->counter) in arch_atomic64_inc_return_relaxed()
323 : "r" (&v->counter) in arch_atomic64_inc_return_relaxed()
329 static __inline__ void arch_atomic64_dec(atomic64_t *v) in arch_atomic64_dec() argument
338 : "=&r" (t), "+m" (v->counter) in arch_atomic64_dec()
339 : "r" (&v->counter) in arch_atomic64_dec()
344 static __inline__ s64 arch_atomic64_dec_return_relaxed(atomic64_t *v) in arch_atomic64_dec_return_relaxed() argument
353 : "=&r" (t), "+m" (v->counter) in arch_atomic64_dec_return_relaxed()
354 : "r" (&v->counter) in arch_atomic64_dec_return_relaxed()
364 * Atomically test *v and decrement if it is greater than 0.
365 * The function returns the old value of *v minus 1.
367 static __inline__ s64 arch_atomic64_dec_if_positive(atomic64_t *v) in arch_atomic64_dec_if_positive() argument
381 : "r" (&v->counter) in arch_atomic64_dec_if_positive()
390 * @v: pointer of type atomic64_t
391 * @a: the amount to add to v...
392 * @u: ...unless v is equal to u.
394 * Atomically adds @a to @v, so long as it was not @u.
395 * Returns the old value of @v.
397 static __inline__ s64 arch_atomic64_fetch_add_unless(atomic64_t *v, s64 a, s64 u) in arch_atomic64_fetch_add_unless() argument
404 cmpd 0,%0,%3 \n\ in arch_atomic64_fetch_add_unless()
413 : "r" (&v->counter), "r" (a), "r" (u) in arch_atomic64_fetch_add_unless()
422 * @v: pointer of type atomic64_t
424 * Atomically increments @v by 1, so long as @v is non-zero.
425 * Returns non-zero if @v was non-zero, and zero otherwise.
427 static __inline__ int arch_atomic64_inc_not_zero(atomic64_t *v) in arch_atomic64_inc_not_zero() argument
443 : "r" (&v->counter) in arch_atomic64_inc_not_zero()
448 #define arch_atomic64_inc_not_zero(v) arch_atomic64_inc_not_zero((v)) argument
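A sketch of the usual inc_not_zero pattern: take a reference only while the object is still live ('struct sketch_obj' is hypothetical; the atomic64_inc_not_zero() wrapper and -ENOENT come from the usual kernel headers):

struct sketch_obj { atomic64_t users; };		/* hypothetical object */

static int sketch_get_user(struct sketch_obj *obj)
{
	if (!atomic64_inc_not_zero(&obj->users))
		return -ENOENT;				/* count was 0: object is going away */
	return 0;
}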