/*
 * Copyright (C) 2003 Jerome Marchand, Bull S.A.
 * Cleaned up by David Mosberger-Tang <davidm@hpl.hp.com>
 *
 * This file is released under the GPLv2, or at your option any later version.
 *
 * ia64 version of "atomic_dec_and_lock()" using the atomic "cmpxchg"
 * instruction.  This code is an adaptation of the x86 version of
 * "atomic_dec_and_lock()".
 */
| 10 | |
| 11 | #include <linux/compiler.h> |
| 12 | #include <linux/module.h> |
| 13 | #include <linux/spinlock.h> |
| 14 | #include <asm/atomic.h> |
| 15 | |
| 16 | /* |
| 17 | * Decrement REFCOUNT and if the count reaches zero, acquire the spinlock. Both of these |
| 18 | * operations have to be done atomically, so that the count doesn't drop to zero without |
| 19 | * acquiring the spinlock first. |
| 20 | */ |
| 21 | int |
| 22 | _atomic_dec_and_lock (atomic_t *refcount, spinlock_t *lock) |
| 23 | { |
| 24 | int old, new; |
| 25 | |
| 26 | do { |
| 27 | old = atomic_read(refcount); |
| 28 | new = old - 1; |
| 29 | |
| 30 | if (unlikely (old == 1)) { |
| 31 | /* oops, we may be decrementing to zero, do it the slow way... */ |
| 32 | spin_lock(lock); |
| 33 | if (atomic_dec_and_test(refcount)) |
| 34 | return 1; |
| 35 | spin_unlock(lock); |
| 36 | return 0; |
| 37 | } |
| 38 | } while (cmpxchg(&refcount->counter, old, new) != old); |
| 39 | return 0; |
| 40 | } |
| 41 | |
| 42 | EXPORT_SYMBOL(_atomic_dec_and_lock); |