blob: e73822aa6e9a68131df955c0a6025c7aa5556fc6 [file] [log] [blame]
Linus Torvalds1da177e2005-04-16 15:20:36 -07001#include <linux/module.h>
2#include <linux/spinlock.h>
3#include <asm/atomic.h>
4
/*
 * This is an implementation of the notion of "decrement a
 * reference count, and return locked if it decremented to zero".
 *
 * NOTE NOTE NOTE! This is _not_ equivalent to
 *
 *	if (atomic_dec_and_test(&atomic)) {
 *		spin_lock(&lock);
 *		return 1;
 *	}
 *	return 0;
 *
 * because the spin-lock and the decrement must be
 * "atomic".
 */
Linus Torvalds1da177e2005-04-16 15:20:36 -070020int _atomic_dec_and_lock(atomic_t *atomic, spinlock_t *lock)
21{
Nick Piggina57004e12006-01-08 01:02:19 -080022 /* Subtract 1 from counter unless that drops it to 0 (ie. it was 1) */
23 if (atomic_add_unless(atomic, -1, 1))
24 return 0;
Jan Blunck417dcdf2009-06-16 15:33:33 -070025
Nick Piggina57004e12006-01-08 01:02:19 -080026 /* Otherwise do it the slow way */
Linus Torvalds1da177e2005-04-16 15:20:36 -070027 spin_lock(lock);
28 if (atomic_dec_and_test(atomic))
29 return 1;
30 spin_unlock(lock);
31 return 0;
32}
33
34EXPORT_SYMBOL(_atomic_dec_and_lock);