#include <linux/export.h>
#include <linux/spinlock.h>
#include <linux/atomic.h>

/*
 * This is an implementation of the notion of "decrement a
 * reference count, and return locked if it decremented to zero".
 *
 * NOTE NOTE NOTE! This is _not_ equivalent to
 *
 *	if (atomic_dec_and_test(&atomic)) {
 *		spin_lock(&lock);
 *		return 1;
 *	}
 *	return 0;
 *
 * because the spin-lock and the decrement must be
 * "atomic".
 */
int _atomic_dec_and_lock(atomic_t *atomic, spinlock_t *lock)
{
	/* Subtract 1 from counter unless that drops it to 0 (ie. it was 1) */
	if (atomic_add_unless(atomic, -1, 1))
		return 0;

	/* Otherwise do it the slow way */
	spin_lock(lock);
	if (atomic_dec_and_test(atomic))
		return 1;
	spin_unlock(lock);
	return 0;
}

EXPORT_SYMBOL(_atomic_dec_and_lock);
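
/*
 * Illustrative caller sketch (not part of the original file): the pattern
 * this primitive supports is "drop a reference, and if it was the last one,
 * tear the object down under the lock that lookups also take". The names
 * obj, obj->refcnt, obj->node and obj_list_lock below are hypothetical;
 * atomic_dec_and_lock() is the wrapper around _atomic_dec_and_lock():
 *
 *	if (atomic_dec_and_lock(&obj->refcnt, &obj_list_lock)) {
 *		list_del(&obj->node);
 *		spin_unlock(&obj_list_lock);
 *		kfree(obj);
 *	}
 *
 * Because the final decrement to zero happens in the slow path above while
 * the lock is already held, a concurrent lookup that takes the same lock
 * cannot hand out a new reference to an object that is about to be freed.
 */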