/*
 * Generic barrier definitions, originally based on MN10300 definitions.
 *
 * It should be possible to use these on really simple architectures,
 * but they serve more as a starting point for new ports.
 *
 * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public Licence
 * as published by the Free Software Foundation; either version
 * 2 of the Licence, or (at your option) any later version.
 */
#ifndef __ASM_GENERIC_BARRIER_H
#define __ASM_GENERIC_BARRIER_H

#ifndef __ASSEMBLY__

#include <linux/compiler.h>

#ifndef nop
#define nop()	asm volatile ("nop")
#endif

/*
 * Force strict CPU ordering. And yes, this is required on UP too when we're
 * talking to devices.
 *
 * Fall back to compiler barriers if nothing better is provided.
 */

#ifndef mb
#define mb()	barrier()
#endif

#ifndef rmb
#define rmb()	mb()
#endif

#ifndef wmb
#define wmb()	mb()
#endif

#ifndef dma_rmb
#define dma_rmb()	rmb()
#endif

#ifndef dma_wmb
#define dma_wmb()	wmb()
#endif
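
/*
 * A minimal sketch of how the dma_*() barriers above are typically used
 * (not part of the original header; desc, buf and DESC_VALID are
 * hypothetical names):
 *
 *	desc->addr   = buf->dma_addr;
 *	desc->len    = buf->len;
 *	dma_wmb();
 *	desc->status = DESC_VALID;
 *
 * The dma_wmb() guarantees the device observes the address and length
 * before it observes the descriptor being marked valid; a full wmb()
 * would additionally be needed before any MMIO doorbell write.
 */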

#ifndef read_barrier_depends
#define read_barrier_depends()		do { } while (0)
#endif
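
/*
 * The __smp_*() variants below always expand to the full SMP barrier,
 * even on !CONFIG_SMP builds. The smp_*() wrappers further down collapse
 * into plain compiler barriers on UP kernels, whereas virt_*() maps
 * directly to __smp_*() so that even a UP guest orders its accesses
 * against an SMP host.
 */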
#ifndef __smp_mb
#define __smp_mb()	mb()
#endif

#ifndef __smp_rmb
#define __smp_rmb()	rmb()
#endif

#ifndef __smp_wmb
#define __smp_wmb()	wmb()
#endif

#ifndef __smp_read_barrier_depends
#define __smp_read_barrier_depends()	read_barrier_depends()
#endif

#ifdef CONFIG_SMP

#ifndef smp_mb
#define smp_mb()	__smp_mb()
#endif

#ifndef smp_rmb
#define smp_rmb()	__smp_rmb()
#endif

#ifndef smp_wmb
#define smp_wmb()	__smp_wmb()
#endif

#ifndef smp_read_barrier_depends
#define smp_read_barrier_depends()	__smp_read_barrier_depends()
#endif

#else	/* !CONFIG_SMP */

#ifndef smp_mb
#define smp_mb()	barrier()
#endif

#ifndef smp_rmb
#define smp_rmb()	barrier()
#endif

#ifndef smp_wmb
#define smp_wmb()	barrier()
#endif

#ifndef smp_read_barrier_depends
#define smp_read_barrier_depends()	do { } while (0)
#endif

#endif	/* CONFIG_SMP */

#ifndef __smp_store_mb
#define __smp_store_mb(var, value)  do { WRITE_ONCE(var, value); __smp_mb(); } while (0)
#endif

#ifndef __smp_mb__before_atomic
#define __smp_mb__before_atomic()	__smp_mb()
#endif

#ifndef __smp_mb__after_atomic
#define __smp_mb__after_atomic()	__smp_mb()
#endif

#ifndef __smp_store_release
#define __smp_store_release(p, v)					\
do {									\
	compiletime_assert_atomic_type(*p);				\
	__smp_mb();							\
	WRITE_ONCE(*p, v);						\
} while (0)
#endif

#ifndef __smp_load_acquire
#define __smp_load_acquire(p)						\
({									\
	typeof(*p) ___p1 = READ_ONCE(*p);				\
	compiletime_assert_atomic_type(*p);				\
	__smp_mb();							\
	___p1;								\
})
#endif

#ifdef CONFIG_SMP

#ifndef smp_store_mb
#define smp_store_mb(var, value)  __smp_store_mb(var, value)
#endif

#ifndef smp_mb__before_atomic
#define smp_mb__before_atomic()	__smp_mb__before_atomic()
#endif

#ifndef smp_mb__after_atomic
#define smp_mb__after_atomic()	__smp_mb__after_atomic()
#endif

#ifndef smp_store_release
#define smp_store_release(p, v) __smp_store_release(p, v)
#endif

#ifndef smp_load_acquire
#define smp_load_acquire(p) __smp_load_acquire(p)
#endif

#else	/* !CONFIG_SMP */

#ifndef smp_store_mb
#define smp_store_mb(var, value)  do { WRITE_ONCE(var, value); barrier(); } while (0)
#endif

#ifndef smp_mb__before_atomic
#define smp_mb__before_atomic()	barrier()
#endif

#ifndef smp_mb__after_atomic
#define smp_mb__after_atomic()	barrier()
#endif

#ifndef smp_store_release
#define smp_store_release(p, v)						\
do {									\
	compiletime_assert_atomic_type(*p);				\
	barrier();							\
	WRITE_ONCE(*p, v);						\
} while (0)
#endif

#ifndef smp_load_acquire
#define smp_load_acquire(p)						\
({									\
	typeof(*p) ___p1 = READ_ONCE(*p);				\
	compiletime_assert_atomic_type(*p);				\
	barrier();							\
	___p1;								\
})
#endif

#endif	/* CONFIG_SMP */
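
/*
 * A minimal sketch of the canonical pairing of the two primitives above
 * (not part of the original header; data, ready, produce() and consume()
 * are hypothetical names):
 *
 *	int data, ready;
 *
 *	void produce(void)
 *	{
 *		data = 42;
 *		smp_store_release(&ready, 1);
 *	}
 *
 *	void consume(void)
 *	{
 *		while (!smp_load_acquire(&ready))
 *			cpu_relax();
 *		BUG_ON(data != 42);
 *	}
 *
 * The release store orders the write to data before the write to ready;
 * the acquire load orders the read of ready before the read of data, so
 * consume() can never observe ready == 1 but data != 42.
 */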

/* Barriers for virtual machine guests when talking to an SMP host */
#define virt_mb() __smp_mb()
#define virt_rmb() __smp_rmb()
#define virt_wmb() __smp_wmb()
#define virt_read_barrier_depends() __smp_read_barrier_depends()
#define virt_store_mb(var, value) __smp_store_mb(var, value)
#define virt_mb__before_atomic() __smp_mb__before_atomic()
#define virt_mb__after_atomic() __smp_mb__after_atomic()
#define virt_store_release(p, v) __smp_store_release(p, v)
#define virt_load_acquire(p) __smp_load_acquire(p)
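
/*
 * A minimal sketch of a guest publishing work to the host, in the style
 * of a virtio ring (not part of the original header; the ring layout and
 * field names are hypothetical):
 *
 *	ring->desc[head] = *desc;
 *	virt_wmb();
 *	ring->avail_idx = head + 1;
 *
 * virt_*() rather than smp_*() is required here: even a UP guest races
 * with an SMP host, so these must not compile away, yet the mandatory
 * barriers would be needlessly heavy for memory that no device touches.
 */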

/**
 * smp_acquire__after_ctrl_dep() - Provide ACQUIRE ordering after a control dependency
 *
 * A control dependency provides LOAD->STORE ordering; the additional RMB
 * provides LOAD->LOAD ordering. Together they provide LOAD->{LOAD,STORE}
 * ordering, a.k.a. (load-)ACQUIRE.
 *
 * Architectures that do not do load speculation can have this be barrier().
 */
#ifndef smp_acquire__after_ctrl_dep
#define smp_acquire__after_ctrl_dep()	smp_rmb()
#endif
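
/*
 * A minimal sketch of upgrading a control dependency to ACQUIRE (not
 * part of the original header; obj and its flag field are hypothetical):
 *
 *	while (!READ_ONCE(obj->flag))
 *		cpu_relax();
 *	smp_acquire__after_ctrl_dep();
 *
 * The loop's control dependency only orders the flag load against later
 * stores; the added barrier also orders it against later loads, so code
 * after it sees everything published before flag was set.
 */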

/**
 * smp_cond_load_acquire() - (Spin) wait for cond with ACQUIRE ordering
 * @ptr: pointer to the variable to wait on
 * @cond: boolean expression to wait for
 *
 * Equivalent to using smp_load_acquire() on the condition variable, but
 * employs the control dependency of the wait to reduce the barrier on many
 * platforms.
 *
 * Due to C lacking lambda expressions, we load the value of *ptr into a
 * pre-named variable @VAL to be used in @cond.
 */
#ifndef smp_cond_load_acquire
#define smp_cond_load_acquire(ptr, cond_expr) ({		\
	typeof(ptr) __PTR = (ptr);				\
	typeof(*ptr) VAL;					\
	for (;;) {						\
		VAL = READ_ONCE(*__PTR);			\
		if (cond_expr)					\
			break;					\
		cpu_relax();					\
	}							\
	smp_acquire__after_ctrl_dep();				\
	VAL;							\
})
#endif
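
/*
 * A minimal sketch of waiting on a state variable with
 * smp_cond_load_acquire() (not part of the original header; node and its
 * state field are hypothetical):
 *
 *	int state = smp_cond_load_acquire(&node->state, VAL != 0);
 *
 * This spins until node->state becomes non-zero, then returns the
 * observed value with ACQUIRE semantics, so later accesses cannot be
 * reordered before the successful load.
 */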

#endif /* !__ASSEMBLY__ */
#endif /* __ASM_GENERIC_BARRIER_H */