/*
 * IOMMU API for ARM architected SMMUv3 implementations.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 *
 * Copyright (C) 2015 ARM Limited
 *
 * Author: Will Deacon <will.deacon@arm.com>
 *
 * This driver is powered by bad coffee and bombay mix.
 */

#include <linux/delay.h>
#include <linux/err.h>
#include <linux/interrupt.h>
#include <linux/iommu.h>
#include <linux/iopoll.h>
#include <linux/module.h>
#include <linux/msi.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/of_platform.h>
#include <linux/pci.h>
#include <linux/platform_device.h>

#include "io-pgtable.h"

/* MMIO registers */
#define ARM_SMMU_IDR0			0x0
#define IDR0_ST_LVL_SHIFT		27
#define IDR0_ST_LVL_MASK		0x3
#define IDR0_ST_LVL_2LVL		(1 << IDR0_ST_LVL_SHIFT)
#define IDR0_STALL_MODEL		(3 << 24)
#define IDR0_TTENDIAN_SHIFT		21
#define IDR0_TTENDIAN_MASK		0x3
#define IDR0_TTENDIAN_LE		(2 << IDR0_TTENDIAN_SHIFT)
#define IDR0_TTENDIAN_BE		(3 << IDR0_TTENDIAN_SHIFT)
#define IDR0_TTENDIAN_MIXED		(0 << IDR0_TTENDIAN_SHIFT)
#define IDR0_CD2L			(1 << 19)
#define IDR0_VMID16			(1 << 18)
#define IDR0_PRI			(1 << 16)
#define IDR0_SEV			(1 << 14)
#define IDR0_MSI			(1 << 13)
#define IDR0_ASID16			(1 << 12)
#define IDR0_ATS			(1 << 10)
#define IDR0_HYP			(1 << 9)
#define IDR0_COHACC			(1 << 4)
#define IDR0_TTF_SHIFT			2
#define IDR0_TTF_MASK			0x3
#define IDR0_TTF_AARCH64		(2 << IDR0_TTF_SHIFT)
#define IDR0_S1P			(1 << 1)
#define IDR0_S2P			(1 << 0)

#define ARM_SMMU_IDR1			0x4
#define IDR1_TABLES_PRESET		(1 << 30)
#define IDR1_QUEUES_PRESET		(1 << 29)
#define IDR1_REL			(1 << 28)
#define IDR1_CMDQ_SHIFT			21
#define IDR1_CMDQ_MASK			0x1f
#define IDR1_EVTQ_SHIFT			16
#define IDR1_EVTQ_MASK			0x1f
#define IDR1_PRIQ_SHIFT			11
#define IDR1_PRIQ_MASK			0x1f
#define IDR1_SSID_SHIFT			6
#define IDR1_SSID_MASK			0x1f
#define IDR1_SID_SHIFT			0
#define IDR1_SID_MASK			0x3f

#define ARM_SMMU_IDR5			0x14
#define IDR5_STALL_MAX_SHIFT		16
#define IDR5_STALL_MAX_MASK		0xffff
#define IDR5_GRAN64K			(1 << 6)
#define IDR5_GRAN16K			(1 << 5)
#define IDR5_GRAN4K			(1 << 4)
#define IDR5_OAS_SHIFT			0
#define IDR5_OAS_MASK			0x7
#define IDR5_OAS_32_BIT			(0 << IDR5_OAS_SHIFT)
#define IDR5_OAS_36_BIT			(1 << IDR5_OAS_SHIFT)
#define IDR5_OAS_40_BIT			(2 << IDR5_OAS_SHIFT)
#define IDR5_OAS_42_BIT			(3 << IDR5_OAS_SHIFT)
#define IDR5_OAS_44_BIT			(4 << IDR5_OAS_SHIFT)
#define IDR5_OAS_48_BIT			(5 << IDR5_OAS_SHIFT)

#define ARM_SMMU_CR0			0x20
#define CR0_CMDQEN			(1 << 3)
#define CR0_EVTQEN			(1 << 2)
#define CR0_PRIQEN			(1 << 1)
#define CR0_SMMUEN			(1 << 0)

#define ARM_SMMU_CR0ACK			0x24

#define ARM_SMMU_CR1			0x28
#define CR1_SH_NSH			0
#define CR1_SH_OSH			2
#define CR1_SH_ISH			3
#define CR1_CACHE_NC			0
#define CR1_CACHE_WB			1
#define CR1_CACHE_WT			2
#define CR1_TABLE_SH_SHIFT		10
#define CR1_TABLE_OC_SHIFT		8
#define CR1_TABLE_IC_SHIFT		6
#define CR1_QUEUE_SH_SHIFT		4
#define CR1_QUEUE_OC_SHIFT		2
#define CR1_QUEUE_IC_SHIFT		0

#define ARM_SMMU_CR2			0x2c
#define CR2_PTM				(1 << 2)
#define CR2_RECINVSID			(1 << 1)
#define CR2_E2H				(1 << 0)

#define ARM_SMMU_IRQ_CTRL		0x50
#define IRQ_CTRL_EVTQ_IRQEN		(1 << 2)
#define IRQ_CTRL_PRIQ_IRQEN		(1 << 1)
#define IRQ_CTRL_GERROR_IRQEN		(1 << 0)

#define ARM_SMMU_IRQ_CTRLACK		0x54

#define ARM_SMMU_GERROR			0x60
#define GERROR_SFM_ERR			(1 << 8)
#define GERROR_MSI_GERROR_ABT_ERR	(1 << 7)
#define GERROR_MSI_PRIQ_ABT_ERR		(1 << 6)
#define GERROR_MSI_EVTQ_ABT_ERR		(1 << 5)
#define GERROR_MSI_CMDQ_ABT_ERR		(1 << 4)
#define GERROR_PRIQ_ABT_ERR		(1 << 3)
#define GERROR_EVTQ_ABT_ERR		(1 << 2)
#define GERROR_CMDQ_ERR			(1 << 0)
#define GERROR_ERR_MASK			0xfd

#define ARM_SMMU_GERRORN		0x64

#define ARM_SMMU_GERROR_IRQ_CFG0	0x68
#define ARM_SMMU_GERROR_IRQ_CFG1	0x70
#define ARM_SMMU_GERROR_IRQ_CFG2	0x74

#define ARM_SMMU_STRTAB_BASE		0x80
#define STRTAB_BASE_RA			(1UL << 62)
#define STRTAB_BASE_ADDR_SHIFT		6
#define STRTAB_BASE_ADDR_MASK		0x3ffffffffffUL

#define ARM_SMMU_STRTAB_BASE_CFG	0x88
#define STRTAB_BASE_CFG_LOG2SIZE_SHIFT	0
#define STRTAB_BASE_CFG_LOG2SIZE_MASK	0x3f
#define STRTAB_BASE_CFG_SPLIT_SHIFT	6
#define STRTAB_BASE_CFG_SPLIT_MASK	0x1f
#define STRTAB_BASE_CFG_FMT_SHIFT	16
#define STRTAB_BASE_CFG_FMT_MASK	0x3
#define STRTAB_BASE_CFG_FMT_LINEAR	(0 << STRTAB_BASE_CFG_FMT_SHIFT)
#define STRTAB_BASE_CFG_FMT_2LVL	(1 << STRTAB_BASE_CFG_FMT_SHIFT)

#define ARM_SMMU_CMDQ_BASE		0x90
#define ARM_SMMU_CMDQ_PROD		0x98
#define ARM_SMMU_CMDQ_CONS		0x9c

#define ARM_SMMU_EVTQ_BASE		0xa0
#define ARM_SMMU_EVTQ_PROD		0x100a8
#define ARM_SMMU_EVTQ_CONS		0x100ac
#define ARM_SMMU_EVTQ_IRQ_CFG0		0xb0
#define ARM_SMMU_EVTQ_IRQ_CFG1		0xb8
#define ARM_SMMU_EVTQ_IRQ_CFG2		0xbc

#define ARM_SMMU_PRIQ_BASE		0xc0
#define ARM_SMMU_PRIQ_PROD		0x100c8
#define ARM_SMMU_PRIQ_CONS		0x100cc
#define ARM_SMMU_PRIQ_IRQ_CFG0		0xd0
#define ARM_SMMU_PRIQ_IRQ_CFG1		0xd8
#define ARM_SMMU_PRIQ_IRQ_CFG2		0xdc

/* Common MSI config fields */
#define MSI_CFG0_ADDR_SHIFT		2
#define MSI_CFG0_ADDR_MASK		0x3fffffffffffUL
#define MSI_CFG2_SH_SHIFT		4
#define MSI_CFG2_SH_NSH			(0UL << MSI_CFG2_SH_SHIFT)
#define MSI_CFG2_SH_OSH			(2UL << MSI_CFG2_SH_SHIFT)
#define MSI_CFG2_SH_ISH			(3UL << MSI_CFG2_SH_SHIFT)
#define MSI_CFG2_MEMATTR_SHIFT		0
#define MSI_CFG2_MEMATTR_DEVICE_nGnRE	(0x1 << MSI_CFG2_MEMATTR_SHIFT)

#define Q_IDX(q, p)			((p) & ((1 << (q)->max_n_shift) - 1))
#define Q_WRP(q, p)			((p) & (1 << (q)->max_n_shift))
#define Q_OVERFLOW_FLAG			(1 << 31)
#define Q_OVF(q, p)			((p) & Q_OVERFLOW_FLAG)
#define Q_ENT(q, p)			((q)->base +			\
					 Q_IDX(q, p) * (q)->ent_dwords)
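
/*
 * Worked example (editorial note, not part of the original source): for a
 * queue with max_n_shift == 8, Q_IDX() extracts bits [7:0] (the slot within
 * a 256-entry ring), Q_WRP() extracts bit 8 (a wrap flag that flips on each
 * pass around the ring, which is how queue_full() and queue_empty() below
 * tell "full" from "empty" when the indices match), and Q_OVF() extracts
 * bit 31, the hardware overflow flag.
 */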

#define Q_BASE_RWA			(1UL << 62)
#define Q_BASE_ADDR_SHIFT		5
#define Q_BASE_ADDR_MASK		0xfffffffffffUL
#define Q_BASE_LOG2SIZE_SHIFT		0
#define Q_BASE_LOG2SIZE_MASK		0x1fUL

/*
 * Stream table.
 *
 * Linear: Enough to cover 1 << IDR1.SIDSIZE entries
 * 2lvl: 128k L1 entries,
 *       256 lazy entries per table (each table covers a PCI bus)
 */
#define STRTAB_L1_SZ_SHIFT		20
#define STRTAB_SPLIT			8
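
/*
 * Worked example (editorial note, not part of the original source): with
 * STRTAB_SPLIT == 8, a StreamID is split into sid[*:8], which indexes the
 * level-1 table, and sid[7:0], which indexes a 256-entry level-2 table of
 * STEs. Level-2 tables are allocated lazily, one per PCI bus when the
 * StreamID equals the PCI Requester ID.
 */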

#define STRTAB_L1_DESC_DWORDS		1
#define STRTAB_L1_DESC_SPAN_SHIFT	0
#define STRTAB_L1_DESC_SPAN_MASK	0x1fUL
#define STRTAB_L1_DESC_L2PTR_SHIFT	6
#define STRTAB_L1_DESC_L2PTR_MASK	0x3ffffffffffUL

#define STRTAB_STE_DWORDS		8
#define STRTAB_STE_0_V			(1UL << 0)
#define STRTAB_STE_0_CFG_SHIFT		1
#define STRTAB_STE_0_CFG_MASK		0x7UL
#define STRTAB_STE_0_CFG_ABORT		(0UL << STRTAB_STE_0_CFG_SHIFT)
#define STRTAB_STE_0_CFG_BYPASS		(4UL << STRTAB_STE_0_CFG_SHIFT)
#define STRTAB_STE_0_CFG_S1_TRANS	(5UL << STRTAB_STE_0_CFG_SHIFT)
#define STRTAB_STE_0_CFG_S2_TRANS	(6UL << STRTAB_STE_0_CFG_SHIFT)

#define STRTAB_STE_0_S1FMT_SHIFT	4
#define STRTAB_STE_0_S1FMT_LINEAR	(0UL << STRTAB_STE_0_S1FMT_SHIFT)
#define STRTAB_STE_0_S1CTXPTR_SHIFT	6
#define STRTAB_STE_0_S1CTXPTR_MASK	0x3ffffffffffUL
#define STRTAB_STE_0_S1CDMAX_SHIFT	59
#define STRTAB_STE_0_S1CDMAX_MASK	0x1fUL

#define STRTAB_STE_1_S1C_CACHE_NC	0UL
#define STRTAB_STE_1_S1C_CACHE_WBRA	1UL
#define STRTAB_STE_1_S1C_CACHE_WT	2UL
#define STRTAB_STE_1_S1C_CACHE_WB	3UL
#define STRTAB_STE_1_S1C_SH_NSH		0UL
#define STRTAB_STE_1_S1C_SH_OSH		2UL
#define STRTAB_STE_1_S1C_SH_ISH		3UL
#define STRTAB_STE_1_S1CIR_SHIFT	2
#define STRTAB_STE_1_S1COR_SHIFT	4
#define STRTAB_STE_1_S1CSH_SHIFT	6

#define STRTAB_STE_1_S1STALLD		(1UL << 27)

#define STRTAB_STE_1_EATS_ABT		0UL
#define STRTAB_STE_1_EATS_TRANS		1UL
#define STRTAB_STE_1_EATS_S1CHK		2UL
#define STRTAB_STE_1_EATS_SHIFT		28

#define STRTAB_STE_1_STRW_NSEL1		0UL
#define STRTAB_STE_1_STRW_EL2		2UL
#define STRTAB_STE_1_STRW_SHIFT		30

#define STRTAB_STE_2_S2VMID_SHIFT	0
#define STRTAB_STE_2_S2VMID_MASK	0xffffUL
#define STRTAB_STE_2_VTCR_SHIFT		32
#define STRTAB_STE_2_VTCR_MASK		0x7ffffUL
#define STRTAB_STE_2_S2AA64		(1UL << 51)
#define STRTAB_STE_2_S2ENDI		(1UL << 52)
#define STRTAB_STE_2_S2PTW		(1UL << 54)
#define STRTAB_STE_2_S2R		(1UL << 58)

#define STRTAB_STE_3_S2TTB_SHIFT	4
#define STRTAB_STE_3_S2TTB_MASK		0xfffffffffffUL

/* Context descriptor (stage-1 only) */
#define CTXDESC_CD_DWORDS		8
#define CTXDESC_CD_0_TCR_T0SZ_SHIFT	0
#define ARM64_TCR_T0SZ_SHIFT		0
#define ARM64_TCR_T0SZ_MASK		0x1fUL
#define CTXDESC_CD_0_TCR_TG0_SHIFT	6
#define ARM64_TCR_TG0_SHIFT		14
#define ARM64_TCR_TG0_MASK		0x3UL
#define CTXDESC_CD_0_TCR_IRGN0_SHIFT	8
#define ARM64_TCR_IRGN0_SHIFT		8
#define ARM64_TCR_IRGN0_MASK		0x3UL
#define CTXDESC_CD_0_TCR_ORGN0_SHIFT	10
#define ARM64_TCR_ORGN0_SHIFT		10
#define ARM64_TCR_ORGN0_MASK		0x3UL
#define CTXDESC_CD_0_TCR_SH0_SHIFT	12
#define ARM64_TCR_SH0_SHIFT		12
#define ARM64_TCR_SH0_MASK		0x3UL
#define CTXDESC_CD_0_TCR_EPD0_SHIFT	14
#define ARM64_TCR_EPD0_SHIFT		7
#define ARM64_TCR_EPD0_MASK		0x1UL
#define CTXDESC_CD_0_TCR_EPD1_SHIFT	30
#define ARM64_TCR_EPD1_SHIFT		23
#define ARM64_TCR_EPD1_MASK		0x1UL

#define CTXDESC_CD_0_ENDI		(1UL << 15)
#define CTXDESC_CD_0_V			(1UL << 31)

#define CTXDESC_CD_0_TCR_IPS_SHIFT	32
#define ARM64_TCR_IPS_SHIFT		32
#define ARM64_TCR_IPS_MASK		0x7UL
#define CTXDESC_CD_0_TCR_TBI0_SHIFT	38
#define ARM64_TCR_TBI0_SHIFT		37
#define ARM64_TCR_TBI0_MASK		0x1UL

#define CTXDESC_CD_0_AA64		(1UL << 41)
#define CTXDESC_CD_0_R			(1UL << 45)
#define CTXDESC_CD_0_A			(1UL << 46)
#define CTXDESC_CD_0_ASET_SHIFT		47
#define CTXDESC_CD_0_ASET_SHARED	(0UL << CTXDESC_CD_0_ASET_SHIFT)
#define CTXDESC_CD_0_ASET_PRIVATE	(1UL << CTXDESC_CD_0_ASET_SHIFT)
#define CTXDESC_CD_0_ASID_SHIFT		48
#define CTXDESC_CD_0_ASID_MASK		0xffffUL

#define CTXDESC_CD_1_TTB0_SHIFT		4
#define CTXDESC_CD_1_TTB0_MASK		0xfffffffffffUL

#define CTXDESC_CD_3_MAIR_SHIFT		0

/* Convert between AArch64 (CPU) TCR format and SMMU CD format */
#define ARM_SMMU_TCR2CD(tcr, fld)					\
	(((tcr) >> ARM64_TCR_##fld##_SHIFT & ARM64_TCR_##fld##_MASK)	\
	 << CTXDESC_CD_0_TCR_##fld##_SHIFT)
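
/*
 * For illustration (editorial note, not part of the original source),
 * ARM_SMMU_TCR2CD(tcr, T0SZ) expands to:
 *
 *	(((tcr) >> ARM64_TCR_T0SZ_SHIFT & ARM64_TCR_T0SZ_MASK)
 *	 << CTXDESC_CD_0_TCR_T0SZ_SHIFT)
 *
 * i.e. it extracts one field from the CPU's TCR layout and re-places it at
 * the (possibly different) bit position used by dword 0 of the context
 * descriptor; TG0, for example, moves from bit 14 in the TCR to bit 6 in
 * the CD.
 */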

/* Command queue */
#define CMDQ_ENT_DWORDS			2
#define CMDQ_MAX_SZ_SHIFT		8

#define CMDQ_ERR_SHIFT			24
#define CMDQ_ERR_MASK			0x7f
#define CMDQ_ERR_CERROR_NONE_IDX	0
#define CMDQ_ERR_CERROR_ILL_IDX		1
#define CMDQ_ERR_CERROR_ABT_IDX		2

#define CMDQ_0_OP_SHIFT			0
#define CMDQ_0_OP_MASK			0xffUL
#define CMDQ_0_SSV			(1UL << 11)

#define CMDQ_PREFETCH_0_SID_SHIFT	32
#define CMDQ_PREFETCH_1_SIZE_SHIFT	0
#define CMDQ_PREFETCH_1_ADDR_MASK	~0xfffUL

#define CMDQ_CFGI_0_SID_SHIFT		32
#define CMDQ_CFGI_0_SID_MASK		0xffffffffUL
#define CMDQ_CFGI_1_LEAF		(1UL << 0)
#define CMDQ_CFGI_1_RANGE_SHIFT		0
#define CMDQ_CFGI_1_RANGE_MASK		0x1fUL

#define CMDQ_TLBI_0_VMID_SHIFT		32
#define CMDQ_TLBI_0_ASID_SHIFT		48
#define CMDQ_TLBI_1_LEAF		(1UL << 0)
#define CMDQ_TLBI_1_ADDR_MASK		~0xfffUL

#define CMDQ_PRI_0_SSID_SHIFT		12
#define CMDQ_PRI_0_SSID_MASK		0xfffffUL
#define CMDQ_PRI_0_SID_SHIFT		32
#define CMDQ_PRI_0_SID_MASK		0xffffffffUL
#define CMDQ_PRI_1_GRPID_SHIFT		0
#define CMDQ_PRI_1_GRPID_MASK		0x1ffUL
#define CMDQ_PRI_1_RESP_SHIFT		12
#define CMDQ_PRI_1_RESP_DENY		(0UL << CMDQ_PRI_1_RESP_SHIFT)
#define CMDQ_PRI_1_RESP_FAIL		(1UL << CMDQ_PRI_1_RESP_SHIFT)
#define CMDQ_PRI_1_RESP_SUCC		(2UL << CMDQ_PRI_1_RESP_SHIFT)

#define CMDQ_SYNC_0_CS_SHIFT		12
#define CMDQ_SYNC_0_CS_NONE		(0UL << CMDQ_SYNC_0_CS_SHIFT)
#define CMDQ_SYNC_0_CS_SEV		(2UL << CMDQ_SYNC_0_CS_SHIFT)

/* Event queue */
#define EVTQ_ENT_DWORDS			4
#define EVTQ_MAX_SZ_SHIFT		7

#define EVTQ_0_ID_SHIFT			0
#define EVTQ_0_ID_MASK			0xffUL

/* PRI queue */
#define PRIQ_ENT_DWORDS			2
#define PRIQ_MAX_SZ_SHIFT		8

#define PRIQ_0_SID_SHIFT		0
#define PRIQ_0_SID_MASK			0xffffffffUL
#define PRIQ_0_SSID_SHIFT		32
#define PRIQ_0_SSID_MASK		0xfffffUL
#define PRIQ_0_OF			(1UL << 57)
#define PRIQ_0_PERM_PRIV		(1UL << 58)
#define PRIQ_0_PERM_EXEC		(1UL << 59)
#define PRIQ_0_PERM_READ		(1UL << 60)
#define PRIQ_0_PERM_WRITE		(1UL << 61)
#define PRIQ_0_PRG_LAST			(1UL << 62)
#define PRIQ_0_SSID_V			(1UL << 63)

#define PRIQ_1_PRG_IDX_SHIFT		0
#define PRIQ_1_PRG_IDX_MASK		0x1ffUL
#define PRIQ_1_ADDR_SHIFT		12
#define PRIQ_1_ADDR_MASK		0xfffffffffffffUL

/* High-level queue structures */
#define ARM_SMMU_POLL_TIMEOUT_US	100

static bool disable_bypass;
module_param_named(disable_bypass, disable_bypass, bool, S_IRUGO);
MODULE_PARM_DESC(disable_bypass,
	"Disable bypass streams such that incoming transactions from devices that are not attached to an iommu domain will report an abort back to the device and will not be allowed to pass through the SMMU.");

enum pri_resp {
	PRI_RESP_DENY,
	PRI_RESP_FAIL,
	PRI_RESP_SUCC,
};

enum arm_smmu_msi_index {
	EVTQ_MSI_INDEX,
	GERROR_MSI_INDEX,
	PRIQ_MSI_INDEX,
	ARM_SMMU_MAX_MSIS,
};

static phys_addr_t arm_smmu_msi_cfg[ARM_SMMU_MAX_MSIS][3] = {
	[EVTQ_MSI_INDEX] = {
		ARM_SMMU_EVTQ_IRQ_CFG0,
		ARM_SMMU_EVTQ_IRQ_CFG1,
		ARM_SMMU_EVTQ_IRQ_CFG2,
	},
	[GERROR_MSI_INDEX] = {
		ARM_SMMU_GERROR_IRQ_CFG0,
		ARM_SMMU_GERROR_IRQ_CFG1,
		ARM_SMMU_GERROR_IRQ_CFG2,
	},
	[PRIQ_MSI_INDEX] = {
		ARM_SMMU_PRIQ_IRQ_CFG0,
		ARM_SMMU_PRIQ_IRQ_CFG1,
		ARM_SMMU_PRIQ_IRQ_CFG2,
	},
};

struct arm_smmu_cmdq_ent {
	/* Common fields */
	u8				opcode;
	bool				substream_valid;

	/* Command-specific fields */
	union {
		#define CMDQ_OP_PREFETCH_CFG	0x1
		struct {
			u32		sid;
			u8		size;
			u64		addr;
		} prefetch;

		#define CMDQ_OP_CFGI_STE	0x3
		#define CMDQ_OP_CFGI_ALL	0x4
		struct {
			u32		sid;
			union {
				bool	leaf;
				u8	span;
			};
		} cfgi;

		#define CMDQ_OP_TLBI_NH_ASID	0x11
		#define CMDQ_OP_TLBI_NH_VA	0x12
		#define CMDQ_OP_TLBI_EL2_ALL	0x20
		#define CMDQ_OP_TLBI_S12_VMALL	0x28
		#define CMDQ_OP_TLBI_S2_IPA	0x2a
		#define CMDQ_OP_TLBI_NSNH_ALL	0x30
		struct {
			u16		asid;
			u16		vmid;
			bool		leaf;
			u64		addr;
		} tlbi;

		#define CMDQ_OP_PRI_RESP	0x41
		struct {
			u32		sid;
			u32		ssid;
			u16		grpid;
			enum pri_resp	resp;
		} pri;

		#define CMDQ_OP_CMD_SYNC	0x46
	};
};

struct arm_smmu_queue {
	int				irq; /* Wired interrupt */

	__le64				*base;
	dma_addr_t			base_dma;
	u64				q_base;

	size_t				ent_dwords;
	u32				max_n_shift;
	u32				prod;
	u32				cons;

	u32 __iomem			*prod_reg;
	u32 __iomem			*cons_reg;
};

struct arm_smmu_cmdq {
	struct arm_smmu_queue		q;
	spinlock_t			lock;
};

struct arm_smmu_evtq {
	struct arm_smmu_queue		q;
	u32				max_stalls;
};

struct arm_smmu_priq {
	struct arm_smmu_queue		q;
};

/* High-level stream table and context descriptor structures */
struct arm_smmu_strtab_l1_desc {
	u8				span;

	__le64				*l2ptr;
	dma_addr_t			l2ptr_dma;
};

struct arm_smmu_s1_cfg {
	__le64				*cdptr;
	dma_addr_t			cdptr_dma;

	struct arm_smmu_ctx_desc {
		u16	asid;
		u64	ttbr;
		u64	tcr;
		u64	mair;
	} cd;
};

struct arm_smmu_s2_cfg {
	u16				vmid;
	u64				vttbr;
	u64				vtcr;
};

struct arm_smmu_strtab_ent {
	bool				valid;

	bool				bypass;	/* Overrides s1/s2 config */
	struct arm_smmu_s1_cfg		*s1_cfg;
	struct arm_smmu_s2_cfg		*s2_cfg;
};

struct arm_smmu_strtab_cfg {
	__le64				*strtab;
	dma_addr_t			strtab_dma;
	struct arm_smmu_strtab_l1_desc	*l1_desc;
	unsigned int			num_l1_ents;

	u64				strtab_base;
	u32				strtab_base_cfg;
};

/* An SMMUv3 instance */
struct arm_smmu_device {
	struct device			*dev;
	void __iomem			*base;

#define ARM_SMMU_FEAT_2_LVL_STRTAB	(1 << 0)
#define ARM_SMMU_FEAT_2_LVL_CDTAB	(1 << 1)
#define ARM_SMMU_FEAT_TT_LE		(1 << 2)
#define ARM_SMMU_FEAT_TT_BE		(1 << 3)
#define ARM_SMMU_FEAT_PRI		(1 << 4)
#define ARM_SMMU_FEAT_ATS		(1 << 5)
#define ARM_SMMU_FEAT_SEV		(1 << 6)
#define ARM_SMMU_FEAT_MSI		(1 << 7)
#define ARM_SMMU_FEAT_COHERENCY		(1 << 8)
#define ARM_SMMU_FEAT_TRANS_S1		(1 << 9)
#define ARM_SMMU_FEAT_TRANS_S2		(1 << 10)
#define ARM_SMMU_FEAT_STALLS		(1 << 11)
#define ARM_SMMU_FEAT_HYP		(1 << 12)
	u32				features;

#define ARM_SMMU_OPT_SKIP_PREFETCH	(1 << 0)
	u32				options;

	struct arm_smmu_cmdq		cmdq;
	struct arm_smmu_evtq		evtq;
	struct arm_smmu_priq		priq;

	int				gerr_irq;

	unsigned long			ias; /* IPA */
	unsigned long			oas; /* PA */

#define ARM_SMMU_MAX_ASIDS		(1 << 16)
	unsigned int			asid_bits;
	DECLARE_BITMAP(asid_map, ARM_SMMU_MAX_ASIDS);

#define ARM_SMMU_MAX_VMIDS		(1 << 16)
	unsigned int			vmid_bits;
	DECLARE_BITMAP(vmid_map, ARM_SMMU_MAX_VMIDS);

	unsigned int			ssid_bits;
	unsigned int			sid_bits;

	struct arm_smmu_strtab_cfg	strtab_cfg;
};

/* SMMU private data for an IOMMU group */
struct arm_smmu_group {
	struct arm_smmu_device		*smmu;
	struct arm_smmu_domain		*domain;
	int				num_sids;
	u32				*sids;
	struct arm_smmu_strtab_ent	ste;
};

/* SMMU private data for an IOMMU domain */
enum arm_smmu_domain_stage {
	ARM_SMMU_DOMAIN_S1 = 0,
	ARM_SMMU_DOMAIN_S2,
	ARM_SMMU_DOMAIN_NESTED,
};

struct arm_smmu_domain {
	struct arm_smmu_device		*smmu;
	struct mutex			init_mutex; /* Protects smmu pointer */

	struct io_pgtable_ops		*pgtbl_ops;
	spinlock_t			pgtbl_lock;

	enum arm_smmu_domain_stage	stage;
	union {
		struct arm_smmu_s1_cfg	s1_cfg;
		struct arm_smmu_s2_cfg	s2_cfg;
	};

	struct iommu_domain		domain;
};

struct arm_smmu_option_prop {
	u32 opt;
	const char *prop;
};

static struct arm_smmu_option_prop arm_smmu_options[] = {
	{ ARM_SMMU_OPT_SKIP_PREFETCH, "hisilicon,broken-prefetch-cmd" },
	{ 0, NULL},
};
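
/*
 * Hypothetical device-tree fragment (editorial illustration, not from the
 * original source) showing how such a quirk property could be attached to
 * an SMMUv3 node; the node name and address are made up:
 *
 *	smmu@2b400000 {
 *		compatible = "arm,smmu-v3";
 *		...
 *		hisilicon,broken-prefetch-cmd;
 *	};
 */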

static struct arm_smmu_domain *to_smmu_domain(struct iommu_domain *dom)
{
	return container_of(dom, struct arm_smmu_domain, domain);
}

static void parse_driver_options(struct arm_smmu_device *smmu)
{
	int i = 0;

	do {
		if (of_property_read_bool(smmu->dev->of_node,
					  arm_smmu_options[i].prop)) {
			smmu->options |= arm_smmu_options[i].opt;
			dev_notice(smmu->dev, "option %s\n",
				   arm_smmu_options[i].prop);
		}
	} while (arm_smmu_options[++i].opt);
}

/* Low-level queue manipulation functions */
static bool queue_full(struct arm_smmu_queue *q)
{
	return Q_IDX(q, q->prod) == Q_IDX(q, q->cons) &&
	       Q_WRP(q, q->prod) != Q_WRP(q, q->cons);
}

static bool queue_empty(struct arm_smmu_queue *q)
{
	return Q_IDX(q, q->prod) == Q_IDX(q, q->cons) &&
	       Q_WRP(q, q->prod) == Q_WRP(q, q->cons);
}

static void queue_sync_cons(struct arm_smmu_queue *q)
{
	q->cons = readl_relaxed(q->cons_reg);
}

static void queue_inc_cons(struct arm_smmu_queue *q)
{
	u32 cons = (Q_WRP(q, q->cons) | Q_IDX(q, q->cons)) + 1;

	q->cons = Q_OVF(q, q->cons) | Q_WRP(q, cons) | Q_IDX(q, cons);
	writel(q->cons, q->cons_reg);
}

static int queue_sync_prod(struct arm_smmu_queue *q)
{
	int ret = 0;
	u32 prod = readl_relaxed(q->prod_reg);

	if (Q_OVF(q, prod) != Q_OVF(q, q->prod))
		ret = -EOVERFLOW;

	q->prod = prod;
	return ret;
}

static void queue_inc_prod(struct arm_smmu_queue *q)
{
	u32 prod = (Q_WRP(q, q->prod) | Q_IDX(q, q->prod)) + 1;

	q->prod = Q_OVF(q, q->prod) | Q_WRP(q, prod) | Q_IDX(q, prod);
	writel(q->prod, q->prod_reg);
}

static bool __queue_cons_before(struct arm_smmu_queue *q, u32 until)
{
	if (Q_WRP(q, q->cons) == Q_WRP(q, until))
		return Q_IDX(q, q->cons) < Q_IDX(q, until);

	return Q_IDX(q, q->cons) >= Q_IDX(q, until);
}

static int queue_poll_cons(struct arm_smmu_queue *q, u32 until, bool wfe)
{
	ktime_t timeout = ktime_add_us(ktime_get(), ARM_SMMU_POLL_TIMEOUT_US);

	while (queue_sync_cons(q), __queue_cons_before(q, until)) {
		if (ktime_compare(ktime_get(), timeout) > 0)
			return -ETIMEDOUT;

		if (wfe) {
			wfe();
		} else {
			cpu_relax();
			udelay(1);
		}
	}

	return 0;
}

static void queue_write(__le64 *dst, u64 *src, size_t n_dwords)
{
	int i;

	for (i = 0; i < n_dwords; ++i)
		*dst++ = cpu_to_le64(*src++);
}

static int queue_insert_raw(struct arm_smmu_queue *q, u64 *ent)
{
	if (queue_full(q))
		return -ENOSPC;

	queue_write(Q_ENT(q, q->prod), ent, q->ent_dwords);
	queue_inc_prod(q);
	return 0;
}

static void queue_read(__le64 *dst, u64 *src, size_t n_dwords)
{
	int i;

	for (i = 0; i < n_dwords; ++i)
		*dst++ = le64_to_cpu(*src++);
}

static int queue_remove_raw(struct arm_smmu_queue *q, u64 *ent)
{
	if (queue_empty(q))
		return -EAGAIN;

	queue_read(ent, Q_ENT(q, q->cons), q->ent_dwords);
	queue_inc_cons(q);
	return 0;
}

770/* High-level queue accessors */
771static int arm_smmu_cmdq_build_cmd(u64 *cmd, struct arm_smmu_cmdq_ent *ent)
772{
773 memset(cmd, 0, CMDQ_ENT_DWORDS << 3);
774 cmd[0] |= (ent->opcode & CMDQ_0_OP_MASK) << CMDQ_0_OP_SHIFT;
775
776 switch (ent->opcode) {
777 case CMDQ_OP_TLBI_EL2_ALL:
778 case CMDQ_OP_TLBI_NSNH_ALL:
779 break;
780 case CMDQ_OP_PREFETCH_CFG:
781 cmd[0] |= (u64)ent->prefetch.sid << CMDQ_PREFETCH_0_SID_SHIFT;
782 cmd[1] |= ent->prefetch.size << CMDQ_PREFETCH_1_SIZE_SHIFT;
783 cmd[1] |= ent->prefetch.addr & CMDQ_PREFETCH_1_ADDR_MASK;
784 break;
785 case CMDQ_OP_CFGI_STE:
786 cmd[0] |= (u64)ent->cfgi.sid << CMDQ_CFGI_0_SID_SHIFT;
787 cmd[1] |= ent->cfgi.leaf ? CMDQ_CFGI_1_LEAF : 0;
788 break;
789 case CMDQ_OP_CFGI_ALL:
790 /* Cover the entire SID range */
791 cmd[1] |= CMDQ_CFGI_1_RANGE_MASK << CMDQ_CFGI_1_RANGE_SHIFT;
792 break;
793 case CMDQ_OP_TLBI_NH_VA:
794 cmd[0] |= (u64)ent->tlbi.asid << CMDQ_TLBI_0_ASID_SHIFT;
795 /* Fallthrough */
796 case CMDQ_OP_TLBI_S2_IPA:
797 cmd[0] |= (u64)ent->tlbi.vmid << CMDQ_TLBI_0_VMID_SHIFT;
798 cmd[1] |= ent->tlbi.leaf ? CMDQ_TLBI_1_LEAF : 0;
799 cmd[1] |= ent->tlbi.addr & CMDQ_TLBI_1_ADDR_MASK;
800 break;
801 case CMDQ_OP_TLBI_NH_ASID:
802 cmd[0] |= (u64)ent->tlbi.asid << CMDQ_TLBI_0_ASID_SHIFT;
803 /* Fallthrough */
804 case CMDQ_OP_TLBI_S12_VMALL:
805 cmd[0] |= (u64)ent->tlbi.vmid << CMDQ_TLBI_0_VMID_SHIFT;
806 break;
807 case CMDQ_OP_PRI_RESP:
808 cmd[0] |= ent->substream_valid ? CMDQ_0_SSV : 0;
809 cmd[0] |= ent->pri.ssid << CMDQ_PRI_0_SSID_SHIFT;
810 cmd[0] |= (u64)ent->pri.sid << CMDQ_PRI_0_SID_SHIFT;
811 cmd[1] |= ent->pri.grpid << CMDQ_PRI_1_GRPID_SHIFT;
812 switch (ent->pri.resp) {
813 case PRI_RESP_DENY:
814 cmd[1] |= CMDQ_PRI_1_RESP_DENY;
815 break;
816 case PRI_RESP_FAIL:
817 cmd[1] |= CMDQ_PRI_1_RESP_FAIL;
818 break;
819 case PRI_RESP_SUCC:
820 cmd[1] |= CMDQ_PRI_1_RESP_SUCC;
821 break;
822 default:
823 return -EINVAL;
824 }
825 break;
826 case CMDQ_OP_CMD_SYNC:
827 cmd[0] |= CMDQ_SYNC_0_CS_SEV;
828 break;
829 default:
830 return -ENOENT;
831 }
832
833 return 0;
834}
835
836static void arm_smmu_cmdq_skip_err(struct arm_smmu_device *smmu)
837{
838 static const char *cerror_str[] = {
839 [CMDQ_ERR_CERROR_NONE_IDX] = "No error",
840 [CMDQ_ERR_CERROR_ILL_IDX] = "Illegal command",
841 [CMDQ_ERR_CERROR_ABT_IDX] = "Abort on command fetch",
842 };
843
844 int i;
845 u64 cmd[CMDQ_ENT_DWORDS];
846 struct arm_smmu_queue *q = &smmu->cmdq.q;
847 u32 cons = readl_relaxed(q->cons_reg);
848 u32 idx = cons >> CMDQ_ERR_SHIFT & CMDQ_ERR_MASK;
849 struct arm_smmu_cmdq_ent cmd_sync = {
850 .opcode = CMDQ_OP_CMD_SYNC,
851 };
852
853 dev_err(smmu->dev, "CMDQ error (cons 0x%08x): %s\n", cons,
854 cerror_str[idx]);
855
856 switch (idx) {
857 case CMDQ_ERR_CERROR_ILL_IDX:
858 break;
859 case CMDQ_ERR_CERROR_ABT_IDX:
860 dev_err(smmu->dev, "retrying command fetch\n");
861 case CMDQ_ERR_CERROR_NONE_IDX:
862 return;
863 }
864
865 /*
866 * We may have concurrent producers, so we need to be careful
867 * not to touch any of the shadow cmdq state.
868 */
869 queue_read(cmd, Q_ENT(q, idx), q->ent_dwords);
870 dev_err(smmu->dev, "skipping command in error state:\n");
871 for (i = 0; i < ARRAY_SIZE(cmd); ++i)
872 dev_err(smmu->dev, "\t0x%016llx\n", (unsigned long long)cmd[i]);
873
874 /* Convert the erroneous command into a CMD_SYNC */
875 if (arm_smmu_cmdq_build_cmd(cmd, &cmd_sync)) {
876 dev_err(smmu->dev, "failed to convert to CMD_SYNC\n");
877 return;
878 }
879
880 queue_write(cmd, Q_ENT(q, idx), q->ent_dwords);
881}
882
883static void arm_smmu_cmdq_issue_cmd(struct arm_smmu_device *smmu,
884 struct arm_smmu_cmdq_ent *ent)
885{
886 u32 until;
887 u64 cmd[CMDQ_ENT_DWORDS];
888 bool wfe = !!(smmu->features & ARM_SMMU_FEAT_SEV);
889 struct arm_smmu_queue *q = &smmu->cmdq.q;
890
891 if (arm_smmu_cmdq_build_cmd(cmd, ent)) {
892 dev_warn(smmu->dev, "ignoring unknown CMDQ opcode 0x%x\n",
893 ent->opcode);
894 return;
895 }
896
897 spin_lock(&smmu->cmdq.lock);
898 while (until = q->prod + 1, queue_insert_raw(q, cmd) == -ENOSPC) {
899 /*
900 * Keep the queue locked, otherwise the producer could wrap
901 * twice and we could see a future consumer pointer that looks
902 * like it's behind us.
903 */
904 if (queue_poll_cons(q, until, wfe))
905 dev_err_ratelimited(smmu->dev, "CMDQ timeout\n");
906 }
907
908 if (ent->opcode == CMDQ_OP_CMD_SYNC && queue_poll_cons(q, until, wfe))
909 dev_err_ratelimited(smmu->dev, "CMD_SYNC timeout\n");
910 spin_unlock(&smmu->cmdq.lock);
911}
912
913/* Context descriptor manipulation functions */
914static u64 arm_smmu_cpu_tcr_to_cd(u64 tcr)
915{
916 u64 val = 0;
917
918 /* Repack the TCR. Just care about TTBR0 for now */
919 val |= ARM_SMMU_TCR2CD(tcr, T0SZ);
920 val |= ARM_SMMU_TCR2CD(tcr, TG0);
921 val |= ARM_SMMU_TCR2CD(tcr, IRGN0);
922 val |= ARM_SMMU_TCR2CD(tcr, ORGN0);
923 val |= ARM_SMMU_TCR2CD(tcr, SH0);
924 val |= ARM_SMMU_TCR2CD(tcr, EPD0);
925 val |= ARM_SMMU_TCR2CD(tcr, EPD1);
926 val |= ARM_SMMU_TCR2CD(tcr, IPS);
927 val |= ARM_SMMU_TCR2CD(tcr, TBI0);
928
929 return val;
930}
931
932static void arm_smmu_write_ctx_desc(struct arm_smmu_device *smmu,
933 struct arm_smmu_s1_cfg *cfg)
934{
935 u64 val;
936
937 /*
938 * We don't need to issue any invalidation here, as we'll invalidate
939 * the STE when installing the new entry anyway.
940 */
941 val = arm_smmu_cpu_tcr_to_cd(cfg->cd.tcr) |
942#ifdef __BIG_ENDIAN
943 CTXDESC_CD_0_ENDI |
944#endif
945 CTXDESC_CD_0_R | CTXDESC_CD_0_A | CTXDESC_CD_0_ASET_PRIVATE |
946 CTXDESC_CD_0_AA64 | (u64)cfg->cd.asid << CTXDESC_CD_0_ASID_SHIFT |
947 CTXDESC_CD_0_V;
948 cfg->cdptr[0] = cpu_to_le64(val);
949
950 val = cfg->cd.ttbr & CTXDESC_CD_1_TTB0_MASK << CTXDESC_CD_1_TTB0_SHIFT;
951 cfg->cdptr[1] = cpu_to_le64(val);
952
953 cfg->cdptr[3] = cpu_to_le64(cfg->cd.mair << CTXDESC_CD_3_MAIR_SHIFT);
954}
955
956/* Stream table manipulation functions */
957static void
958arm_smmu_write_strtab_l1_desc(__le64 *dst, struct arm_smmu_strtab_l1_desc *desc)
959{
960 u64 val = 0;
961
962 val |= (desc->span & STRTAB_L1_DESC_SPAN_MASK)
963 << STRTAB_L1_DESC_SPAN_SHIFT;
964 val |= desc->l2ptr_dma &
965 STRTAB_L1_DESC_L2PTR_MASK << STRTAB_L1_DESC_L2PTR_SHIFT;
966
967 *dst = cpu_to_le64(val);
968}
969
970static void arm_smmu_sync_ste_for_sid(struct arm_smmu_device *smmu, u32 sid)
971{
972 struct arm_smmu_cmdq_ent cmd = {
973 .opcode = CMDQ_OP_CFGI_STE,
974 .cfgi = {
975 .sid = sid,
976 .leaf = true,
977 },
978 };
979
980 arm_smmu_cmdq_issue_cmd(smmu, &cmd);
981 cmd.opcode = CMDQ_OP_CMD_SYNC;
982 arm_smmu_cmdq_issue_cmd(smmu, &cmd);
983}
984
985static void arm_smmu_write_strtab_ent(struct arm_smmu_device *smmu, u32 sid,
986 __le64 *dst, struct arm_smmu_strtab_ent *ste)
987{
988 /*
989 * This is hideously complicated, but we only really care about
990 * three cases at the moment:
991 *
992 * 1. Invalid (all zero) -> bypass (init)
993 * 2. Bypass -> translation (attach)
994 * 3. Translation -> bypass (detach)
995 *
996 * Given that we can't update the STE atomically and the SMMU
997 * doesn't read the thing in a defined order, that leaves us
998 * with the following maintenance requirements:
999 *
1000 * 1. Update Config, return (init time STEs aren't live)
1001 * 2. Write everything apart from dword 0, sync, write dword 0, sync
1002 * 3. Update Config, sync
1003 */
1004 u64 val = le64_to_cpu(dst[0]);
1005 bool ste_live = false;
1006 struct arm_smmu_cmdq_ent prefetch_cmd = {
1007 .opcode = CMDQ_OP_PREFETCH_CFG,
1008 .prefetch = {
1009 .sid = sid,
1010 },
1011 };
1012
1013 if (val & STRTAB_STE_0_V) {
1014 u64 cfg;
1015
1016 cfg = val & STRTAB_STE_0_CFG_MASK << STRTAB_STE_0_CFG_SHIFT;
1017 switch (cfg) {
1018 case STRTAB_STE_0_CFG_BYPASS:
1019 break;
1020 case STRTAB_STE_0_CFG_S1_TRANS:
1021 case STRTAB_STE_0_CFG_S2_TRANS:
1022 ste_live = true;
1023 break;
1024 default:
1025 BUG(); /* STE corruption */
1026 }
1027 }
1028
1029 /* Nuke the existing Config, as we're going to rewrite it */
1030 val &= ~(STRTAB_STE_0_CFG_MASK << STRTAB_STE_0_CFG_SHIFT);
1031
1032 if (ste->valid)
1033 val |= STRTAB_STE_0_V;
1034 else
1035 val &= ~STRTAB_STE_0_V;
1036
1037 if (ste->bypass) {
1038 val |= disable_bypass ? STRTAB_STE_0_CFG_ABORT
1039 : STRTAB_STE_0_CFG_BYPASS;
1040 dst[0] = cpu_to_le64(val);
1041 dst[2] = 0; /* Nuke the VMID */
1042 if (ste_live)
1043 arm_smmu_sync_ste_for_sid(smmu, sid);
1044 return;
1045 }
1046
1047 if (ste->s1_cfg) {
1048 BUG_ON(ste_live);
1049 dst[1] = cpu_to_le64(
1050 STRTAB_STE_1_S1C_CACHE_WBRA
1051 << STRTAB_STE_1_S1CIR_SHIFT |
1052 STRTAB_STE_1_S1C_CACHE_WBRA
1053 << STRTAB_STE_1_S1COR_SHIFT |
1054 STRTAB_STE_1_S1C_SH_ISH << STRTAB_STE_1_S1CSH_SHIFT |
1055 STRTAB_STE_1_S1STALLD |
1056#ifdef CONFIG_PCI_ATS
1057 STRTAB_STE_1_EATS_TRANS << STRTAB_STE_1_EATS_SHIFT |
1058#endif
1059 STRTAB_STE_1_STRW_NSEL1 << STRTAB_STE_1_STRW_SHIFT);
1060
1061 val |= (ste->s1_cfg->cdptr_dma & STRTAB_STE_0_S1CTXPTR_MASK
1062 << STRTAB_STE_0_S1CTXPTR_SHIFT) |
1063 STRTAB_STE_0_CFG_S1_TRANS;
1064
1065 }
1066
1067 if (ste->s2_cfg) {
1068 BUG_ON(ste_live);
1069 dst[2] = cpu_to_le64(
1070 ste->s2_cfg->vmid << STRTAB_STE_2_S2VMID_SHIFT |
1071 (ste->s2_cfg->vtcr & STRTAB_STE_2_VTCR_MASK)
1072 << STRTAB_STE_2_VTCR_SHIFT |
1073#ifdef __BIG_ENDIAN
1074 STRTAB_STE_2_S2ENDI |
1075#endif
1076 STRTAB_STE_2_S2PTW | STRTAB_STE_2_S2AA64 |
1077 STRTAB_STE_2_S2R);
1078
1079 dst[3] = cpu_to_le64(ste->s2_cfg->vttbr &
1080 STRTAB_STE_3_S2TTB_MASK << STRTAB_STE_3_S2TTB_SHIFT);
1081
1082 val |= STRTAB_STE_0_CFG_S2_TRANS;
1083 }
1084
1085 arm_smmu_sync_ste_for_sid(smmu, sid);
1086 dst[0] = cpu_to_le64(val);
1087 arm_smmu_sync_ste_for_sid(smmu, sid);
1088
1089 /* It's likely that we'll want to use the new STE soon */
Zhen Lei5e929462015-07-07 04:30:18 +01001090 if (!(smmu->options & ARM_SMMU_OPT_SKIP_PREFETCH))
1091 arm_smmu_cmdq_issue_cmd(smmu, &prefetch_cmd);
Will Deacon48ec83b2015-05-27 17:25:59 +01001092}
1093
1094static void arm_smmu_init_bypass_stes(u64 *strtab, unsigned int nent)
1095{
1096 unsigned int i;
1097 struct arm_smmu_strtab_ent ste = {
1098 .valid = true,
1099 .bypass = true,
1100 };
1101
1102 for (i = 0; i < nent; ++i) {
1103 arm_smmu_write_strtab_ent(NULL, -1, strtab, &ste);
1104 strtab += STRTAB_STE_DWORDS;
1105 }
1106}
1107
1108static int arm_smmu_init_l2_strtab(struct arm_smmu_device *smmu, u32 sid)
1109{
1110 size_t size;
1111 void *strtab;
1112 struct arm_smmu_strtab_cfg *cfg = &smmu->strtab_cfg;
1113 struct arm_smmu_strtab_l1_desc *desc = &cfg->l1_desc[sid >> STRTAB_SPLIT];
1114
1115 if (desc->l2ptr)
1116 return 0;
1117
1118 size = 1 << (STRTAB_SPLIT + ilog2(STRTAB_STE_DWORDS) + 3);
Zhen Lei69146e72015-06-26 09:32:58 +01001119 strtab = &cfg->strtab[(sid >> STRTAB_SPLIT) * STRTAB_L1_DESC_DWORDS];
Will Deacon48ec83b2015-05-27 17:25:59 +01001120
1121 desc->span = STRTAB_SPLIT + 1;
1122 desc->l2ptr = dma_zalloc_coherent(smmu->dev, size, &desc->l2ptr_dma,
1123 GFP_KERNEL);
1124 if (!desc->l2ptr) {
1125 dev_err(smmu->dev,
1126 "failed to allocate l2 stream table for SID %u\n",
1127 sid);
1128 return -ENOMEM;
1129 }
1130
1131 arm_smmu_init_bypass_stes(desc->l2ptr, 1 << STRTAB_SPLIT);
1132 arm_smmu_write_strtab_l1_desc(strtab, desc);
1133 return 0;
1134}
1135
1136/* IRQ and event handlers */
1137static irqreturn_t arm_smmu_evtq_thread(int irq, void *dev)
1138{
1139 int i;
1140 struct arm_smmu_device *smmu = dev;
1141 struct arm_smmu_queue *q = &smmu->evtq.q;
1142 u64 evt[EVTQ_ENT_DWORDS];
1143
1144 while (!queue_remove_raw(q, evt)) {
1145 u8 id = evt[0] >> EVTQ_0_ID_SHIFT & EVTQ_0_ID_MASK;
1146
1147 dev_info(smmu->dev, "event 0x%02x received:\n", id);
1148 for (i = 0; i < ARRAY_SIZE(evt); ++i)
1149 dev_info(smmu->dev, "\t0x%016llx\n",
1150 (unsigned long long)evt[i]);
1151 }
1152
1153 /* Sync our overflow flag, as we believe we're up to speed */
1154 q->cons = Q_OVF(q, q->prod) | Q_WRP(q, q->cons) | Q_IDX(q, q->cons);
1155 return IRQ_HANDLED;
1156}
1157
1158static irqreturn_t arm_smmu_evtq_handler(int irq, void *dev)
1159{
1160 irqreturn_t ret = IRQ_WAKE_THREAD;
1161 struct arm_smmu_device *smmu = dev;
1162 struct arm_smmu_queue *q = &smmu->evtq.q;
1163
1164 /*
1165 * Not much we can do on overflow, so scream and pretend we're
1166 * trying harder.
1167 */
1168 if (queue_sync_prod(q) == -EOVERFLOW)
1169 dev_err(smmu->dev, "EVTQ overflow detected -- events lost\n");
1170 else if (queue_empty(q))
1171 ret = IRQ_NONE;
1172
1173 return ret;
1174}
1175
1176static irqreturn_t arm_smmu_priq_thread(int irq, void *dev)
1177{
1178 struct arm_smmu_device *smmu = dev;
1179 struct arm_smmu_queue *q = &smmu->priq.q;
1180 u64 evt[PRIQ_ENT_DWORDS];
1181
1182 while (!queue_remove_raw(q, evt)) {
1183 u32 sid, ssid;
1184 u16 grpid;
1185 bool ssv, last;
1186
1187 sid = evt[0] >> PRIQ_0_SID_SHIFT & PRIQ_0_SID_MASK;
1188 ssv = evt[0] & PRIQ_0_SSID_V;
1189 ssid = ssv ? evt[0] >> PRIQ_0_SSID_SHIFT & PRIQ_0_SSID_MASK : 0;
1190 last = evt[0] & PRIQ_0_PRG_LAST;
1191 grpid = evt[1] >> PRIQ_1_PRG_IDX_SHIFT & PRIQ_1_PRG_IDX_MASK;
1192
1193 dev_info(smmu->dev, "unexpected PRI request received:\n");
1194 dev_info(smmu->dev,
1195 "\tsid 0x%08x.0x%05x: [%u%s] %sprivileged %s%s%s access at iova 0x%016llx\n",
1196 sid, ssid, grpid, last ? "L" : "",
1197 evt[0] & PRIQ_0_PERM_PRIV ? "" : "un",
1198 evt[0] & PRIQ_0_PERM_READ ? "R" : "",
1199 evt[0] & PRIQ_0_PERM_WRITE ? "W" : "",
1200 evt[0] & PRIQ_0_PERM_EXEC ? "X" : "",
1201 evt[1] & PRIQ_1_ADDR_MASK << PRIQ_1_ADDR_SHIFT);
1202
1203 if (last) {
1204 struct arm_smmu_cmdq_ent cmd = {
1205 .opcode = CMDQ_OP_PRI_RESP,
1206 .substream_valid = ssv,
1207 .pri = {
1208 .sid = sid,
1209 .ssid = ssid,
1210 .grpid = grpid,
1211 .resp = PRI_RESP_DENY,
1212 },
1213 };
1214
1215 arm_smmu_cmdq_issue_cmd(smmu, &cmd);
1216 }
1217 }
1218
1219 /* Sync our overflow flag, as we believe we're up to speed */
1220 q->cons = Q_OVF(q, q->prod) | Q_WRP(q, q->cons) | Q_IDX(q, q->cons);
1221 return IRQ_HANDLED;
1222}
1223
1224static irqreturn_t arm_smmu_priq_handler(int irq, void *dev)
1225{
1226 irqreturn_t ret = IRQ_WAKE_THREAD;
1227 struct arm_smmu_device *smmu = dev;
1228 struct arm_smmu_queue *q = &smmu->priq.q;
1229
1230 /* PRIQ overflow indicates a programming error */
1231 if (queue_sync_prod(q) == -EOVERFLOW)
1232 dev_err(smmu->dev, "PRIQ overflow detected -- requests lost\n");
1233 else if (queue_empty(q))
1234 ret = IRQ_NONE;
1235
1236 return ret;
1237}
1238
1239static irqreturn_t arm_smmu_cmdq_sync_handler(int irq, void *dev)
1240{
1241 /* We don't actually use CMD_SYNC interrupts for anything */
1242 return IRQ_HANDLED;
1243}
1244
1245static int arm_smmu_device_disable(struct arm_smmu_device *smmu);
1246
1247static irqreturn_t arm_smmu_gerror_handler(int irq, void *dev)
1248{
1249 u32 gerror, gerrorn;
1250 struct arm_smmu_device *smmu = dev;
1251
1252 gerror = readl_relaxed(smmu->base + ARM_SMMU_GERROR);
1253 gerrorn = readl_relaxed(smmu->base + ARM_SMMU_GERRORN);
1254
1255 gerror ^= gerrorn;
1256 if (!(gerror & GERROR_ERR_MASK))
1257 return IRQ_NONE; /* No errors pending */
1258
1259 dev_warn(smmu->dev,
1260 "unexpected global error reported (0x%08x), this could be serious\n",
1261 gerror);
1262
1263 if (gerror & GERROR_SFM_ERR) {
1264 dev_err(smmu->dev, "device has entered Service Failure Mode!\n");
1265 arm_smmu_device_disable(smmu);
1266 }
1267
1268 if (gerror & GERROR_MSI_GERROR_ABT_ERR)
1269 dev_warn(smmu->dev, "GERROR MSI write aborted\n");
1270
1271 if (gerror & GERROR_MSI_PRIQ_ABT_ERR) {
1272 dev_warn(smmu->dev, "PRIQ MSI write aborted\n");
1273 arm_smmu_priq_handler(irq, smmu->dev);
1274 }
1275
1276 if (gerror & GERROR_MSI_EVTQ_ABT_ERR) {
1277 dev_warn(smmu->dev, "EVTQ MSI write aborted\n");
1278 arm_smmu_evtq_handler(irq, smmu->dev);
1279 }
1280
1281 if (gerror & GERROR_MSI_CMDQ_ABT_ERR) {
1282 dev_warn(smmu->dev, "CMDQ MSI write aborted\n");
1283 arm_smmu_cmdq_sync_handler(irq, smmu->dev);
1284 }
1285
1286 if (gerror & GERROR_PRIQ_ABT_ERR)
1287 dev_err(smmu->dev, "PRIQ write aborted -- events may have been lost\n");
1288
1289 if (gerror & GERROR_EVTQ_ABT_ERR)
1290 dev_err(smmu->dev, "EVTQ write aborted -- events may have been lost\n");
1291
1292 if (gerror & GERROR_CMDQ_ERR)
1293 arm_smmu_cmdq_skip_err(smmu);
1294
1295 writel(gerror, smmu->base + ARM_SMMU_GERRORN);
1296 return IRQ_HANDLED;
1297}
1298
1299/* IO_PGTABLE API */
1300static void __arm_smmu_tlb_sync(struct arm_smmu_device *smmu)
1301{
1302 struct arm_smmu_cmdq_ent cmd;
1303
1304 cmd.opcode = CMDQ_OP_CMD_SYNC;
1305 arm_smmu_cmdq_issue_cmd(smmu, &cmd);
1306}
1307
1308static void arm_smmu_tlb_sync(void *cookie)
1309{
1310 struct arm_smmu_domain *smmu_domain = cookie;
1311 __arm_smmu_tlb_sync(smmu_domain->smmu);
1312}
1313
1314static void arm_smmu_tlb_inv_context(void *cookie)
1315{
1316 struct arm_smmu_domain *smmu_domain = cookie;
1317 struct arm_smmu_device *smmu = smmu_domain->smmu;
1318 struct arm_smmu_cmdq_ent cmd;
1319
1320 if (smmu_domain->stage == ARM_SMMU_DOMAIN_S1) {
1321 cmd.opcode = CMDQ_OP_TLBI_NH_ASID;
1322 cmd.tlbi.asid = smmu_domain->s1_cfg.cd.asid;
1323 cmd.tlbi.vmid = 0;
1324 } else {
1325 cmd.opcode = CMDQ_OP_TLBI_S12_VMALL;
1326 cmd.tlbi.vmid = smmu_domain->s2_cfg.vmid;
1327 }
1328
1329 arm_smmu_cmdq_issue_cmd(smmu, &cmd);
1330 __arm_smmu_tlb_sync(smmu);
1331}
1332
1333static void arm_smmu_tlb_inv_range_nosync(unsigned long iova, size_t size,
1334 bool leaf, void *cookie)
1335{
1336 struct arm_smmu_domain *smmu_domain = cookie;
1337 struct arm_smmu_device *smmu = smmu_domain->smmu;
1338 struct arm_smmu_cmdq_ent cmd = {
1339 .tlbi = {
1340 .leaf = leaf,
1341 .addr = iova,
1342 },
1343 };
1344
1345 if (smmu_domain->stage == ARM_SMMU_DOMAIN_S1) {
1346 cmd.opcode = CMDQ_OP_TLBI_NH_VA;
1347 cmd.tlbi.asid = smmu_domain->s1_cfg.cd.asid;
1348 } else {
1349 cmd.opcode = CMDQ_OP_TLBI_S2_IPA;
1350 cmd.tlbi.vmid = smmu_domain->s2_cfg.vmid;
1351 }
1352
1353 arm_smmu_cmdq_issue_cmd(smmu, &cmd);
1354}
1355
Will Deacon48ec83b2015-05-27 17:25:59 +01001356static struct iommu_gather_ops arm_smmu_gather_ops = {
1357 .tlb_flush_all = arm_smmu_tlb_inv_context,
1358 .tlb_add_flush = arm_smmu_tlb_inv_range_nosync,
1359 .tlb_sync = arm_smmu_tlb_sync,
Will Deacon48ec83b2015-05-27 17:25:59 +01001360};
1361
1362/* IOMMU API */
1363static bool arm_smmu_capable(enum iommu_cap cap)
1364{
1365 switch (cap) {
1366 case IOMMU_CAP_CACHE_COHERENCY:
1367 return true;
1368 case IOMMU_CAP_INTR_REMAP:
1369 return true; /* MSIs are just memory writes */
1370 case IOMMU_CAP_NOEXEC:
1371 return true;
1372 default:
1373 return false;
1374 }
1375}
1376
1377static struct iommu_domain *arm_smmu_domain_alloc(unsigned type)
1378{
1379 struct arm_smmu_domain *smmu_domain;
1380
1381 if (type != IOMMU_DOMAIN_UNMANAGED)
1382 return NULL;
1383
1384 /*
1385 * Allocate the domain and initialise some of its data structures.
1386 * We can't really do anything meaningful until we've added a
1387 * master.
1388 */
1389 smmu_domain = kzalloc(sizeof(*smmu_domain), GFP_KERNEL);
1390 if (!smmu_domain)
1391 return NULL;
1392
1393 mutex_init(&smmu_domain->init_mutex);
1394 spin_lock_init(&smmu_domain->pgtbl_lock);
1395 return &smmu_domain->domain;
1396}
1397
1398static int arm_smmu_bitmap_alloc(unsigned long *map, int span)
1399{
1400 int idx, size = 1 << span;
1401
1402 do {
1403 idx = find_first_zero_bit(map, size);
1404 if (idx == size)
1405 return -ENOSPC;
1406 } while (test_and_set_bit(idx, map));
1407
1408 return idx;
1409}
1410
1411static void arm_smmu_bitmap_free(unsigned long *map, int idx)
1412{
1413 clear_bit(idx, map);
1414}
1415
1416static void arm_smmu_domain_free(struct iommu_domain *domain)
1417{
1418 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
1419 struct arm_smmu_device *smmu = smmu_domain->smmu;
1420
Markus Elfringa6e08fb2015-06-29 17:47:43 +01001421 free_io_pgtable_ops(smmu_domain->pgtbl_ops);
Will Deacon48ec83b2015-05-27 17:25:59 +01001422
1423 /* Free the CD and ASID, if we allocated them */
1424 if (smmu_domain->stage == ARM_SMMU_DOMAIN_S1) {
1425 struct arm_smmu_s1_cfg *cfg = &smmu_domain->s1_cfg;
1426
1427 if (cfg->cdptr) {
1428 dma_free_coherent(smmu_domain->smmu->dev,
1429 CTXDESC_CD_DWORDS << 3,
1430 cfg->cdptr,
1431 cfg->cdptr_dma);
1432
1433 arm_smmu_bitmap_free(smmu->asid_map, cfg->cd.asid);
1434 }
1435 } else {
1436 struct arm_smmu_s2_cfg *cfg = &smmu_domain->s2_cfg;
1437 if (cfg->vmid)
1438 arm_smmu_bitmap_free(smmu->vmid_map, cfg->vmid);
1439 }
1440
1441 kfree(smmu_domain);
1442}
1443
1444static int arm_smmu_domain_finalise_s1(struct arm_smmu_domain *smmu_domain,
1445 struct io_pgtable_cfg *pgtbl_cfg)
1446{
1447 int ret;
Will Deaconc0733a22015-10-13 17:51:14 +01001448 int asid;
Will Deacon48ec83b2015-05-27 17:25:59 +01001449 struct arm_smmu_device *smmu = smmu_domain->smmu;
1450 struct arm_smmu_s1_cfg *cfg = &smmu_domain->s1_cfg;
1451
1452 asid = arm_smmu_bitmap_alloc(smmu->asid_map, smmu->asid_bits);
1453 if (IS_ERR_VALUE(asid))
1454 return asid;
1455
1456 cfg->cdptr = dma_zalloc_coherent(smmu->dev, CTXDESC_CD_DWORDS << 3,
1457 &cfg->cdptr_dma, GFP_KERNEL);
1458 if (!cfg->cdptr) {
1459 dev_warn(smmu->dev, "failed to allocate context descriptor\n");
Will Deaconc0733a22015-10-13 17:51:14 +01001460 ret = -ENOMEM;
Will Deacon48ec83b2015-05-27 17:25:59 +01001461 goto out_free_asid;
1462 }
1463
Will Deaconc0733a22015-10-13 17:51:14 +01001464 cfg->cd.asid = (u16)asid;
Will Deacon48ec83b2015-05-27 17:25:59 +01001465 cfg->cd.ttbr = pgtbl_cfg->arm_lpae_s1_cfg.ttbr[0];
1466 cfg->cd.tcr = pgtbl_cfg->arm_lpae_s1_cfg.tcr;
1467 cfg->cd.mair = pgtbl_cfg->arm_lpae_s1_cfg.mair[0];
1468 return 0;
1469
1470out_free_asid:
1471 arm_smmu_bitmap_free(smmu->asid_map, asid);
1472 return ret;
1473}
1474
1475static int arm_smmu_domain_finalise_s2(struct arm_smmu_domain *smmu_domain,
1476 struct io_pgtable_cfg *pgtbl_cfg)
1477{
Will Deaconc0733a22015-10-13 17:51:14 +01001478 int vmid;
Will Deacon48ec83b2015-05-27 17:25:59 +01001479 struct arm_smmu_device *smmu = smmu_domain->smmu;
1480 struct arm_smmu_s2_cfg *cfg = &smmu_domain->s2_cfg;
1481
1482 vmid = arm_smmu_bitmap_alloc(smmu->vmid_map, smmu->vmid_bits);
1483 if (IS_ERR_VALUE(vmid))
1484 return vmid;
1485
Will Deaconc0733a22015-10-13 17:51:14 +01001486 cfg->vmid = (u16)vmid;
Will Deacon48ec83b2015-05-27 17:25:59 +01001487 cfg->vttbr = pgtbl_cfg->arm_lpae_s2_cfg.vttbr;
1488 cfg->vtcr = pgtbl_cfg->arm_lpae_s2_cfg.vtcr;
1489 return 0;
1490}
1491
1492static struct iommu_ops arm_smmu_ops;
1493
1494static int arm_smmu_domain_finalise(struct iommu_domain *domain)
1495{
1496 int ret;
1497 unsigned long ias, oas;
1498 enum io_pgtable_fmt fmt;
1499 struct io_pgtable_cfg pgtbl_cfg;
1500 struct io_pgtable_ops *pgtbl_ops;
1501 int (*finalise_stage_fn)(struct arm_smmu_domain *,
1502 struct io_pgtable_cfg *);
1503 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
1504 struct arm_smmu_device *smmu = smmu_domain->smmu;
1505
1506 /* Restrict the stage to what we can actually support */
1507 if (!(smmu->features & ARM_SMMU_FEAT_TRANS_S1))
1508 smmu_domain->stage = ARM_SMMU_DOMAIN_S2;
1509 if (!(smmu->features & ARM_SMMU_FEAT_TRANS_S2))
1510 smmu_domain->stage = ARM_SMMU_DOMAIN_S1;
1511
1512 switch (smmu_domain->stage) {
1513 case ARM_SMMU_DOMAIN_S1:
1514 ias = VA_BITS;
1515 oas = smmu->ias;
1516 fmt = ARM_64_LPAE_S1;
1517 finalise_stage_fn = arm_smmu_domain_finalise_s1;
1518 break;
1519 case ARM_SMMU_DOMAIN_NESTED:
1520 case ARM_SMMU_DOMAIN_S2:
1521 ias = smmu->ias;
1522 oas = smmu->oas;
1523 fmt = ARM_64_LPAE_S2;
1524 finalise_stage_fn = arm_smmu_domain_finalise_s2;
1525 break;
1526 default:
1527 return -EINVAL;
1528 }
1529
1530 pgtbl_cfg = (struct io_pgtable_cfg) {
1531 .pgsize_bitmap = arm_smmu_ops.pgsize_bitmap,
1532 .ias = ias,
1533 .oas = oas,
1534 .tlb = &arm_smmu_gather_ops,
Robin Murphybdc6d972015-07-29 19:46:07 +01001535 .iommu_dev = smmu->dev,
Will Deacon48ec83b2015-05-27 17:25:59 +01001536 };
1537
1538 pgtbl_ops = alloc_io_pgtable_ops(fmt, &pgtbl_cfg, smmu_domain);
1539 if (!pgtbl_ops)
1540 return -ENOMEM;
1541
1542 arm_smmu_ops.pgsize_bitmap = pgtbl_cfg.pgsize_bitmap;
1543 smmu_domain->pgtbl_ops = pgtbl_ops;
1544
1545 ret = finalise_stage_fn(smmu_domain, &pgtbl_cfg);
1546 if (IS_ERR_VALUE(ret))
1547 free_io_pgtable_ops(pgtbl_ops);
1548
1549 return ret;
1550}
1551
1552static struct arm_smmu_group *arm_smmu_group_get(struct device *dev)
1553{
1554 struct iommu_group *group;
1555 struct arm_smmu_group *smmu_group;
1556
1557 group = iommu_group_get(dev);
1558 if (!group)
1559 return NULL;
1560
1561 smmu_group = iommu_group_get_iommudata(group);
1562 iommu_group_put(group);
1563 return smmu_group;
1564}
1565
1566static __le64 *arm_smmu_get_step_for_sid(struct arm_smmu_device *smmu, u32 sid)
1567{
1568 __le64 *step;
1569 struct arm_smmu_strtab_cfg *cfg = &smmu->strtab_cfg;
1570
1571 if (smmu->features & ARM_SMMU_FEAT_2_LVL_STRTAB) {
1572 struct arm_smmu_strtab_l1_desc *l1_desc;
1573 int idx;
1574
1575 /* Two-level walk */
1576 idx = (sid >> STRTAB_SPLIT) * STRTAB_L1_DESC_DWORDS;
1577 l1_desc = &cfg->l1_desc[idx];
1578 idx = (sid & ((1 << STRTAB_SPLIT) - 1)) * STRTAB_STE_DWORDS;
1579 step = &l1_desc->l2ptr[idx];
1580 } else {
1581 /* Simple linear lookup */
1582 step = &cfg->strtab[sid * STRTAB_STE_DWORDS];
1583 }
1584
1585 return step;
1586}
1587
1588static int arm_smmu_install_ste_for_group(struct arm_smmu_group *smmu_group)
1589{
1590 int i;
1591 struct arm_smmu_domain *smmu_domain = smmu_group->domain;
1592 struct arm_smmu_strtab_ent *ste = &smmu_group->ste;
1593 struct arm_smmu_device *smmu = smmu_group->smmu;
1594
1595 if (smmu_domain->stage == ARM_SMMU_DOMAIN_S1) {
1596 ste->s1_cfg = &smmu_domain->s1_cfg;
1597 ste->s2_cfg = NULL;
1598 arm_smmu_write_ctx_desc(smmu, ste->s1_cfg);
1599 } else {
1600 ste->s1_cfg = NULL;
1601 ste->s2_cfg = &smmu_domain->s2_cfg;
1602 }
1603
1604 for (i = 0; i < smmu_group->num_sids; ++i) {
1605 u32 sid = smmu_group->sids[i];
1606 __le64 *step = arm_smmu_get_step_for_sid(smmu, sid);
1607
1608 arm_smmu_write_strtab_ent(smmu, sid, step, ste);
1609 }
1610
1611 return 0;
1612}
1613
1614static int arm_smmu_attach_dev(struct iommu_domain *domain, struct device *dev)
1615{
1616 int ret = 0;
1617 struct arm_smmu_device *smmu;
1618 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
1619 struct arm_smmu_group *smmu_group = arm_smmu_group_get(dev);
1620
1621 if (!smmu_group)
1622 return -ENOENT;
1623
1624 /* Already attached to a different domain? */
1625 if (smmu_group->domain && smmu_group->domain != smmu_domain)
1626 return -EEXIST;
1627
1628 smmu = smmu_group->smmu;
1629 mutex_lock(&smmu_domain->init_mutex);
1630
1631 if (!smmu_domain->smmu) {
1632 smmu_domain->smmu = smmu;
1633 ret = arm_smmu_domain_finalise(domain);
1634 if (ret) {
1635 smmu_domain->smmu = NULL;
1636 goto out_unlock;
1637 }
1638 } else if (smmu_domain->smmu != smmu) {
1639 dev_err(dev,
1640 "cannot attach to SMMU %s (upstream of %s)\n",
1641 dev_name(smmu_domain->smmu->dev),
1642 dev_name(smmu->dev));
1643 ret = -ENXIO;
1644 goto out_unlock;
1645 }
1646
1647 /* Group already attached to this domain? */
1648 if (smmu_group->domain)
1649 goto out_unlock;
1650
1651 smmu_group->domain = smmu_domain;
1652 smmu_group->ste.bypass = false;
1653
1654 ret = arm_smmu_install_ste_for_group(smmu_group);
1655 if (IS_ERR_VALUE(ret))
1656 smmu_group->domain = NULL;
1657
1658out_unlock:
1659 mutex_unlock(&smmu_domain->init_mutex);
1660 return ret;
1661}
1662
1663static void arm_smmu_detach_dev(struct iommu_domain *domain, struct device *dev)
1664{
1665 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
1666 struct arm_smmu_group *smmu_group = arm_smmu_group_get(dev);
1667
1668 BUG_ON(!smmu_domain);
1669 BUG_ON(!smmu_group);
1670
1671 mutex_lock(&smmu_domain->init_mutex);
1672 BUG_ON(smmu_group->domain != smmu_domain);
1673
1674 smmu_group->ste.bypass = true;
1675 if (IS_ERR_VALUE(arm_smmu_install_ste_for_group(smmu_group)))
1676 dev_warn(dev, "failed to install bypass STE\n");
1677
1678 smmu_group->domain = NULL;
1679 mutex_unlock(&smmu_domain->init_mutex);
1680}
1681
1682static int arm_smmu_map(struct iommu_domain *domain, unsigned long iova,
1683 phys_addr_t paddr, size_t size, int prot)
1684{
1685 int ret;
1686 unsigned long flags;
1687 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
1688 struct io_pgtable_ops *ops = smmu_domain->pgtbl_ops;
1689
1690 if (!ops)
1691 return -ENODEV;
1692
1693 spin_lock_irqsave(&smmu_domain->pgtbl_lock, flags);
1694 ret = ops->map(ops, iova, paddr, size, prot);
1695 spin_unlock_irqrestore(&smmu_domain->pgtbl_lock, flags);
1696 return ret;
1697}
1698
1699static size_t
1700arm_smmu_unmap(struct iommu_domain *domain, unsigned long iova, size_t size)
1701{
1702 size_t ret;
1703 unsigned long flags;
1704 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
1705 struct io_pgtable_ops *ops = smmu_domain->pgtbl_ops;
1706
1707 if (!ops)
1708 return 0;
1709
1710 spin_lock_irqsave(&smmu_domain->pgtbl_lock, flags);
1711 ret = ops->unmap(ops, iova, size);
1712 spin_unlock_irqrestore(&smmu_domain->pgtbl_lock, flags);
1713 return ret;
1714}
1715
1716static phys_addr_t
1717arm_smmu_iova_to_phys(struct iommu_domain *domain, dma_addr_t iova)
1718{
1719 phys_addr_t ret;
1720 unsigned long flags;
1721 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
1722 struct io_pgtable_ops *ops = smmu_domain->pgtbl_ops;
1723
1724 if (!ops)
1725 return 0;
1726
1727 spin_lock_irqsave(&smmu_domain->pgtbl_lock, flags);
1728 ret = ops->iova_to_phys(ops, iova);
1729 spin_unlock_irqrestore(&smmu_domain->pgtbl_lock, flags);
1730
1731 return ret;
1732}
1733
1734static int __arm_smmu_get_pci_sid(struct pci_dev *pdev, u16 alias, void *sidp)
1735{
1736 *(u32 *)sidp = alias;
1737 return 0; /* Continue walking */
1738}
1739
1740static void __arm_smmu_release_pci_iommudata(void *data)
1741{
1742 kfree(data);
1743}
1744
1745static struct arm_smmu_device *arm_smmu_get_for_pci_dev(struct pci_dev *pdev)
1746{
1747 struct device_node *of_node;
Will Deacon941a8022015-08-11 16:25:10 +01001748 struct platform_device *smmu_pdev;
1749 struct arm_smmu_device *smmu = NULL;
Will Deacon48ec83b2015-05-27 17:25:59 +01001750 struct pci_bus *bus = pdev->bus;
1751
1752 /* Walk up to the root bus */
1753 while (!pci_is_root_bus(bus))
1754 bus = bus->parent;
1755
1756 /* Follow the "iommus" phandle from the host controller */
1757 of_node = of_parse_phandle(bus->bridge->parent->of_node, "iommus", 0);
1758 if (!of_node)
1759 return NULL;
1760
1761 /* See if we can find an SMMU corresponding to the phandle */
Will Deacon941a8022015-08-11 16:25:10 +01001762 smmu_pdev = of_find_device_by_node(of_node);
1763 if (smmu_pdev)
1764 smmu = platform_get_drvdata(smmu_pdev);
1765
Will Deacon48ec83b2015-05-27 17:25:59 +01001766 of_node_put(of_node);
1767 return smmu;
1768}
1769
1770static bool arm_smmu_sid_in_range(struct arm_smmu_device *smmu, u32 sid)
1771{
1772 unsigned long limit = smmu->strtab_cfg.num_l1_ents;
1773
1774 if (smmu->features & ARM_SMMU_FEAT_2_LVL_STRTAB)
1775 limit *= 1UL << STRTAB_SPLIT;
1776
1777 return sid < limit;
1778}
1779
1780static int arm_smmu_add_device(struct device *dev)
1781{
1782 int i, ret;
1783 u32 sid, *sids;
1784 struct pci_dev *pdev;
1785 struct iommu_group *group;
1786 struct arm_smmu_group *smmu_group;
1787 struct arm_smmu_device *smmu;
1788
1789 /* We only support PCI, for now */
1790 if (!dev_is_pci(dev))
1791 return -ENODEV;
1792
1793 pdev = to_pci_dev(dev);
1794 group = iommu_group_get_for_dev(dev);
1795 if (IS_ERR(group))
1796 return PTR_ERR(group);
1797
1798 smmu_group = iommu_group_get_iommudata(group);
1799 if (!smmu_group) {
1800 smmu = arm_smmu_get_for_pci_dev(pdev);
1801 if (!smmu) {
1802 ret = -ENOENT;
1803 goto out_put_group;
1804 }
1805
1806 smmu_group = kzalloc(sizeof(*smmu_group), GFP_KERNEL);
1807 if (!smmu_group) {
1808 ret = -ENOMEM;
1809 goto out_put_group;
1810 }
1811
1812 smmu_group->ste.valid = true;
1813 smmu_group->smmu = smmu;
1814 iommu_group_set_iommudata(group, smmu_group,
1815 __arm_smmu_release_pci_iommudata);
1816 } else {
1817 smmu = smmu_group->smmu;
1818 }
1819
1820 /* Assume SID == RID until firmware tells us otherwise */
1821 pci_for_each_dma_alias(pdev, __arm_smmu_get_pci_sid, &sid);
1822 for (i = 0; i < smmu_group->num_sids; ++i) {
1823 /* If we already know about this SID, then we're done */
1824 if (smmu_group->sids[i] == sid)
1825 return 0;
1826 }
1827
1828 /* Check the SID is in range of the SMMU and our stream table */
1829 if (!arm_smmu_sid_in_range(smmu, sid)) {
1830 ret = -ERANGE;
1831 goto out_put_group;
1832 }
1833
1834 /* Ensure l2 strtab is initialised */
1835 if (smmu->features & ARM_SMMU_FEAT_2_LVL_STRTAB) {
1836 ret = arm_smmu_init_l2_strtab(smmu, sid);
1837 if (ret)
1838 goto out_put_group;
1839 }
1840
1841 /* Resize the SID array for the group */
1842 smmu_group->num_sids++;
1843 sids = krealloc(smmu_group->sids, smmu_group->num_sids * sizeof(*sids),
1844 GFP_KERNEL);
1845 if (!sids) {
1846 smmu_group->num_sids--;
1847 ret = -ENOMEM;
1848 goto out_put_group;
1849 }
1850
1851 /* Add the new SID */
1852 sids[smmu_group->num_sids - 1] = sid;
1853 smmu_group->sids = sids;
1854 return 0;
1855
1856out_put_group:
1857 iommu_group_put(group);
1858 return ret;
1859}
1860
1861static void arm_smmu_remove_device(struct device *dev)
1862{
1863 iommu_group_remove_device(dev);
1864}
1865
1866static int arm_smmu_domain_get_attr(struct iommu_domain *domain,
1867 enum iommu_attr attr, void *data)
1868{
1869 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
1870
1871 switch (attr) {
1872 case DOMAIN_ATTR_NESTING:
1873 *(int *)data = (smmu_domain->stage == ARM_SMMU_DOMAIN_NESTED);
1874 return 0;
1875 default:
1876 return -ENODEV;
1877 }
1878}
1879
1880static int arm_smmu_domain_set_attr(struct iommu_domain *domain,
1881 enum iommu_attr attr, void *data)
1882{
1883 int ret = 0;
1884 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
1885
1886 mutex_lock(&smmu_domain->init_mutex);
1887
1888 switch (attr) {
1889 case DOMAIN_ATTR_NESTING:
1890 if (smmu_domain->smmu) {
1891 ret = -EPERM;
1892 goto out_unlock;
1893 }
1894
1895 if (*(int *)data)
1896 smmu_domain->stage = ARM_SMMU_DOMAIN_NESTED;
1897 else
1898 smmu_domain->stage = ARM_SMMU_DOMAIN_S1;
1899
1900 break;
1901 default:
1902 ret = -ENODEV;
1903 }
1904
1905out_unlock:
1906 mutex_unlock(&smmu_domain->init_mutex);
1907 return ret;
1908}
1909
1910static struct iommu_ops arm_smmu_ops = {
1911 .capable = arm_smmu_capable,
1912 .domain_alloc = arm_smmu_domain_alloc,
1913 .domain_free = arm_smmu_domain_free,
1914 .attach_dev = arm_smmu_attach_dev,
1915 .detach_dev = arm_smmu_detach_dev,
1916 .map = arm_smmu_map,
1917 .unmap = arm_smmu_unmap,
1918 .iova_to_phys = arm_smmu_iova_to_phys,
1919 .add_device = arm_smmu_add_device,
1920 .remove_device = arm_smmu_remove_device,
1921 .domain_get_attr = arm_smmu_domain_get_attr,
1922 .domain_set_attr = arm_smmu_domain_set_attr,
1923 .pgsize_bitmap = -1UL, /* Restricted during device attach */
1924};
1925
1926/* Probing and initialisation functions */
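/*
 * A queue entry is "dwords" 64-bit words, so the backing buffer is
 * (1 << max_n_shift) * dwords * 8 bytes. Q_BASE packs the DMA address of
 * that buffer together with log2 of the number of entries; this is the
 * value written to the corresponding *_BASE register during reset.
 */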
1927static int arm_smmu_init_one_queue(struct arm_smmu_device *smmu,
1928 struct arm_smmu_queue *q,
1929 unsigned long prod_off,
1930 unsigned long cons_off,
1931 size_t dwords)
1932{
1933 size_t qsz = ((1 << q->max_n_shift) * dwords) << 3;
1934
1935 q->base = dma_alloc_coherent(smmu->dev, qsz, &q->base_dma, GFP_KERNEL);
1936 if (!q->base) {
1937 dev_err(smmu->dev, "failed to allocate queue (0x%zx bytes)\n",
1938 qsz);
1939 return -ENOMEM;
1940 }
1941
1942 q->prod_reg = smmu->base + prod_off;
1943 q->cons_reg = smmu->base + cons_off;
1944 q->ent_dwords = dwords;
1945
1946 q->q_base = Q_BASE_RWA;
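	/* '<<' binds tighter than '&': keep only base_dma bits inside the shifted address mask */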
1947 q->q_base |= q->base_dma & Q_BASE_ADDR_MASK << Q_BASE_ADDR_SHIFT;
1948 q->q_base |= (q->max_n_shift & Q_BASE_LOG2SIZE_MASK)
1949 << Q_BASE_LOG2SIZE_SHIFT;
1950
1951 q->prod = q->cons = 0;
1952 return 0;
1953}
1954
1955static void arm_smmu_free_one_queue(struct arm_smmu_device *smmu,
1956 struct arm_smmu_queue *q)
1957{
1958 size_t qsz = ((1 << q->max_n_shift) * q->ent_dwords) << 3;
1959
1960 dma_free_coherent(smmu->dev, qsz, q->base, q->base_dma);
1961}
1962
1963static void arm_smmu_free_queues(struct arm_smmu_device *smmu)
1964{
1965 arm_smmu_free_one_queue(smmu, &smmu->cmdq.q);
1966 arm_smmu_free_one_queue(smmu, &smmu->evtq.q);
1967
1968 if (smmu->features & ARM_SMMU_FEAT_PRI)
1969 arm_smmu_free_one_queue(smmu, &smmu->priq.q);
1970}
1971
1972static int arm_smmu_init_queues(struct arm_smmu_device *smmu)
1973{
1974 int ret;
1975
1976 /* cmdq */
1977 spin_lock_init(&smmu->cmdq.lock);
1978 ret = arm_smmu_init_one_queue(smmu, &smmu->cmdq.q, ARM_SMMU_CMDQ_PROD,
1979 ARM_SMMU_CMDQ_CONS, CMDQ_ENT_DWORDS);
1980 if (ret)
1981 goto out;
1982
1983 /* evtq */
1984 ret = arm_smmu_init_one_queue(smmu, &smmu->evtq.q, ARM_SMMU_EVTQ_PROD,
1985 ARM_SMMU_EVTQ_CONS, EVTQ_ENT_DWORDS);
1986 if (ret)
1987 goto out_free_cmdq;
1988
1989 /* priq */
1990 if (!(smmu->features & ARM_SMMU_FEAT_PRI))
1991 return 0;
1992
1993 ret = arm_smmu_init_one_queue(smmu, &smmu->priq.q, ARM_SMMU_PRIQ_PROD,
1994 ARM_SMMU_PRIQ_CONS, PRIQ_ENT_DWORDS);
1995 if (ret)
1996 goto out_free_evtq;
1997
1998 return 0;
1999
2000out_free_evtq:
2001 arm_smmu_free_one_queue(smmu, &smmu->evtq.q);
2002out_free_cmdq:
2003 arm_smmu_free_one_queue(smmu, &smmu->cmdq.q);
2004out:
2005 return ret;
2006}
2007
2008static void arm_smmu_free_l2_strtab(struct arm_smmu_device *smmu)
2009{
2010 int i;
2011 size_t size;
2012 struct arm_smmu_strtab_cfg *cfg = &smmu->strtab_cfg;
2013
2014 size = 1 << (STRTAB_SPLIT + ilog2(STRTAB_STE_DWORDS) + 3);
2015 for (i = 0; i < cfg->num_l1_ents; ++i) {
2016 struct arm_smmu_strtab_l1_desc *desc = &cfg->l1_desc[i];
2017
2018 if (!desc->l2ptr)
2019 continue;
2020
2021 dma_free_coherent(smmu->dev, size, desc->l2ptr,
2022 desc->l2ptr_dma);
2023 }
2024}
2025
2026static int arm_smmu_init_l1_strtab(struct arm_smmu_device *smmu)
2027{
2028 unsigned int i;
2029 struct arm_smmu_strtab_cfg *cfg = &smmu->strtab_cfg;
2030 size_t size = sizeof(*cfg->l1_desc) * cfg->num_l1_ents;
2031 void *strtab = smmu->strtab_cfg.strtab;
2032
2033 cfg->l1_desc = devm_kzalloc(smmu->dev, size, GFP_KERNEL);
2034 if (!cfg->l1_desc) {
2035 dev_err(smmu->dev, "failed to allocate l1 stream table desc\n");
2036 return -ENOMEM;
2037 }
2038
2039 for (i = 0; i < cfg->num_l1_ents; ++i) {
2040 arm_smmu_write_strtab_l1_desc(strtab, &cfg->l1_desc[i]);
2041 strtab += STRTAB_L1_DESC_DWORDS << 3;
2042 }
2043
2044 return 0;
2045}
2046
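/*
 * Worked example for the sizing below (assuming STRTAB_SPLIT is 8, as in
 * mainline): with sid_bits == 16, size = min(L1 limit, 16 - 8) = 8, so we
 * allocate 256 L1 descriptors; each one points at a lazily allocated L2
 * table of 256 STEs, together covering the full 2^16 SID space.
 */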
2047static int arm_smmu_init_strtab_2lvl(struct arm_smmu_device *smmu)
2048{
2049 void *strtab;
2050 u64 reg;
Will Deacond2e88e72015-06-30 10:02:28 +01002051 u32 size, l1size;
Will Deacon48ec83b2015-05-27 17:25:59 +01002052 int ret;
2053 struct arm_smmu_strtab_cfg *cfg = &smmu->strtab_cfg;
2054
Will Deacon28c8b402015-07-16 17:50:12 +01002055 /*
2056 * If we can resolve everything with a single L2 table, then we
2057 * just need a single L1 descriptor. Otherwise, calculate the L1
2058 * size, capped to the SIDSIZE.
2059 */
2060 if (smmu->sid_bits < STRTAB_SPLIT) {
2061 size = 0;
2062 } else {
2063 size = STRTAB_L1_SZ_SHIFT - (ilog2(STRTAB_L1_DESC_DWORDS) + 3);
2064 size = min(size, smmu->sid_bits - STRTAB_SPLIT);
2065 }
Will Deacond2e88e72015-06-30 10:02:28 +01002066 cfg->num_l1_ents = 1 << size;
2067
2068 size += STRTAB_SPLIT;
2069 if (size < smmu->sid_bits)
Will Deacon48ec83b2015-05-27 17:25:59 +01002070 dev_warn(smmu->dev,
2071 "2-level strtab only covers %u/%u bits of SID\n",
Will Deacond2e88e72015-06-30 10:02:28 +01002072 size, smmu->sid_bits);
Will Deacon48ec83b2015-05-27 17:25:59 +01002073
Will Deacond2e88e72015-06-30 10:02:28 +01002074 l1size = cfg->num_l1_ents * (STRTAB_L1_DESC_DWORDS << 3);
2075 strtab = dma_zalloc_coherent(smmu->dev, l1size, &cfg->strtab_dma,
Will Deacon48ec83b2015-05-27 17:25:59 +01002076 GFP_KERNEL);
2077 if (!strtab) {
2078 dev_err(smmu->dev,
2079 "failed to allocate l1 stream table (%u bytes)\n",
 2080			l1size);
2081 return -ENOMEM;
2082 }
2083 cfg->strtab = strtab;
2084
2085 /* Configure strtab_base_cfg for 2 levels */
2086 reg = STRTAB_BASE_CFG_FMT_2LVL;
2087 reg |= (size & STRTAB_BASE_CFG_LOG2SIZE_MASK)
2088 << STRTAB_BASE_CFG_LOG2SIZE_SHIFT;
2089 reg |= (STRTAB_SPLIT & STRTAB_BASE_CFG_SPLIT_MASK)
2090 << STRTAB_BASE_CFG_SPLIT_SHIFT;
2091 cfg->strtab_base_cfg = reg;
2092
2093 ret = arm_smmu_init_l1_strtab(smmu);
2094 if (ret)
2095 dma_free_coherent(smmu->dev,
Will Deacond2e88e72015-06-30 10:02:28 +01002096 l1size,
Will Deacon48ec83b2015-05-27 17:25:59 +01002097 strtab,
2098 cfg->strtab_dma);
2099 return ret;
2100}
2101
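/*
 * Linear format: one STE per possible SID, i.e. 2^sid_bits entries of
 * STRTAB_STE_DWORDS * 8 bytes each, all initialised as bypass STEs until
 * a device is attached.
 */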
2102static int arm_smmu_init_strtab_linear(struct arm_smmu_device *smmu)
2103{
2104 void *strtab;
2105 u64 reg;
2106 u32 size;
2107 struct arm_smmu_strtab_cfg *cfg = &smmu->strtab_cfg;
2108
2109 size = (1 << smmu->sid_bits) * (STRTAB_STE_DWORDS << 3);
2110 strtab = dma_zalloc_coherent(smmu->dev, size, &cfg->strtab_dma,
2111 GFP_KERNEL);
2112 if (!strtab) {
2113 dev_err(smmu->dev,
2114 "failed to allocate linear stream table (%u bytes)\n",
2115 size);
2116 return -ENOMEM;
2117 }
2118 cfg->strtab = strtab;
2119 cfg->num_l1_ents = 1 << smmu->sid_bits;
2120
2121 /* Configure strtab_base_cfg for a linear table covering all SIDs */
2122 reg = STRTAB_BASE_CFG_FMT_LINEAR;
2123 reg |= (smmu->sid_bits & STRTAB_BASE_CFG_LOG2SIZE_MASK)
2124 << STRTAB_BASE_CFG_LOG2SIZE_SHIFT;
2125 cfg->strtab_base_cfg = reg;
2126
2127 arm_smmu_init_bypass_stes(strtab, cfg->num_l1_ents);
2128 return 0;
2129}
2130
2131static int arm_smmu_init_strtab(struct arm_smmu_device *smmu)
2132{
2133 u64 reg;
2134 int ret;
2135
2136 if (smmu->features & ARM_SMMU_FEAT_2_LVL_STRTAB)
2137 ret = arm_smmu_init_strtab_2lvl(smmu);
2138 else
2139 ret = arm_smmu_init_strtab_linear(smmu);
2140
2141 if (ret)
2142 return ret;
2143
2144 /* Set the strtab base address */
2145 reg = smmu->strtab_cfg.strtab_dma &
2146 STRTAB_BASE_ADDR_MASK << STRTAB_BASE_ADDR_SHIFT;
2147 reg |= STRTAB_BASE_RA;
2148 smmu->strtab_cfg.strtab_base = reg;
2149
2150 /* Allocate the first VMID for stage-2 bypass STEs */
2151 set_bit(0, smmu->vmid_map);
2152 return 0;
2153}
2154
2155static void arm_smmu_free_strtab(struct arm_smmu_device *smmu)
2156{
2157 struct arm_smmu_strtab_cfg *cfg = &smmu->strtab_cfg;
2158 u32 size = cfg->num_l1_ents;
2159
2160 if (smmu->features & ARM_SMMU_FEAT_2_LVL_STRTAB) {
2161 arm_smmu_free_l2_strtab(smmu);
2162 size *= STRTAB_L1_DESC_DWORDS << 3;
2163 } else {
 2164		size *= STRTAB_STE_DWORDS << 3;
2165 }
2166
2167 dma_free_coherent(smmu->dev, size, cfg->strtab, cfg->strtab_dma);
2168}
2169
2170static int arm_smmu_init_structures(struct arm_smmu_device *smmu)
2171{
2172 int ret;
2173
2174 ret = arm_smmu_init_queues(smmu);
2175 if (ret)
2176 return ret;
2177
2178 ret = arm_smmu_init_strtab(smmu);
2179 if (ret)
2180 goto out_free_queues;
2181
2182 return 0;
2183
2184out_free_queues:
2185 arm_smmu_free_queues(smmu);
2186 return ret;
2187}
2188
2189static void arm_smmu_free_structures(struct arm_smmu_device *smmu)
2190{
2191 arm_smmu_free_strtab(smmu);
2192 arm_smmu_free_queues(smmu);
2193}
2194
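/*
 * Registers such as CR0 and IRQ_CTRL have a corresponding *ACK register:
 * write the new value, then poll the ACK register until the hardware
 * reflects it (or give up after ARM_SMMU_POLL_TIMEOUT_US microseconds).
 */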
2195static int arm_smmu_write_reg_sync(struct arm_smmu_device *smmu, u32 val,
2196 unsigned int reg_off, unsigned int ack_off)
2197{
2198 u32 reg;
2199
2200 writel_relaxed(val, smmu->base + reg_off);
2201 return readl_relaxed_poll_timeout(smmu->base + ack_off, reg, reg == val,
2202 1, ARM_SMMU_POLL_TIMEOUT_US);
2203}
2204
Marc Zyngier166bdbd2015-10-13 18:32:30 +01002205static void arm_smmu_free_msis(void *data)
2206{
2207 struct device *dev = data;
2208 platform_msi_domain_free_irqs(dev);
2209}
2210
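/*
 * MSI write-back: the message composed by the irqchip is latched into the
 * SMMU's IRQ_CFG registers (64-bit doorbell address into CFG0, payload
 * into CFG1, Device-nGnRE memory attribute into CFG2) for whichever of
 * the evtq/gerror/priq interrupts this descriptor belongs to.
 */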
2211static void arm_smmu_write_msi_msg(struct msi_desc *desc, struct msi_msg *msg)
2212{
2213 phys_addr_t doorbell;
2214 struct device *dev = msi_desc_to_dev(desc);
2215 struct arm_smmu_device *smmu = dev_get_drvdata(dev);
2216 phys_addr_t *cfg = arm_smmu_msi_cfg[desc->platform.msi_index];
2217
2218 doorbell = (((u64)msg->address_hi) << 32) | msg->address_lo;
2219 doorbell &= MSI_CFG0_ADDR_MASK << MSI_CFG0_ADDR_SHIFT;
2220
2221 writeq_relaxed(doorbell, smmu->base + cfg[0]);
2222 writel_relaxed(msg->data, smmu->base + cfg[1]);
2223 writel_relaxed(MSI_CFG2_MEMATTR_DEVICE_nGnRE, smmu->base + cfg[2]);
2224}
2225
2226static void arm_smmu_setup_msis(struct arm_smmu_device *smmu)
2227{
2228 struct msi_desc *desc;
2229 int ret, nvec = ARM_SMMU_MAX_MSIS;
2230 struct device *dev = smmu->dev;
2231
2232 /* Clear the MSI address regs */
2233 writeq_relaxed(0, smmu->base + ARM_SMMU_GERROR_IRQ_CFG0);
2234 writeq_relaxed(0, smmu->base + ARM_SMMU_EVTQ_IRQ_CFG0);
2235
2236 if (smmu->features & ARM_SMMU_FEAT_PRI)
2237 writeq_relaxed(0, smmu->base + ARM_SMMU_PRIQ_IRQ_CFG0);
2238 else
2239 nvec--;
2240
2241 if (!(smmu->features & ARM_SMMU_FEAT_MSI))
2242 return;
2243
2244 /* Allocate MSIs for evtq, gerror and priq. Ignore cmdq */
2245 ret = platform_msi_domain_alloc_irqs(dev, nvec, arm_smmu_write_msi_msg);
2246 if (ret) {
2247 dev_warn(dev, "failed to allocate MSIs\n");
2248 return;
2249 }
2250
2251 for_each_msi_entry(desc, dev) {
2252 switch (desc->platform.msi_index) {
2253 case EVTQ_MSI_INDEX:
2254 smmu->evtq.q.irq = desc->irq;
2255 break;
2256 case GERROR_MSI_INDEX:
2257 smmu->gerr_irq = desc->irq;
2258 break;
2259 case PRIQ_MSI_INDEX:
2260 smmu->priq.q.irq = desc->irq;
2261 break;
2262 default: /* Unknown */
2263 continue;
2264 }
2265 }
2266
2267 /* Add callback to free MSIs on teardown */
2268 devm_add_action(dev, arm_smmu_free_msis, dev);
2269}
2270
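/*
 * IRQ bring-up order: interrupt generation is disabled first, MSIs are
 * allocated where supported (overriding any wired IRQ numbers taken from
 * the DT), the individual handlers are requested, and only then is
 * IRQ_CTRL re-enabled for the event, GERROR and (optionally) PRI queues.
 */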
Will Deacon48ec83b2015-05-27 17:25:59 +01002271static int arm_smmu_setup_irqs(struct arm_smmu_device *smmu)
2272{
2273 int ret, irq;
Marc Zyngierccd63852015-07-15 11:55:18 +01002274 u32 irqen_flags = IRQ_CTRL_EVTQ_IRQEN | IRQ_CTRL_GERROR_IRQEN;
Will Deacon48ec83b2015-05-27 17:25:59 +01002275
2276 /* Disable IRQs first */
2277 ret = arm_smmu_write_reg_sync(smmu, 0, ARM_SMMU_IRQ_CTRL,
2278 ARM_SMMU_IRQ_CTRLACK);
2279 if (ret) {
2280 dev_err(smmu->dev, "failed to disable irqs\n");
2281 return ret;
2282 }
2283
Marc Zyngier166bdbd2015-10-13 18:32:30 +01002284 arm_smmu_setup_msis(smmu);
Will Deacon48ec83b2015-05-27 17:25:59 +01002285
Marc Zyngier166bdbd2015-10-13 18:32:30 +01002286 /* Request interrupt lines */
Will Deacon48ec83b2015-05-27 17:25:59 +01002287 irq = smmu->evtq.q.irq;
2288 if (irq) {
2289 ret = devm_request_threaded_irq(smmu->dev, irq,
2290 arm_smmu_evtq_handler,
2291 arm_smmu_evtq_thread,
2292 0, "arm-smmu-v3-evtq", smmu);
2293 if (IS_ERR_VALUE(ret))
2294 dev_warn(smmu->dev, "failed to enable evtq irq\n");
2295 }
2296
2297 irq = smmu->cmdq.q.irq;
2298 if (irq) {
2299 ret = devm_request_irq(smmu->dev, irq,
2300 arm_smmu_cmdq_sync_handler, 0,
2301 "arm-smmu-v3-cmdq-sync", smmu);
2302 if (IS_ERR_VALUE(ret))
2303 dev_warn(smmu->dev, "failed to enable cmdq-sync irq\n");
2304 }
2305
2306 irq = smmu->gerr_irq;
2307 if (irq) {
2308 ret = devm_request_irq(smmu->dev, irq, arm_smmu_gerror_handler,
2309 0, "arm-smmu-v3-gerror", smmu);
2310 if (IS_ERR_VALUE(ret))
2311 dev_warn(smmu->dev, "failed to enable gerror irq\n");
2312 }
2313
2314 if (smmu->features & ARM_SMMU_FEAT_PRI) {
Will Deacon48ec83b2015-05-27 17:25:59 +01002315 irq = smmu->priq.q.irq;
2316 if (irq) {
2317 ret = devm_request_threaded_irq(smmu->dev, irq,
2318 arm_smmu_priq_handler,
2319 arm_smmu_priq_thread,
2320 0, "arm-smmu-v3-priq",
2321 smmu);
2322 if (IS_ERR_VALUE(ret))
2323 dev_warn(smmu->dev,
2324 "failed to enable priq irq\n");
Marc Zyngierccd63852015-07-15 11:55:18 +01002325 else
2326 irqen_flags |= IRQ_CTRL_PRIQ_IRQEN;
Will Deacon48ec83b2015-05-27 17:25:59 +01002327 }
2328 }
2329
2330 /* Enable interrupt generation on the SMMU */
Marc Zyngierccd63852015-07-15 11:55:18 +01002331 ret = arm_smmu_write_reg_sync(smmu, irqen_flags,
Will Deacon48ec83b2015-05-27 17:25:59 +01002332 ARM_SMMU_IRQ_CTRL, ARM_SMMU_IRQ_CTRLACK);
2333 if (ret)
2334 dev_warn(smmu->dev, "failed to enable irqs\n");
2335
2336 return 0;
2337}
2338
2339static int arm_smmu_device_disable(struct arm_smmu_device *smmu)
2340{
2341 int ret;
2342
2343 ret = arm_smmu_write_reg_sync(smmu, 0, ARM_SMMU_CR0, ARM_SMMU_CR0ACK);
2344 if (ret)
2345 dev_err(smmu->dev, "failed to clear cr0\n");
2346
2347 return ret;
2348}
2349
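/*
 * Reset sequence: disable the SMMU, program the table/queue memory
 * attributes (CR1/CR2) and the stream table base, then bring the queues up
 * one at a time. The command queue is enabled first so that the
 * configuration and TLB invalidation commands below can be issued before
 * translation is finally switched on with CR0.SMMUEN.
 */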
2350static int arm_smmu_device_reset(struct arm_smmu_device *smmu)
2351{
2352 int ret;
2353 u32 reg, enables;
2354 struct arm_smmu_cmdq_ent cmd;
2355
2356 /* Clear CR0 and sync (disables SMMU and queue processing) */
2357 reg = readl_relaxed(smmu->base + ARM_SMMU_CR0);
2358 if (reg & CR0_SMMUEN)
2359 dev_warn(smmu->dev, "SMMU currently enabled! Resetting...\n");
2360
2361 ret = arm_smmu_device_disable(smmu);
2362 if (ret)
2363 return ret;
2364
2365 /* CR1 (table and queue memory attributes) */
2366 reg = (CR1_SH_ISH << CR1_TABLE_SH_SHIFT) |
2367 (CR1_CACHE_WB << CR1_TABLE_OC_SHIFT) |
2368 (CR1_CACHE_WB << CR1_TABLE_IC_SHIFT) |
2369 (CR1_SH_ISH << CR1_QUEUE_SH_SHIFT) |
2370 (CR1_CACHE_WB << CR1_QUEUE_OC_SHIFT) |
2371 (CR1_CACHE_WB << CR1_QUEUE_IC_SHIFT);
2372 writel_relaxed(reg, smmu->base + ARM_SMMU_CR1);
2373
 2374	/* CR2 (PTM, E2H translation and reporting of invalid SIDs) */
2375 reg = CR2_PTM | CR2_RECINVSID | CR2_E2H;
2376 writel_relaxed(reg, smmu->base + ARM_SMMU_CR2);
2377
2378 /* Stream table */
2379 writeq_relaxed(smmu->strtab_cfg.strtab_base,
2380 smmu->base + ARM_SMMU_STRTAB_BASE);
2381 writel_relaxed(smmu->strtab_cfg.strtab_base_cfg,
2382 smmu->base + ARM_SMMU_STRTAB_BASE_CFG);
2383
2384 /* Command queue */
2385 writeq_relaxed(smmu->cmdq.q.q_base, smmu->base + ARM_SMMU_CMDQ_BASE);
2386 writel_relaxed(smmu->cmdq.q.prod, smmu->base + ARM_SMMU_CMDQ_PROD);
2387 writel_relaxed(smmu->cmdq.q.cons, smmu->base + ARM_SMMU_CMDQ_CONS);
2388
2389 enables = CR0_CMDQEN;
2390 ret = arm_smmu_write_reg_sync(smmu, enables, ARM_SMMU_CR0,
2391 ARM_SMMU_CR0ACK);
2392 if (ret) {
2393 dev_err(smmu->dev, "failed to enable command queue\n");
2394 return ret;
2395 }
2396
2397 /* Invalidate any cached configuration */
2398 cmd.opcode = CMDQ_OP_CFGI_ALL;
2399 arm_smmu_cmdq_issue_cmd(smmu, &cmd);
2400 cmd.opcode = CMDQ_OP_CMD_SYNC;
2401 arm_smmu_cmdq_issue_cmd(smmu, &cmd);
2402
2403 /* Invalidate any stale TLB entries */
2404 if (smmu->features & ARM_SMMU_FEAT_HYP) {
2405 cmd.opcode = CMDQ_OP_TLBI_EL2_ALL;
2406 arm_smmu_cmdq_issue_cmd(smmu, &cmd);
2407 }
2408
2409 cmd.opcode = CMDQ_OP_TLBI_NSNH_ALL;
2410 arm_smmu_cmdq_issue_cmd(smmu, &cmd);
2411 cmd.opcode = CMDQ_OP_CMD_SYNC;
2412 arm_smmu_cmdq_issue_cmd(smmu, &cmd);
2413
2414 /* Event queue */
2415 writeq_relaxed(smmu->evtq.q.q_base, smmu->base + ARM_SMMU_EVTQ_BASE);
2416 writel_relaxed(smmu->evtq.q.prod, smmu->base + ARM_SMMU_EVTQ_PROD);
2417 writel_relaxed(smmu->evtq.q.cons, smmu->base + ARM_SMMU_EVTQ_CONS);
2418
2419 enables |= CR0_EVTQEN;
2420 ret = arm_smmu_write_reg_sync(smmu, enables, ARM_SMMU_CR0,
2421 ARM_SMMU_CR0ACK);
2422 if (ret) {
2423 dev_err(smmu->dev, "failed to enable event queue\n");
2424 return ret;
2425 }
2426
2427 /* PRI queue */
2428 if (smmu->features & ARM_SMMU_FEAT_PRI) {
2429 writeq_relaxed(smmu->priq.q.q_base,
2430 smmu->base + ARM_SMMU_PRIQ_BASE);
2431 writel_relaxed(smmu->priq.q.prod,
2432 smmu->base + ARM_SMMU_PRIQ_PROD);
2433 writel_relaxed(smmu->priq.q.cons,
2434 smmu->base + ARM_SMMU_PRIQ_CONS);
2435
2436 enables |= CR0_PRIQEN;
2437 ret = arm_smmu_write_reg_sync(smmu, enables, ARM_SMMU_CR0,
2438 ARM_SMMU_CR0ACK);
2439 if (ret) {
2440 dev_err(smmu->dev, "failed to enable PRI queue\n");
2441 return ret;
2442 }
2443 }
2444
2445 ret = arm_smmu_setup_irqs(smmu);
2446 if (ret) {
2447 dev_err(smmu->dev, "failed to setup irqs\n");
2448 return ret;
2449 }
2450
2451 /* Enable the SMMU interface */
2452 enables |= CR0_SMMUEN;
2453 ret = arm_smmu_write_reg_sync(smmu, enables, ARM_SMMU_CR0,
2454 ARM_SMMU_CR0ACK);
2455 if (ret) {
2456 dev_err(smmu->dev, "failed to enable SMMU interface\n");
2457 return ret;
2458 }
2459
2460 return 0;
2461}
2462
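/*
 * Hardware probe: IDR0 describes the supported translation stages, table
 * formats and optional features, IDR1 gives the queue and SID/SSID sizes,
 * and IDR5 gives the stall limit, page-size granules and output address
 * size. Everything discovered here feeds the structure allocation and the
 * pgsize_bitmap exposed through arm_smmu_ops.
 */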
2463static int arm_smmu_device_probe(struct arm_smmu_device *smmu)
2464{
2465 u32 reg;
2466 bool coherent;
2467 unsigned long pgsize_bitmap = 0;
2468
2469 /* IDR0 */
2470 reg = readl_relaxed(smmu->base + ARM_SMMU_IDR0);
2471
2472 /* 2-level structures */
2473 if ((reg & IDR0_ST_LVL_MASK << IDR0_ST_LVL_SHIFT) == IDR0_ST_LVL_2LVL)
2474 smmu->features |= ARM_SMMU_FEAT_2_LVL_STRTAB;
2475
2476 if (reg & IDR0_CD2L)
2477 smmu->features |= ARM_SMMU_FEAT_2_LVL_CDTAB;
2478
2479 /*
2480 * Translation table endianness.
2481 * We currently require the same endianness as the CPU, but this
2482 * could be changed later by adding a new IO_PGTABLE_QUIRK.
2483 */
2484 switch (reg & IDR0_TTENDIAN_MASK << IDR0_TTENDIAN_SHIFT) {
2485 case IDR0_TTENDIAN_MIXED:
2486 smmu->features |= ARM_SMMU_FEAT_TT_LE | ARM_SMMU_FEAT_TT_BE;
2487 break;
2488#ifdef __BIG_ENDIAN
2489 case IDR0_TTENDIAN_BE:
2490 smmu->features |= ARM_SMMU_FEAT_TT_BE;
2491 break;
2492#else
2493 case IDR0_TTENDIAN_LE:
2494 smmu->features |= ARM_SMMU_FEAT_TT_LE;
2495 break;
2496#endif
2497 default:
2498 dev_err(smmu->dev, "unknown/unsupported TT endianness!\n");
2499 return -ENXIO;
2500 }
2501
2502 /* Boolean feature flags */
2503 if (IS_ENABLED(CONFIG_PCI_PRI) && reg & IDR0_PRI)
2504 smmu->features |= ARM_SMMU_FEAT_PRI;
2505
2506 if (IS_ENABLED(CONFIG_PCI_ATS) && reg & IDR0_ATS)
2507 smmu->features |= ARM_SMMU_FEAT_ATS;
2508
2509 if (reg & IDR0_SEV)
2510 smmu->features |= ARM_SMMU_FEAT_SEV;
2511
2512 if (reg & IDR0_MSI)
2513 smmu->features |= ARM_SMMU_FEAT_MSI;
2514
2515 if (reg & IDR0_HYP)
2516 smmu->features |= ARM_SMMU_FEAT_HYP;
2517
2518 /*
2519 * The dma-coherent property is used in preference to the ID
2520 * register, but warn on mismatch.
2521 */
2522 coherent = of_dma_is_coherent(smmu->dev->of_node);
2523 if (coherent)
2524 smmu->features |= ARM_SMMU_FEAT_COHERENCY;
2525
2526 if (!!(reg & IDR0_COHACC) != coherent)
2527 dev_warn(smmu->dev, "IDR0.COHACC overridden by dma-coherent property (%s)\n",
2528 coherent ? "true" : "false");
2529
2530 if (reg & IDR0_STALL_MODEL)
2531 smmu->features |= ARM_SMMU_FEAT_STALLS;
2532
2533 if (reg & IDR0_S1P)
2534 smmu->features |= ARM_SMMU_FEAT_TRANS_S1;
2535
2536 if (reg & IDR0_S2P)
2537 smmu->features |= ARM_SMMU_FEAT_TRANS_S2;
2538
2539 if (!(reg & (IDR0_S1P | IDR0_S2P))) {
2540 dev_err(smmu->dev, "no translation support!\n");
2541 return -ENXIO;
2542 }
2543
2544 /* We only support the AArch64 table format at present */
2545 if ((reg & IDR0_TTF_MASK << IDR0_TTF_SHIFT) < IDR0_TTF_AARCH64) {
2546 dev_err(smmu->dev, "AArch64 table format not supported!\n");
2547 return -ENXIO;
2548 }
2549
2550 /* ASID/VMID sizes */
2551 smmu->asid_bits = reg & IDR0_ASID16 ? 16 : 8;
2552 smmu->vmid_bits = reg & IDR0_VMID16 ? 16 : 8;
2553
2554 /* IDR1 */
2555 reg = readl_relaxed(smmu->base + ARM_SMMU_IDR1);
2556 if (reg & (IDR1_TABLES_PRESET | IDR1_QUEUES_PRESET | IDR1_REL)) {
2557 dev_err(smmu->dev, "embedded implementation not supported\n");
2558 return -ENXIO;
2559 }
2560
2561 /* Queue sizes, capped at 4k */
2562 smmu->cmdq.q.max_n_shift = min((u32)CMDQ_MAX_SZ_SHIFT,
2563 reg >> IDR1_CMDQ_SHIFT & IDR1_CMDQ_MASK);
2564 if (!smmu->cmdq.q.max_n_shift) {
2565 /* Odd alignment restrictions on the base, so ignore for now */
2566 dev_err(smmu->dev, "unit-length command queue not supported\n");
2567 return -ENXIO;
2568 }
2569
2570 smmu->evtq.q.max_n_shift = min((u32)EVTQ_MAX_SZ_SHIFT,
2571 reg >> IDR1_EVTQ_SHIFT & IDR1_EVTQ_MASK);
2572 smmu->priq.q.max_n_shift = min((u32)PRIQ_MAX_SZ_SHIFT,
2573 reg >> IDR1_PRIQ_SHIFT & IDR1_PRIQ_MASK);
2574
2575 /* SID/SSID sizes */
2576 smmu->ssid_bits = reg >> IDR1_SSID_SHIFT & IDR1_SSID_MASK;
2577 smmu->sid_bits = reg >> IDR1_SID_SHIFT & IDR1_SID_MASK;
2578
2579 /* IDR5 */
2580 reg = readl_relaxed(smmu->base + ARM_SMMU_IDR5);
2581
2582 /* Maximum number of outstanding stalls */
2583 smmu->evtq.max_stalls = reg >> IDR5_STALL_MAX_SHIFT
2584 & IDR5_STALL_MAX_MASK;
2585
2586 /* Page sizes */
2587 if (reg & IDR5_GRAN64K)
2588 pgsize_bitmap |= SZ_64K | SZ_512M;
2589 if (reg & IDR5_GRAN16K)
2590 pgsize_bitmap |= SZ_16K | SZ_32M;
2591 if (reg & IDR5_GRAN4K)
2592 pgsize_bitmap |= SZ_4K | SZ_2M | SZ_1G;
2593
2594 arm_smmu_ops.pgsize_bitmap &= pgsize_bitmap;
2595
2596 /* Output address size */
2597 switch (reg & IDR5_OAS_MASK << IDR5_OAS_SHIFT) {
2598 case IDR5_OAS_32_BIT:
2599 smmu->oas = 32;
2600 break;
2601 case IDR5_OAS_36_BIT:
2602 smmu->oas = 36;
2603 break;
2604 case IDR5_OAS_40_BIT:
2605 smmu->oas = 40;
2606 break;
2607 case IDR5_OAS_42_BIT:
2608 smmu->oas = 42;
2609 break;
2610 case IDR5_OAS_44_BIT:
2611 smmu->oas = 44;
2612 break;
Will Deacon85430962015-08-03 10:35:40 +01002613 default:
2614 dev_info(smmu->dev,
2615 "unknown output address size. Truncating to 48-bit\n");
2616 /* Fallthrough */
Will Deacon48ec83b2015-05-27 17:25:59 +01002617 case IDR5_OAS_48_BIT:
2618 smmu->oas = 48;
Will Deacon48ec83b2015-05-27 17:25:59 +01002619 }
2620
2621 /* Set the DMA mask for our table walker */
2622 if (dma_set_mask_and_coherent(smmu->dev, DMA_BIT_MASK(smmu->oas)))
2623 dev_warn(smmu->dev,
2624 "failed to set DMA mask for table walker\n");
2625
2626 if (!smmu->ias)
2627 smmu->ias = smmu->oas;
2628
2629 dev_info(smmu->dev, "ias %lu-bit, oas %lu-bit (features 0x%08x)\n",
2630 smmu->ias, smmu->oas, smmu->features);
2631 return 0;
2632}
2633
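/*
 * Platform/DT probe: map the (at least 128K) register region, pick up the
 * four optional named interrupts, read the hardware ID registers, allocate
 * the queues and stream table, and finally reset the device into a known
 * state. A failure after the structures have been allocated unwinds them.
 *
 * Illustrative (not authoritative) DT node, matching the names used below;
 * addresses and interrupt specifiers are made up, and the real binding
 * lives under Documentation/devicetree/bindings/iommu/:
 *
 *	smmu@2b400000 {
 *		compatible = "arm,smmu-v3";
 *		reg = <0x0 0x2b400000 0x0 0x20000>;
 *		interrupts = <0 74 1>, <0 75 1>, <0 77 1>, <0 79 1>;
 *		interrupt-names = "eventq", "priq", "cmdq-sync", "gerror";
 *		dma-coherent;
 *	};
 */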
2634static int arm_smmu_device_dt_probe(struct platform_device *pdev)
2635{
2636 int irq, ret;
2637 struct resource *res;
2638 struct arm_smmu_device *smmu;
2639 struct device *dev = &pdev->dev;
2640
2641 smmu = devm_kzalloc(dev, sizeof(*smmu), GFP_KERNEL);
2642 if (!smmu) {
2643 dev_err(dev, "failed to allocate arm_smmu_device\n");
2644 return -ENOMEM;
2645 }
2646 smmu->dev = dev;
2647
2648 /* Base address */
2649 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
2650 if (resource_size(res) + 1 < SZ_128K) {
2651 dev_err(dev, "MMIO region too small (%pr)\n", res);
2652 return -EINVAL;
2653 }
2654
2655 smmu->base = devm_ioremap_resource(dev, res);
2656 if (IS_ERR(smmu->base))
2657 return PTR_ERR(smmu->base);
2658
2659 /* Interrupt lines */
2660 irq = platform_get_irq_byname(pdev, "eventq");
2661 if (irq > 0)
2662 smmu->evtq.q.irq = irq;
2663
2664 irq = platform_get_irq_byname(pdev, "priq");
2665 if (irq > 0)
2666 smmu->priq.q.irq = irq;
2667
2668 irq = platform_get_irq_byname(pdev, "cmdq-sync");
2669 if (irq > 0)
2670 smmu->cmdq.q.irq = irq;
2671
2672 irq = platform_get_irq_byname(pdev, "gerror");
2673 if (irq > 0)
2674 smmu->gerr_irq = irq;
2675
Zhen Lei5e929462015-07-07 04:30:18 +01002676 parse_driver_options(smmu);
2677
Will Deacon48ec83b2015-05-27 17:25:59 +01002678 /* Probe the h/w */
2679 ret = arm_smmu_device_probe(smmu);
2680 if (ret)
2681 return ret;
2682
2683 /* Initialise in-memory data structures */
2684 ret = arm_smmu_init_structures(smmu);
2685 if (ret)
2686 return ret;
2687
Marc Zyngier166bdbd2015-10-13 18:32:30 +01002688 /* Record our private device structure */
2689 platform_set_drvdata(pdev, smmu);
2690
Will Deacon48ec83b2015-05-27 17:25:59 +01002691 /* Reset the device */
2692 ret = arm_smmu_device_reset(smmu);
2693 if (ret)
2694 goto out_free_structures;
2695
Will Deacon48ec83b2015-05-27 17:25:59 +01002696 return 0;
2697
2698out_free_structures:
2699 arm_smmu_free_structures(smmu);
2700 return ret;
2701}
2702
2703static int arm_smmu_device_remove(struct platform_device *pdev)
2704{
Will Deacon941a8022015-08-11 16:25:10 +01002705 struct arm_smmu_device *smmu = platform_get_drvdata(pdev);
Will Deacon48ec83b2015-05-27 17:25:59 +01002706
2707 arm_smmu_device_disable(smmu);
2708 arm_smmu_free_structures(smmu);
2709 return 0;
2710}
2711
2712static struct of_device_id arm_smmu_of_match[] = {
2713 { .compatible = "arm,smmu-v3", },
2714 { },
2715};
2716MODULE_DEVICE_TABLE(of, arm_smmu_of_match);
2717
2718static struct platform_driver arm_smmu_driver = {
2719 .driver = {
2720 .name = "arm-smmu-v3",
2721 .of_match_table = of_match_ptr(arm_smmu_of_match),
2722 },
2723 .probe = arm_smmu_device_dt_probe,
2724 .remove = arm_smmu_device_remove,
2725};
2726
2727static int __init arm_smmu_init(void)
2728{
2729 struct device_node *np;
2730 int ret;
2731
2732 np = of_find_matching_node(NULL, arm_smmu_of_match);
2733 if (!np)
2734 return 0;
2735
2736 of_node_put(np);
2737
2738 ret = platform_driver_register(&arm_smmu_driver);
2739 if (ret)
2740 return ret;
2741
2742 return bus_set_iommu(&pci_bus_type, &arm_smmu_ops);
2743}
2744
2745static void __exit arm_smmu_exit(void)
2746{
2747 return platform_driver_unregister(&arm_smmu_driver);
2748}
2749
2750subsys_initcall(arm_smmu_init);
2751module_exit(arm_smmu_exit);
2752
2753MODULE_DESCRIPTION("IOMMU API for ARM architected SMMUv3 implementations");
2754MODULE_AUTHOR("Will Deacon <will.deacon@arm.com>");
2755MODULE_LICENSE("GPL v2");