1/*
2 * IOMMU API for ARM architected SMMUv3 implementations.
3 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License version 2 as
6 * published by the Free Software Foundation.
7 *
8 * This program is distributed in the hope that it will be useful,
9 * but WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
11 * GNU General Public License for more details.
12 *
13 * You should have received a copy of the GNU General Public License
14 * along with this program. If not, see <http://www.gnu.org/licenses/>.
15 *
16 * Copyright (C) 2015 ARM Limited
17 *
18 * Author: Will Deacon <will.deacon@arm.com>
19 *
20 * This driver is powered by bad coffee and bombay mix.
21 */
22
23#include <linux/delay.h>
24#include <linux/err.h>
25#include <linux/interrupt.h>
26#include <linux/iommu.h>
27#include <linux/iopoll.h>
28#include <linux/module.h>
29#include <linux/of.h>
30#include <linux/of_address.h>
31#include <linux/pci.h>
32#include <linux/platform_device.h>
33
34#include "io-pgtable.h"
35
36/* MMIO registers */
37#define ARM_SMMU_IDR0 0x0
38#define IDR0_ST_LVL_SHIFT 27
39#define IDR0_ST_LVL_MASK 0x3
40#define IDR0_ST_LVL_2LVL (1 << IDR0_ST_LVL_SHIFT)
41#define IDR0_STALL_MODEL (3 << 24)
42#define IDR0_TTENDIAN_SHIFT 21
43#define IDR0_TTENDIAN_MASK 0x3
44#define IDR0_TTENDIAN_LE (2 << IDR0_TTENDIAN_SHIFT)
45#define IDR0_TTENDIAN_BE (3 << IDR0_TTENDIAN_SHIFT)
46#define IDR0_TTENDIAN_MIXED (0 << IDR0_TTENDIAN_SHIFT)
47#define IDR0_CD2L (1 << 19)
48#define IDR0_VMID16 (1 << 18)
49#define IDR0_PRI (1 << 16)
50#define IDR0_SEV (1 << 14)
51#define IDR0_MSI (1 << 13)
52#define IDR0_ASID16 (1 << 12)
53#define IDR0_ATS (1 << 10)
54#define IDR0_HYP (1 << 9)
55#define IDR0_COHACC (1 << 4)
56#define IDR0_TTF_SHIFT 2
57#define IDR0_TTF_MASK 0x3
58#define IDR0_TTF_AARCH64 (2 << IDR0_TTF_SHIFT)
59#define IDR0_TTF_AARCH32_64 (3 << IDR0_TTF_SHIFT)
60#define IDR0_S1P (1 << 1)
61#define IDR0_S2P (1 << 0)
62
63#define ARM_SMMU_IDR1 0x4
64#define IDR1_TABLES_PRESET (1 << 30)
65#define IDR1_QUEUES_PRESET (1 << 29)
66#define IDR1_REL (1 << 28)
67#define IDR1_CMDQ_SHIFT 21
68#define IDR1_CMDQ_MASK 0x1f
69#define IDR1_EVTQ_SHIFT 16
70#define IDR1_EVTQ_MASK 0x1f
71#define IDR1_PRIQ_SHIFT 11
72#define IDR1_PRIQ_MASK 0x1f
73#define IDR1_SSID_SHIFT 6
74#define IDR1_SSID_MASK 0x1f
75#define IDR1_SID_SHIFT 0
76#define IDR1_SID_MASK 0x3f
77
78#define ARM_SMMU_IDR5 0x14
79#define IDR5_STALL_MAX_SHIFT 16
80#define IDR5_STALL_MAX_MASK 0xffff
81#define IDR5_GRAN64K (1 << 6)
82#define IDR5_GRAN16K (1 << 5)
83#define IDR5_GRAN4K (1 << 4)
84#define IDR5_OAS_SHIFT 0
85#define IDR5_OAS_MASK 0x7
86#define IDR5_OAS_32_BIT (0 << IDR5_OAS_SHIFT)
87#define IDR5_OAS_36_BIT (1 << IDR5_OAS_SHIFT)
88#define IDR5_OAS_40_BIT (2 << IDR5_OAS_SHIFT)
89#define IDR5_OAS_42_BIT (3 << IDR5_OAS_SHIFT)
90#define IDR5_OAS_44_BIT (4 << IDR5_OAS_SHIFT)
91#define IDR5_OAS_48_BIT (5 << IDR5_OAS_SHIFT)
92
93#define ARM_SMMU_CR0 0x20
94#define CR0_CMDQEN (1 << 3)
95#define CR0_EVTQEN (1 << 2)
96#define CR0_PRIQEN (1 << 1)
97#define CR0_SMMUEN (1 << 0)
98
99#define ARM_SMMU_CR0ACK 0x24
100
101#define ARM_SMMU_CR1 0x28
102#define CR1_SH_NSH 0
103#define CR1_SH_OSH 2
104#define CR1_SH_ISH 3
105#define CR1_CACHE_NC 0
106#define CR1_CACHE_WB 1
107#define CR1_CACHE_WT 2
108#define CR1_TABLE_SH_SHIFT 10
109#define CR1_TABLE_OC_SHIFT 8
110#define CR1_TABLE_IC_SHIFT 6
111#define CR1_QUEUE_SH_SHIFT 4
112#define CR1_QUEUE_OC_SHIFT 2
113#define CR1_QUEUE_IC_SHIFT 0
114
115#define ARM_SMMU_CR2 0x2c
116#define CR2_PTM (1 << 2)
117#define CR2_RECINVSID (1 << 1)
118#define CR2_E2H (1 << 0)
119
120#define ARM_SMMU_IRQ_CTRL 0x50
121#define IRQ_CTRL_EVTQ_IRQEN (1 << 2)
122#define IRQ_CTRL_PRIQ_IRQEN (1 << 1)
123#define IRQ_CTRL_GERROR_IRQEN (1 << 0)
124
125#define ARM_SMMU_IRQ_CTRLACK 0x54
126
127#define ARM_SMMU_GERROR 0x60
128#define GERROR_SFM_ERR (1 << 8)
129#define GERROR_MSI_GERROR_ABT_ERR (1 << 7)
130#define GERROR_MSI_PRIQ_ABT_ERR (1 << 6)
131#define GERROR_MSI_EVTQ_ABT_ERR (1 << 5)
132#define GERROR_MSI_CMDQ_ABT_ERR (1 << 4)
133#define GERROR_PRIQ_ABT_ERR (1 << 3)
134#define GERROR_EVTQ_ABT_ERR (1 << 2)
135#define GERROR_CMDQ_ERR (1 << 0)
136#define GERROR_ERR_MASK 0xfd
137
138#define ARM_SMMU_GERRORN 0x64
139
140#define ARM_SMMU_GERROR_IRQ_CFG0 0x68
141#define ARM_SMMU_GERROR_IRQ_CFG1 0x70
142#define ARM_SMMU_GERROR_IRQ_CFG2 0x74
143
144#define ARM_SMMU_STRTAB_BASE 0x80
145#define STRTAB_BASE_RA (1UL << 62)
146#define STRTAB_BASE_ADDR_SHIFT 6
147#define STRTAB_BASE_ADDR_MASK 0x3ffffffffffUL
148
149#define ARM_SMMU_STRTAB_BASE_CFG 0x88
150#define STRTAB_BASE_CFG_LOG2SIZE_SHIFT 0
151#define STRTAB_BASE_CFG_LOG2SIZE_MASK 0x3f
152#define STRTAB_BASE_CFG_SPLIT_SHIFT 6
153#define STRTAB_BASE_CFG_SPLIT_MASK 0x1f
154#define STRTAB_BASE_CFG_FMT_SHIFT 16
155#define STRTAB_BASE_CFG_FMT_MASK 0x3
156#define STRTAB_BASE_CFG_FMT_LINEAR (0 << STRTAB_BASE_CFG_FMT_SHIFT)
157#define STRTAB_BASE_CFG_FMT_2LVL (1 << STRTAB_BASE_CFG_FMT_SHIFT)
158
159#define ARM_SMMU_CMDQ_BASE 0x90
160#define ARM_SMMU_CMDQ_PROD 0x98
161#define ARM_SMMU_CMDQ_CONS 0x9c
162
163#define ARM_SMMU_EVTQ_BASE 0xa0
164#define ARM_SMMU_EVTQ_PROD 0x100a8
165#define ARM_SMMU_EVTQ_CONS 0x100ac
166#define ARM_SMMU_EVTQ_IRQ_CFG0 0xb0
167#define ARM_SMMU_EVTQ_IRQ_CFG1 0xb8
168#define ARM_SMMU_EVTQ_IRQ_CFG2 0xbc
169
170#define ARM_SMMU_PRIQ_BASE 0xc0
171#define ARM_SMMU_PRIQ_PROD 0x100c8
172#define ARM_SMMU_PRIQ_CONS 0x100cc
173#define ARM_SMMU_PRIQ_IRQ_CFG0 0xd0
174#define ARM_SMMU_PRIQ_IRQ_CFG1 0xd8
175#define ARM_SMMU_PRIQ_IRQ_CFG2 0xdc
176
177/* Common MSI config fields */
178#define MSI_CFG0_ADDR_SHIFT 2
179#define MSI_CFG0_ADDR_MASK 0x3fffffffffffUL
180#define MSI_CFG2_SH_SHIFT 4
181#define MSI_CFG2_SH_NSH (0UL << MSI_CFG2_SH_SHIFT)
182#define MSI_CFG2_SH_OSH (2UL << MSI_CFG2_SH_SHIFT)
183#define MSI_CFG2_SH_ISH (3UL << MSI_CFG2_SH_SHIFT)
184#define MSI_CFG2_MEMATTR_SHIFT 0
185#define MSI_CFG2_MEMATTR_DEVICE_nGnRE (0x1 << MSI_CFG2_MEMATTR_SHIFT)
186
187#define Q_IDX(q, p) ((p) & ((1 << (q)->max_n_shift) - 1))
188#define Q_WRP(q, p) ((p) & (1 << (q)->max_n_shift))
189#define Q_OVERFLOW_FLAG (1 << 31)
190#define Q_OVF(q, p) ((p) & Q_OVERFLOW_FLAG)
191#define Q_ENT(q, p) ((q)->base + \
192 Q_IDX(q, p) * (q)->ent_dwords)
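/*
 * Worked example (values assumed for illustration only): with
 * max_n_shift == 8, a producer/consumer word p == 0x0000013a decodes as
 *
 *	Q_IDX(q, p) == 0x3a	(entry 58 of the 256-entry ring)
 *	Q_WRP(q, p) == 0x100	(the ring has wrapped an odd number of times)
 *	Q_OVF(q, p) == 0	(no overflow recorded in bit 31)
 */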
193
194#define Q_BASE_RWA (1UL << 62)
195#define Q_BASE_ADDR_SHIFT 5
196#define Q_BASE_ADDR_MASK 0xfffffffffffUL
197#define Q_BASE_LOG2SIZE_SHIFT 0
198#define Q_BASE_LOG2SIZE_MASK 0x1fUL
199
200/*
201 * Stream table.
202 *
203 * Linear: Enough to cover 1 << IDR1.SIDSIZE entries
204 * 2lvl: 128k L1 entries,
205 * 256 lazy entries per table (each table covers a PCI bus)
206 */
207#define STRTAB_L1_SZ_SHIFT 20
208#define STRTAB_SPLIT 8
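/*
 * Worked example of the split above: with STRTAB_SPLIT == 8, SID 0x1234
 * selects L1 descriptor 0x12 (sid >> STRTAB_SPLIT) and STE 0x34
 * (sid & 0xff) within the 256-entry leaf table that the descriptor points
 * to; leaf tables are only allocated once a device with a matching SID
 * shows up, hence "lazy" in the comment above.
 */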
209
210#define STRTAB_L1_DESC_DWORDS 1
211#define STRTAB_L1_DESC_SPAN_SHIFT 0
212#define STRTAB_L1_DESC_SPAN_MASK 0x1fUL
213#define STRTAB_L1_DESC_L2PTR_SHIFT 6
214#define STRTAB_L1_DESC_L2PTR_MASK 0x3ffffffffffUL
215
216#define STRTAB_STE_DWORDS 8
217#define STRTAB_STE_0_V (1UL << 0)
218#define STRTAB_STE_0_CFG_SHIFT 1
219#define STRTAB_STE_0_CFG_MASK 0x7UL
220#define STRTAB_STE_0_CFG_ABORT (0UL << STRTAB_STE_0_CFG_SHIFT)
221#define STRTAB_STE_0_CFG_BYPASS (4UL << STRTAB_STE_0_CFG_SHIFT)
222#define STRTAB_STE_0_CFG_S1_TRANS (5UL << STRTAB_STE_0_CFG_SHIFT)
223#define STRTAB_STE_0_CFG_S2_TRANS (6UL << STRTAB_STE_0_CFG_SHIFT)
224
225#define STRTAB_STE_0_S1FMT_SHIFT 4
226#define STRTAB_STE_0_S1FMT_LINEAR (0UL << STRTAB_STE_0_S1FMT_SHIFT)
227#define STRTAB_STE_0_S1CTXPTR_SHIFT 6
228#define STRTAB_STE_0_S1CTXPTR_MASK 0x3ffffffffffUL
229#define STRTAB_STE_0_S1CDMAX_SHIFT 59
230#define STRTAB_STE_0_S1CDMAX_MASK 0x1fUL
231
232#define STRTAB_STE_1_S1C_CACHE_NC 0UL
233#define STRTAB_STE_1_S1C_CACHE_WBRA 1UL
234#define STRTAB_STE_1_S1C_CACHE_WT 2UL
235#define STRTAB_STE_1_S1C_CACHE_WB 3UL
236#define STRTAB_STE_1_S1C_SH_NSH 0UL
237#define STRTAB_STE_1_S1C_SH_OSH 2UL
238#define STRTAB_STE_1_S1C_SH_ISH 3UL
239#define STRTAB_STE_1_S1CIR_SHIFT 2
240#define STRTAB_STE_1_S1COR_SHIFT 4
241#define STRTAB_STE_1_S1CSH_SHIFT 6
242
243#define STRTAB_STE_1_S1STALLD (1UL << 27)
244
245#define STRTAB_STE_1_EATS_ABT 0UL
246#define STRTAB_STE_1_EATS_TRANS 1UL
247#define STRTAB_STE_1_EATS_S1CHK 2UL
248#define STRTAB_STE_1_EATS_SHIFT 28
249
250#define STRTAB_STE_1_STRW_NSEL1 0UL
251#define STRTAB_STE_1_STRW_EL2 2UL
252#define STRTAB_STE_1_STRW_SHIFT 30
253
254#define STRTAB_STE_2_S2VMID_SHIFT 0
255#define STRTAB_STE_2_S2VMID_MASK 0xffffUL
256#define STRTAB_STE_2_VTCR_SHIFT 32
257#define STRTAB_STE_2_VTCR_MASK 0x7ffffUL
258#define STRTAB_STE_2_S2AA64 (1UL << 51)
259#define STRTAB_STE_2_S2ENDI (1UL << 52)
260#define STRTAB_STE_2_S2PTW (1UL << 54)
261#define STRTAB_STE_2_S2R (1UL << 58)
262
263#define STRTAB_STE_3_S2TTB_SHIFT 4
264#define STRTAB_STE_3_S2TTB_MASK 0xfffffffffffUL
265
266/* Context descriptor (stage-1 only) */
267#define CTXDESC_CD_DWORDS 8
268#define CTXDESC_CD_0_TCR_T0SZ_SHIFT 0
269#define ARM64_TCR_T0SZ_SHIFT 0
270#define ARM64_TCR_T0SZ_MASK 0x1fUL
271#define CTXDESC_CD_0_TCR_TG0_SHIFT 6
272#define ARM64_TCR_TG0_SHIFT 14
273#define ARM64_TCR_TG0_MASK 0x3UL
274#define CTXDESC_CD_0_TCR_IRGN0_SHIFT 8
275#define ARM64_TCR_IRGN0_SHIFT 8
276#define ARM64_TCR_IRGN0_MASK 0x3UL
277#define CTXDESC_CD_0_TCR_ORGN0_SHIFT 10
278#define ARM64_TCR_ORGN0_SHIFT 10
279#define ARM64_TCR_ORGN0_MASK 0x3UL
280#define CTXDESC_CD_0_TCR_SH0_SHIFT 12
281#define ARM64_TCR_SH0_SHIFT 12
282#define ARM64_TCR_SH0_MASK 0x3UL
283#define CTXDESC_CD_0_TCR_EPD0_SHIFT 14
284#define ARM64_TCR_EPD0_SHIFT 7
285#define ARM64_TCR_EPD0_MASK 0x1UL
286#define CTXDESC_CD_0_TCR_EPD1_SHIFT 30
287#define ARM64_TCR_EPD1_SHIFT 23
288#define ARM64_TCR_EPD1_MASK 0x1UL
289
290#define CTXDESC_CD_0_ENDI (1UL << 15)
291#define CTXDESC_CD_0_V (1UL << 31)
292
293#define CTXDESC_CD_0_TCR_IPS_SHIFT 32
294#define ARM64_TCR_IPS_SHIFT 32
295#define ARM64_TCR_IPS_MASK 0x7UL
296#define CTXDESC_CD_0_TCR_TBI0_SHIFT 38
297#define ARM64_TCR_TBI0_SHIFT 37
298#define ARM64_TCR_TBI0_MASK 0x1UL
299
300#define CTXDESC_CD_0_AA64 (1UL << 41)
301#define CTXDESC_CD_0_R (1UL << 45)
302#define CTXDESC_CD_0_A (1UL << 46)
303#define CTXDESC_CD_0_ASET_SHIFT 47
304#define CTXDESC_CD_0_ASET_SHARED (0UL << CTXDESC_CD_0_ASET_SHIFT)
305#define CTXDESC_CD_0_ASET_PRIVATE (1UL << CTXDESC_CD_0_ASET_SHIFT)
306#define CTXDESC_CD_0_ASID_SHIFT 48
307#define CTXDESC_CD_0_ASID_MASK 0xffffUL
308
309#define CTXDESC_CD_1_TTB0_SHIFT 4
310#define CTXDESC_CD_1_TTB0_MASK 0xfffffffffffUL
311
312#define CTXDESC_CD_3_MAIR_SHIFT 0
313
314/* Convert between AArch64 (CPU) TCR format and SMMU CD format */
315#define ARM_SMMU_TCR2CD(tcr, fld) \
316 (((tcr) >> ARM64_TCR_##fld##_SHIFT & ARM64_TCR_##fld##_MASK) \
317 << CTXDESC_CD_0_TCR_##fld##_SHIFT)
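/*
 * Example expansion, derived from the definitions above: for the TG0
 * field, ARM_SMMU_TCR2CD(tcr, TG0) reads bits [15:14] of the CPU TCR
 * (shift 14, mask 0x3) and re-packs them at bit 6 of CD word 0, i.e.
 *
 *	((tcr >> 14) & 0x3) << 6
 */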
318
319/* Command queue */
320#define CMDQ_ENT_DWORDS 2
321#define CMDQ_MAX_SZ_SHIFT 8
322
323#define CMDQ_ERR_SHIFT 24
324#define CMDQ_ERR_MASK 0x7f
325#define CMDQ_ERR_CERROR_NONE_IDX 0
326#define CMDQ_ERR_CERROR_ILL_IDX 1
327#define CMDQ_ERR_CERROR_ABT_IDX 2
328
329#define CMDQ_0_OP_SHIFT 0
330#define CMDQ_0_OP_MASK 0xffUL
331#define CMDQ_0_SSV (1UL << 11)
332
333#define CMDQ_PREFETCH_0_SID_SHIFT 32
334#define CMDQ_PREFETCH_1_SIZE_SHIFT 0
335#define CMDQ_PREFETCH_1_ADDR_MASK ~0xfffUL
336
337#define CMDQ_CFGI_0_SID_SHIFT 32
338#define CMDQ_CFGI_0_SID_MASK 0xffffffffUL
339#define CMDQ_CFGI_1_LEAF (1UL << 0)
340#define CMDQ_CFGI_1_RANGE_SHIFT 0
341#define CMDQ_CFGI_1_RANGE_MASK 0x1fUL
342
343#define CMDQ_TLBI_0_VMID_SHIFT 32
344#define CMDQ_TLBI_0_ASID_SHIFT 48
345#define CMDQ_TLBI_1_LEAF (1UL << 0)
346#define CMDQ_TLBI_1_VA_MASK ~0xfffUL
347#define CMDQ_TLBI_1_IPA_MASK 0xfffffffff000UL
348
349#define CMDQ_PRI_0_SSID_SHIFT 12
350#define CMDQ_PRI_0_SSID_MASK 0xfffffUL
351#define CMDQ_PRI_0_SID_SHIFT 32
352#define CMDQ_PRI_0_SID_MASK 0xffffffffUL
353#define CMDQ_PRI_1_GRPID_SHIFT 0
354#define CMDQ_PRI_1_GRPID_MASK 0x1ffUL
355#define CMDQ_PRI_1_RESP_SHIFT 12
356#define CMDQ_PRI_1_RESP_DENY (0UL << CMDQ_PRI_1_RESP_SHIFT)
357#define CMDQ_PRI_1_RESP_FAIL (1UL << CMDQ_PRI_1_RESP_SHIFT)
358#define CMDQ_PRI_1_RESP_SUCC (2UL << CMDQ_PRI_1_RESP_SHIFT)
359
360#define CMDQ_SYNC_0_CS_SHIFT 12
361#define CMDQ_SYNC_0_CS_NONE (0UL << CMDQ_SYNC_0_CS_SHIFT)
362#define CMDQ_SYNC_0_CS_SEV (2UL << CMDQ_SYNC_0_CS_SHIFT)
363
364/* Event queue */
365#define EVTQ_ENT_DWORDS 4
366#define EVTQ_MAX_SZ_SHIFT 7
367
368#define EVTQ_0_ID_SHIFT 0
369#define EVTQ_0_ID_MASK 0xffUL
370
371/* PRI queue */
372#define PRIQ_ENT_DWORDS 2
373#define PRIQ_MAX_SZ_SHIFT 8
374
375#define PRIQ_0_SID_SHIFT 0
376#define PRIQ_0_SID_MASK 0xffffffffUL
377#define PRIQ_0_SSID_SHIFT 32
378#define PRIQ_0_SSID_MASK 0xfffffUL
379#define PRIQ_0_OF (1UL << 57)
380#define PRIQ_0_PERM_PRIV (1UL << 58)
381#define PRIQ_0_PERM_EXEC (1UL << 59)
382#define PRIQ_0_PERM_READ (1UL << 60)
383#define PRIQ_0_PERM_WRITE (1UL << 61)
384#define PRIQ_0_PRG_LAST (1UL << 62)
385#define PRIQ_0_SSID_V (1UL << 63)
386
387#define PRIQ_1_PRG_IDX_SHIFT 0
388#define PRIQ_1_PRG_IDX_MASK 0x1ffUL
389#define PRIQ_1_ADDR_SHIFT 12
390#define PRIQ_1_ADDR_MASK 0xfffffffffffffUL
391
392/* High-level queue structures */
393#define ARM_SMMU_POLL_TIMEOUT_US 100
394
395static bool disable_bypass;
396module_param_named(disable_bypass, disable_bypass, bool, S_IRUGO);
397MODULE_PARM_DESC(disable_bypass,
398 "Disable bypass streams such that incoming transactions from devices that are not attached to an iommu domain will report an abort back to the device and will not be allowed to pass through the SMMU.");
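/*
 * Illustrative usage (the "arm_smmu_v3" prefix assumes the default
 * KBUILD_MODNAME-derived parameter prefix for this file): booting with
 *
 *	arm_smmu_v3.disable_bypass=1
 *
 * makes transactions from unattached devices abort instead of bypassing
 * the SMMU.
 */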
399
400enum pri_resp {
401 PRI_RESP_DENY,
402 PRI_RESP_FAIL,
403 PRI_RESP_SUCC,
404};
405
406struct arm_smmu_cmdq_ent {
407 /* Common fields */
408 u8 opcode;
409 bool substream_valid;
410
411 /* Command-specific fields */
412 union {
413 #define CMDQ_OP_PREFETCH_CFG 0x1
414 struct {
415 u32 sid;
416 u8 size;
417 u64 addr;
418 } prefetch;
419
420 #define CMDQ_OP_CFGI_STE 0x3
421 #define CMDQ_OP_CFGI_ALL 0x4
422 struct {
423 u32 sid;
424 union {
425 bool leaf;
426 u8 span;
427 };
428 } cfgi;
429
430 #define CMDQ_OP_TLBI_NH_ASID 0x11
431 #define CMDQ_OP_TLBI_NH_VA 0x12
432 #define CMDQ_OP_TLBI_EL2_ALL 0x20
433 #define CMDQ_OP_TLBI_S12_VMALL 0x28
434 #define CMDQ_OP_TLBI_S2_IPA 0x2a
435 #define CMDQ_OP_TLBI_NSNH_ALL 0x30
436 struct {
437 u16 asid;
438 u16 vmid;
439 bool leaf;
440 u64 addr;
441 } tlbi;
442
443 #define CMDQ_OP_PRI_RESP 0x41
444 struct {
445 u32 sid;
446 u32 ssid;
447 u16 grpid;
448 enum pri_resp resp;
449 } pri;
450
451 #define CMDQ_OP_CMD_SYNC 0x46
452 };
453};
454
455struct arm_smmu_queue {
456 int irq; /* Wired interrupt */
457
458 __le64 *base;
459 dma_addr_t base_dma;
460 u64 q_base;
461
462 size_t ent_dwords;
463 u32 max_n_shift;
464 u32 prod;
465 u32 cons;
466
467 u32 __iomem *prod_reg;
468 u32 __iomem *cons_reg;
469};
470
471struct arm_smmu_cmdq {
472 struct arm_smmu_queue q;
473 spinlock_t lock;
474};
475
476struct arm_smmu_evtq {
477 struct arm_smmu_queue q;
478 u32 max_stalls;
479};
480
481struct arm_smmu_priq {
482 struct arm_smmu_queue q;
483};
484
485/* High-level stream table and context descriptor structures */
486struct arm_smmu_strtab_l1_desc {
487 u8 span;
488
489 __le64 *l2ptr;
490 dma_addr_t l2ptr_dma;
491};
492
493struct arm_smmu_s1_cfg {
494 __le64 *cdptr;
495 dma_addr_t cdptr_dma;
496
497 struct arm_smmu_ctx_desc {
498 u16 asid;
499 u64 ttbr;
500 u64 tcr;
501 u64 mair;
502 } cd;
503};
504
505struct arm_smmu_s2_cfg {
506 u16 vmid;
507 u64 vttbr;
508 u64 vtcr;
509};
510
511struct arm_smmu_strtab_ent {
512 bool valid;
513
514 bool bypass; /* Overrides s1/s2 config */
515 struct arm_smmu_s1_cfg *s1_cfg;
516 struct arm_smmu_s2_cfg *s2_cfg;
517};
518
519struct arm_smmu_strtab_cfg {
520 __le64 *strtab;
521 dma_addr_t strtab_dma;
522 struct arm_smmu_strtab_l1_desc *l1_desc;
523 unsigned int num_l1_ents;
524
525 u64 strtab_base;
526 u32 strtab_base_cfg;
527};
528
529/* An SMMUv3 instance */
530struct arm_smmu_device {
531 struct device *dev;
532 void __iomem *base;
533
534#define ARM_SMMU_FEAT_2_LVL_STRTAB (1 << 0)
535#define ARM_SMMU_FEAT_2_LVL_CDTAB (1 << 1)
536#define ARM_SMMU_FEAT_TT_LE (1 << 2)
537#define ARM_SMMU_FEAT_TT_BE (1 << 3)
538#define ARM_SMMU_FEAT_PRI (1 << 4)
539#define ARM_SMMU_FEAT_ATS (1 << 5)
540#define ARM_SMMU_FEAT_SEV (1 << 6)
541#define ARM_SMMU_FEAT_MSI (1 << 7)
542#define ARM_SMMU_FEAT_COHERENCY (1 << 8)
543#define ARM_SMMU_FEAT_TRANS_S1 (1 << 9)
544#define ARM_SMMU_FEAT_TRANS_S2 (1 << 10)
545#define ARM_SMMU_FEAT_STALLS (1 << 11)
546#define ARM_SMMU_FEAT_HYP (1 << 12)
547 u32 features;
548
549#define ARM_SMMU_OPT_SKIP_PREFETCH (1 << 0)
550 u32 options;
551
552 struct arm_smmu_cmdq cmdq;
553 struct arm_smmu_evtq evtq;
554 struct arm_smmu_priq priq;
555
556 int gerr_irq;
557
558 unsigned long ias; /* IPA */
559 unsigned long oas; /* PA */
560
561#define ARM_SMMU_MAX_ASIDS (1 << 16)
562 unsigned int asid_bits;
563 DECLARE_BITMAP(asid_map, ARM_SMMU_MAX_ASIDS);
564
565#define ARM_SMMU_MAX_VMIDS (1 << 16)
566 unsigned int vmid_bits;
567 DECLARE_BITMAP(vmid_map, ARM_SMMU_MAX_VMIDS);
568
569 unsigned int ssid_bits;
570 unsigned int sid_bits;
571
572 struct arm_smmu_strtab_cfg strtab_cfg;
573 struct list_head list;
574};
575
576/* SMMU private data for an IOMMU group */
577struct arm_smmu_group {
578 struct arm_smmu_device *smmu;
579 struct arm_smmu_domain *domain;
580 int num_sids;
581 u32 *sids;
582 struct arm_smmu_strtab_ent ste;
583};
584
585/* SMMU private data for an IOMMU domain */
586enum arm_smmu_domain_stage {
587 ARM_SMMU_DOMAIN_S1 = 0,
588 ARM_SMMU_DOMAIN_S2,
589 ARM_SMMU_DOMAIN_NESTED,
590};
591
592struct arm_smmu_domain {
593 struct arm_smmu_device *smmu;
594 struct mutex init_mutex; /* Protects smmu pointer */
595
596 struct io_pgtable_ops *pgtbl_ops;
597 spinlock_t pgtbl_lock;
598
599 enum arm_smmu_domain_stage stage;
600 union {
601 struct arm_smmu_s1_cfg s1_cfg;
602 struct arm_smmu_s2_cfg s2_cfg;
603 };
604
605 struct iommu_domain domain;
606};
607
608/* Our list of SMMU instances */
609static DEFINE_SPINLOCK(arm_smmu_devices_lock);
610static LIST_HEAD(arm_smmu_devices);
611
612struct arm_smmu_option_prop {
613 u32 opt;
614 const char *prop;
615};
616
617static struct arm_smmu_option_prop arm_smmu_options[] = {
618 { ARM_SMMU_OPT_SKIP_PREFETCH, "hisilicon,broken-prefetch-cmd" },
619 { 0, NULL},
620};
621
622static struct arm_smmu_domain *to_smmu_domain(struct iommu_domain *dom)
623{
624 return container_of(dom, struct arm_smmu_domain, domain);
625}
626
627static void parse_driver_options(struct arm_smmu_device *smmu)
628{
629 int i = 0;
630
631 do {
632 if (of_property_read_bool(smmu->dev->of_node,
633 arm_smmu_options[i].prop)) {
634 smmu->options |= arm_smmu_options[i].opt;
635 dev_notice(smmu->dev, "option %s\n",
636 arm_smmu_options[i].prop);
637 }
638 } while (arm_smmu_options[++i].opt);
639}
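/*
 * A hypothetical device-tree fragment showing how such an option is
 * consumed (the node name and address are illustrative; only the
 * compatible string and the option property come from this driver's
 * binding and the table above):
 *
 *	smmu@2b400000 {
 *		compatible = "arm,smmu-v3";
 *		...
 *		hisilicon,broken-prefetch-cmd;
 *	};
 */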
640
641/* Low-level queue manipulation functions */
642static bool queue_full(struct arm_smmu_queue *q)
643{
644 return Q_IDX(q, q->prod) == Q_IDX(q, q->cons) &&
645 Q_WRP(q, q->prod) != Q_WRP(q, q->cons);
646}
647
648static bool queue_empty(struct arm_smmu_queue *q)
649{
650 return Q_IDX(q, q->prod) == Q_IDX(q, q->cons) &&
651 Q_WRP(q, q->prod) == Q_WRP(q, q->cons);
652}
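/*
 * Example of why the wrap bit matters (assuming max_n_shift == 8):
 * prod == 0x100 and cons == 0x000 share index 0 but differ in the wrap
 * bit, so the queue is full; prod == cons == 0x100 match in both index
 * and wrap bit, so the queue is empty.
 */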
653
654static void queue_sync_cons(struct arm_smmu_queue *q)
655{
656 q->cons = readl_relaxed(q->cons_reg);
657}
658
659static void queue_inc_cons(struct arm_smmu_queue *q)
660{
661 u32 cons = (Q_WRP(q, q->cons) | Q_IDX(q, q->cons)) + 1;
662
663 q->cons = Q_OVF(q, q->cons) | Q_WRP(q, cons) | Q_IDX(q, cons);
664 writel(q->cons, q->cons_reg);
665}
666
667static int queue_sync_prod(struct arm_smmu_queue *q)
668{
669 int ret = 0;
670 u32 prod = readl_relaxed(q->prod_reg);
671
672 if (Q_OVF(q, prod) != Q_OVF(q, q->prod))
673 ret = -EOVERFLOW;
674
675 q->prod = prod;
676 return ret;
677}
678
679static void queue_inc_prod(struct arm_smmu_queue *q)
680{
681 u32 prod = (Q_WRP(q, q->prod) | Q_IDX(q, q->prod)) + 1;
682
683 q->prod = Q_OVF(q, q->prod) | Q_WRP(q, prod) | Q_IDX(q, prod);
684 writel(q->prod, q->prod_reg);
685}
686
687static bool __queue_cons_before(struct arm_smmu_queue *q, u32 until)
688{
689 if (Q_WRP(q, q->cons) == Q_WRP(q, until))
690 return Q_IDX(q, q->cons) < Q_IDX(q, until);
691
692 return Q_IDX(q, q->cons) >= Q_IDX(q, until);
693}
694
695static int queue_poll_cons(struct arm_smmu_queue *q, u32 until, bool wfe)
696{
697 ktime_t timeout = ktime_add_us(ktime_get(), ARM_SMMU_POLL_TIMEOUT_US);
698
699 while (queue_sync_cons(q), __queue_cons_before(q, until)) {
700 if (ktime_compare(ktime_get(), timeout) > 0)
701 return -ETIMEDOUT;
702
703 if (wfe) {
704 wfe();
705 } else {
706 cpu_relax();
707 udelay(1);
708 }
709 }
710
711 return 0;
712}
713
714static void queue_write(__le64 *dst, u64 *src, size_t n_dwords)
715{
716 int i;
717
718 for (i = 0; i < n_dwords; ++i)
719 *dst++ = cpu_to_le64(*src++);
720}
721
722static int queue_insert_raw(struct arm_smmu_queue *q, u64 *ent)
723{
724 if (queue_full(q))
725 return -ENOSPC;
726
727 queue_write(Q_ENT(q, q->prod), ent, q->ent_dwords);
728 queue_inc_prod(q);
729 return 0;
730}
731
732static void queue_read(__le64 *dst, u64 *src, size_t n_dwords)
733{
734 int i;
735
736 for (i = 0; i < n_dwords; ++i)
737 *dst++ = le64_to_cpu(*src++);
738}
739
740static int queue_remove_raw(struct arm_smmu_queue *q, u64 *ent)
741{
742 if (queue_empty(q))
743 return -EAGAIN;
744
745 queue_read(ent, Q_ENT(q, q->cons), q->ent_dwords);
746 queue_inc_cons(q);
747 return 0;
748}
749
750/* High-level queue accessors */
751static int arm_smmu_cmdq_build_cmd(u64 *cmd, struct arm_smmu_cmdq_ent *ent)
752{
753 memset(cmd, 0, CMDQ_ENT_DWORDS << 3);
754 cmd[0] |= (ent->opcode & CMDQ_0_OP_MASK) << CMDQ_0_OP_SHIFT;
755
756 switch (ent->opcode) {
757 case CMDQ_OP_TLBI_EL2_ALL:
758 case CMDQ_OP_TLBI_NSNH_ALL:
759 break;
760 case CMDQ_OP_PREFETCH_CFG:
761 cmd[0] |= (u64)ent->prefetch.sid << CMDQ_PREFETCH_0_SID_SHIFT;
762 cmd[1] |= ent->prefetch.size << CMDQ_PREFETCH_1_SIZE_SHIFT;
763 cmd[1] |= ent->prefetch.addr & CMDQ_PREFETCH_1_ADDR_MASK;
764 break;
765 case CMDQ_OP_CFGI_STE:
766 cmd[0] |= (u64)ent->cfgi.sid << CMDQ_CFGI_0_SID_SHIFT;
767 cmd[1] |= ent->cfgi.leaf ? CMDQ_CFGI_1_LEAF : 0;
768 break;
769 case CMDQ_OP_CFGI_ALL:
770 /* Cover the entire SID range */
771 cmd[1] |= CMDQ_CFGI_1_RANGE_MASK << CMDQ_CFGI_1_RANGE_SHIFT;
772 break;
773 case CMDQ_OP_TLBI_NH_VA:
774 cmd[0] |= (u64)ent->tlbi.asid << CMDQ_TLBI_0_ASID_SHIFT;
775 cmd[1] |= ent->tlbi.leaf ? CMDQ_TLBI_1_LEAF : 0;
776 cmd[1] |= ent->tlbi.addr & CMDQ_TLBI_1_VA_MASK;
777 break;
778 case CMDQ_OP_TLBI_S2_IPA:
779 cmd[0] |= (u64)ent->tlbi.vmid << CMDQ_TLBI_0_VMID_SHIFT;
780 cmd[1] |= ent->tlbi.leaf ? CMDQ_TLBI_1_LEAF : 0;
781 cmd[1] |= ent->tlbi.addr & CMDQ_TLBI_1_IPA_MASK;
782 break;
783 case CMDQ_OP_TLBI_NH_ASID:
784 cmd[0] |= (u64)ent->tlbi.asid << CMDQ_TLBI_0_ASID_SHIFT;
785 /* Fallthrough */
786 case CMDQ_OP_TLBI_S12_VMALL:
787 cmd[0] |= (u64)ent->tlbi.vmid << CMDQ_TLBI_0_VMID_SHIFT;
788 break;
789 case CMDQ_OP_PRI_RESP:
790 cmd[0] |= ent->substream_valid ? CMDQ_0_SSV : 0;
791 cmd[0] |= ent->pri.ssid << CMDQ_PRI_0_SSID_SHIFT;
792 cmd[0] |= (u64)ent->pri.sid << CMDQ_PRI_0_SID_SHIFT;
793 cmd[1] |= ent->pri.grpid << CMDQ_PRI_1_GRPID_SHIFT;
794 switch (ent->pri.resp) {
795 case PRI_RESP_DENY:
796 cmd[1] |= CMDQ_PRI_1_RESP_DENY;
797 break;
798 case PRI_RESP_FAIL:
799 cmd[1] |= CMDQ_PRI_1_RESP_FAIL;
800 break;
801 case PRI_RESP_SUCC:
802 cmd[1] |= CMDQ_PRI_1_RESP_SUCC;
803 break;
804 default:
805 return -EINVAL;
806 }
807 break;
808 case CMDQ_OP_CMD_SYNC:
809 cmd[0] |= CMDQ_SYNC_0_CS_SEV;
810 break;
811 default:
812 return -ENOENT;
813 }
814
815 return 0;
816}
817
818static void arm_smmu_cmdq_skip_err(struct arm_smmu_device *smmu)
819{
820 static const char *cerror_str[] = {
821 [CMDQ_ERR_CERROR_NONE_IDX] = "No error",
822 [CMDQ_ERR_CERROR_ILL_IDX] = "Illegal command",
823 [CMDQ_ERR_CERROR_ABT_IDX] = "Abort on command fetch",
824 };
825
826 int i;
827 u64 cmd[CMDQ_ENT_DWORDS];
828 struct arm_smmu_queue *q = &smmu->cmdq.q;
829 u32 cons = readl_relaxed(q->cons_reg);
830 u32 idx = cons >> CMDQ_ERR_SHIFT & CMDQ_ERR_MASK;
831 struct arm_smmu_cmdq_ent cmd_sync = {
832 .opcode = CMDQ_OP_CMD_SYNC,
833 };
834
835 dev_err(smmu->dev, "CMDQ error (cons 0x%08x): %s\n", cons,
836 cerror_str[idx]);
837
838 switch (idx) {
839 case CMDQ_ERR_CERROR_ILL_IDX:
840 break;
841 case CMDQ_ERR_CERROR_ABT_IDX:
842 dev_err(smmu->dev, "retrying command fetch\n");
843 case CMDQ_ERR_CERROR_NONE_IDX:
844 return;
845 }
846
847 /*
848 * We may have concurrent producers, so we need to be careful
849 * not to touch any of the shadow cmdq state.
850 */
851 queue_read(cmd, Q_ENT(q, idx), q->ent_dwords);
852 dev_err(smmu->dev, "skipping command in error state:\n");
853 for (i = 0; i < ARRAY_SIZE(cmd); ++i)
854 dev_err(smmu->dev, "\t0x%016llx\n", (unsigned long long)cmd[i]);
855
856 /* Convert the erroneous command into a CMD_SYNC */
857 if (arm_smmu_cmdq_build_cmd(cmd, &cmd_sync)) {
858 dev_err(smmu->dev, "failed to convert to CMD_SYNC\n");
859 return;
860 }
861
862 queue_write(cmd, Q_ENT(q, idx), q->ent_dwords);
863}
864
865static void arm_smmu_cmdq_issue_cmd(struct arm_smmu_device *smmu,
866 struct arm_smmu_cmdq_ent *ent)
867{
868 u32 until;
869 u64 cmd[CMDQ_ENT_DWORDS];
870 bool wfe = !!(smmu->features & ARM_SMMU_FEAT_SEV);
871 struct arm_smmu_queue *q = &smmu->cmdq.q;
872
873 if (arm_smmu_cmdq_build_cmd(cmd, ent)) {
874 dev_warn(smmu->dev, "ignoring unknown CMDQ opcode 0x%x\n",
875 ent->opcode);
876 return;
877 }
878
879 spin_lock(&smmu->cmdq.lock);
880 while (until = q->prod + 1, queue_insert_raw(q, cmd) == -ENOSPC) {
881 /*
882 * Keep the queue locked, otherwise the producer could wrap
883 * twice and we could see a future consumer pointer that looks
884 * like it's behind us.
885 */
886 if (queue_poll_cons(q, until, wfe))
887 dev_err_ratelimited(smmu->dev, "CMDQ timeout\n");
888 }
889
890 if (ent->opcode == CMDQ_OP_CMD_SYNC && queue_poll_cons(q, until, wfe))
891 dev_err_ratelimited(smmu->dev, "CMD_SYNC timeout\n");
892 spin_unlock(&smmu->cmdq.lock);
893}
894
895/* Context descriptor manipulation functions */
896static u64 arm_smmu_cpu_tcr_to_cd(u64 tcr)
897{
898 u64 val = 0;
899
900 /* Repack the TCR. Just care about TTBR0 for now */
901 val |= ARM_SMMU_TCR2CD(tcr, T0SZ);
902 val |= ARM_SMMU_TCR2CD(tcr, TG0);
903 val |= ARM_SMMU_TCR2CD(tcr, IRGN0);
904 val |= ARM_SMMU_TCR2CD(tcr, ORGN0);
905 val |= ARM_SMMU_TCR2CD(tcr, SH0);
906 val |= ARM_SMMU_TCR2CD(tcr, EPD0);
907 val |= ARM_SMMU_TCR2CD(tcr, EPD1);
908 val |= ARM_SMMU_TCR2CD(tcr, IPS);
909 val |= ARM_SMMU_TCR2CD(tcr, TBI0);
910
911 return val;
912}
913
914static void arm_smmu_write_ctx_desc(struct arm_smmu_device *smmu,
915 struct arm_smmu_s1_cfg *cfg)
916{
917 u64 val;
918
919 /*
920 * We don't need to issue any invalidation here, as we'll invalidate
921 * the STE when installing the new entry anyway.
922 */
923 val = arm_smmu_cpu_tcr_to_cd(cfg->cd.tcr) |
924#ifdef __BIG_ENDIAN
925 CTXDESC_CD_0_ENDI |
926#endif
927 CTXDESC_CD_0_R | CTXDESC_CD_0_A | CTXDESC_CD_0_ASET_PRIVATE |
928 CTXDESC_CD_0_AA64 | (u64)cfg->cd.asid << CTXDESC_CD_0_ASID_SHIFT |
929 CTXDESC_CD_0_V;
930 cfg->cdptr[0] = cpu_to_le64(val);
931
932 val = cfg->cd.ttbr & CTXDESC_CD_1_TTB0_MASK << CTXDESC_CD_1_TTB0_SHIFT;
933 cfg->cdptr[1] = cpu_to_le64(val);
934
935 cfg->cdptr[3] = cpu_to_le64(cfg->cd.mair << CTXDESC_CD_3_MAIR_SHIFT);
936}
937
938/* Stream table manipulation functions */
939static void
940arm_smmu_write_strtab_l1_desc(__le64 *dst, struct arm_smmu_strtab_l1_desc *desc)
941{
942 u64 val = 0;
943
944 val |= (desc->span & STRTAB_L1_DESC_SPAN_MASK)
945 << STRTAB_L1_DESC_SPAN_SHIFT;
946 val |= desc->l2ptr_dma &
947 STRTAB_L1_DESC_L2PTR_MASK << STRTAB_L1_DESC_L2PTR_SHIFT;
948
949 *dst = cpu_to_le64(val);
950}
951
952static void arm_smmu_sync_ste_for_sid(struct arm_smmu_device *smmu, u32 sid)
953{
954 struct arm_smmu_cmdq_ent cmd = {
955 .opcode = CMDQ_OP_CFGI_STE,
956 .cfgi = {
957 .sid = sid,
958 .leaf = true,
959 },
960 };
961
962 arm_smmu_cmdq_issue_cmd(smmu, &cmd);
963 cmd.opcode = CMDQ_OP_CMD_SYNC;
964 arm_smmu_cmdq_issue_cmd(smmu, &cmd);
965}
966
967static void arm_smmu_write_strtab_ent(struct arm_smmu_device *smmu, u32 sid,
968 __le64 *dst, struct arm_smmu_strtab_ent *ste)
969{
970 /*
971 * This is hideously complicated, but we only really care about
972 * three cases at the moment:
973 *
974 * 1. Invalid (all zero) -> bypass (init)
975 * 2. Bypass -> translation (attach)
976 * 3. Translation -> bypass (detach)
977 *
978 * Given that we can't update the STE atomically and the SMMU
979 * doesn't read the thing in a defined order, that leaves us
980 * with the following maintenance requirements:
981 *
982 * 1. Update Config, return (init time STEs aren't live)
983 * 2. Write everything apart from dword 0, sync, write dword 0, sync
984 * 3. Update Config, sync
985 */
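	/*
	 * Concretely, for case 2 above: the code below writes dwords 1-3
	 * first, issues CFGI_STE + CMD_SYNC so the SMMU drops any cached
	 * copy, then rewrites dword 0 with the new Config/V bits and syncs
	 * again, so a live STE is never observed half-updated.
	 */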
986 u64 val = le64_to_cpu(dst[0]);
987 bool ste_live = false;
988 struct arm_smmu_cmdq_ent prefetch_cmd = {
989 .opcode = CMDQ_OP_PREFETCH_CFG,
990 .prefetch = {
991 .sid = sid,
992 },
993 };
994
995 if (val & STRTAB_STE_0_V) {
996 u64 cfg;
997
998 cfg = val & STRTAB_STE_0_CFG_MASK << STRTAB_STE_0_CFG_SHIFT;
999 switch (cfg) {
1000 case STRTAB_STE_0_CFG_BYPASS:
1001 break;
1002 case STRTAB_STE_0_CFG_S1_TRANS:
1003 case STRTAB_STE_0_CFG_S2_TRANS:
1004 ste_live = true;
1005 break;
1006 default:
1007 BUG(); /* STE corruption */
1008 }
1009 }
1010
1011 /* Nuke the existing Config, as we're going to rewrite it */
1012 val &= ~(STRTAB_STE_0_CFG_MASK << STRTAB_STE_0_CFG_SHIFT);
1013
1014 if (ste->valid)
1015 val |= STRTAB_STE_0_V;
1016 else
1017 val &= ~STRTAB_STE_0_V;
1018
1019 if (ste->bypass) {
1020 val |= disable_bypass ? STRTAB_STE_0_CFG_ABORT
1021 : STRTAB_STE_0_CFG_BYPASS;
1022 dst[0] = cpu_to_le64(val);
1023 dst[2] = 0; /* Nuke the VMID */
1024 if (ste_live)
1025 arm_smmu_sync_ste_for_sid(smmu, sid);
1026 return;
1027 }
1028
1029 if (ste->s1_cfg) {
1030 BUG_ON(ste_live);
1031 dst[1] = cpu_to_le64(
1032 STRTAB_STE_1_S1C_CACHE_WBRA
1033 << STRTAB_STE_1_S1CIR_SHIFT |
1034 STRTAB_STE_1_S1C_CACHE_WBRA
1035 << STRTAB_STE_1_S1COR_SHIFT |
1036 STRTAB_STE_1_S1C_SH_ISH << STRTAB_STE_1_S1CSH_SHIFT |
1037 STRTAB_STE_1_S1STALLD |
1038#ifdef CONFIG_PCI_ATS
1039 STRTAB_STE_1_EATS_TRANS << STRTAB_STE_1_EATS_SHIFT |
1040#endif
1041 STRTAB_STE_1_STRW_NSEL1 << STRTAB_STE_1_STRW_SHIFT);
1042
1043 val |= (ste->s1_cfg->cdptr_dma & STRTAB_STE_0_S1CTXPTR_MASK
1044 << STRTAB_STE_0_S1CTXPTR_SHIFT) |
1045 STRTAB_STE_0_CFG_S1_TRANS;
1046
1047 }
1048
1049 if (ste->s2_cfg) {
1050 BUG_ON(ste_live);
1051 dst[2] = cpu_to_le64(
1052 ste->s2_cfg->vmid << STRTAB_STE_2_S2VMID_SHIFT |
1053 (ste->s2_cfg->vtcr & STRTAB_STE_2_VTCR_MASK)
1054 << STRTAB_STE_2_VTCR_SHIFT |
1055#ifdef __BIG_ENDIAN
1056 STRTAB_STE_2_S2ENDI |
1057#endif
1058 STRTAB_STE_2_S2PTW | STRTAB_STE_2_S2AA64 |
1059 STRTAB_STE_2_S2R);
1060
1061 dst[3] = cpu_to_le64(ste->s2_cfg->vttbr &
1062 STRTAB_STE_3_S2TTB_MASK << STRTAB_STE_3_S2TTB_SHIFT);
1063
1064 val |= STRTAB_STE_0_CFG_S2_TRANS;
1065 }
1066
1067 arm_smmu_sync_ste_for_sid(smmu, sid);
1068 dst[0] = cpu_to_le64(val);
1069 arm_smmu_sync_ste_for_sid(smmu, sid);
1070
1071 /* It's likely that we'll want to use the new STE soon */
1072 if (!(smmu->options & ARM_SMMU_OPT_SKIP_PREFETCH))
1073 arm_smmu_cmdq_issue_cmd(smmu, &prefetch_cmd);
1074}
1075
1076static void arm_smmu_init_bypass_stes(u64 *strtab, unsigned int nent)
1077{
1078 unsigned int i;
1079 struct arm_smmu_strtab_ent ste = {
1080 .valid = true,
1081 .bypass = true,
1082 };
1083
1084 for (i = 0; i < nent; ++i) {
1085 arm_smmu_write_strtab_ent(NULL, -1, strtab, &ste);
1086 strtab += STRTAB_STE_DWORDS;
1087 }
1088}
1089
1090static int arm_smmu_init_l2_strtab(struct arm_smmu_device *smmu, u32 sid)
1091{
1092 size_t size;
1093 void *strtab;
1094 struct arm_smmu_strtab_cfg *cfg = &smmu->strtab_cfg;
1095 struct arm_smmu_strtab_l1_desc *desc = &cfg->l1_desc[sid >> STRTAB_SPLIT];
1096
1097 if (desc->l2ptr)
1098 return 0;
1099
1100 size = 1 << (STRTAB_SPLIT + ilog2(STRTAB_STE_DWORDS) + 3);
1101 strtab = &cfg->strtab[(sid >> STRTAB_SPLIT) * STRTAB_L1_DESC_DWORDS];
1102
1103 desc->span = STRTAB_SPLIT + 1;
1104 desc->l2ptr = dma_zalloc_coherent(smmu->dev, size, &desc->l2ptr_dma,
1105 GFP_KERNEL);
1106 if (!desc->l2ptr) {
1107 dev_err(smmu->dev,
1108 "failed to allocate l2 stream table for SID %u\n",
1109 sid);
1110 return -ENOMEM;
1111 }
1112
1113 arm_smmu_init_bypass_stes(desc->l2ptr, 1 << STRTAB_SPLIT);
1114 arm_smmu_write_strtab_l1_desc(strtab, desc);
1115 return 0;
1116}
1117
1118/* IRQ and event handlers */
1119static irqreturn_t arm_smmu_evtq_thread(int irq, void *dev)
1120{
1121 int i;
1122 struct arm_smmu_device *smmu = dev;
1123 struct arm_smmu_queue *q = &smmu->evtq.q;
1124 u64 evt[EVTQ_ENT_DWORDS];
1125
1126 while (!queue_remove_raw(q, evt)) {
1127 u8 id = evt[0] >> EVTQ_0_ID_SHIFT & EVTQ_0_ID_MASK;
1128
1129 dev_info(smmu->dev, "event 0x%02x received:\n", id);
1130 for (i = 0; i < ARRAY_SIZE(evt); ++i)
1131 dev_info(smmu->dev, "\t0x%016llx\n",
1132 (unsigned long long)evt[i]);
1133 }
1134
1135 /* Sync our overflow flag, as we believe we're up to speed */
1136 q->cons = Q_OVF(q, q->prod) | Q_WRP(q, q->cons) | Q_IDX(q, q->cons);
1137 return IRQ_HANDLED;
1138}
1139
1140static irqreturn_t arm_smmu_evtq_handler(int irq, void *dev)
1141{
1142 irqreturn_t ret = IRQ_WAKE_THREAD;
1143 struct arm_smmu_device *smmu = dev;
1144 struct arm_smmu_queue *q = &smmu->evtq.q;
1145
1146 /*
1147 * Not much we can do on overflow, so scream and pretend we're
1148 * trying harder.
1149 */
1150 if (queue_sync_prod(q) == -EOVERFLOW)
1151 dev_err(smmu->dev, "EVTQ overflow detected -- events lost\n");
1152 else if (queue_empty(q))
1153 ret = IRQ_NONE;
1154
1155 return ret;
1156}
1157
1158static irqreturn_t arm_smmu_priq_thread(int irq, void *dev)
1159{
1160 struct arm_smmu_device *smmu = dev;
1161 struct arm_smmu_queue *q = &smmu->priq.q;
1162 u64 evt[PRIQ_ENT_DWORDS];
1163
1164 while (!queue_remove_raw(q, evt)) {
1165 u32 sid, ssid;
1166 u16 grpid;
1167 bool ssv, last;
1168
1169 sid = evt[0] >> PRIQ_0_SID_SHIFT & PRIQ_0_SID_MASK;
1170 ssv = evt[0] & PRIQ_0_SSID_V;
1171 ssid = ssv ? evt[0] >> PRIQ_0_SSID_SHIFT & PRIQ_0_SSID_MASK : 0;
1172 last = evt[0] & PRIQ_0_PRG_LAST;
1173 grpid = evt[1] >> PRIQ_1_PRG_IDX_SHIFT & PRIQ_1_PRG_IDX_MASK;
1174
1175 dev_info(smmu->dev, "unexpected PRI request received:\n");
1176 dev_info(smmu->dev,
1177 "\tsid 0x%08x.0x%05x: [%u%s] %sprivileged %s%s%s access at iova 0x%016llx\n",
1178 sid, ssid, grpid, last ? "L" : "",
1179 evt[0] & PRIQ_0_PERM_PRIV ? "" : "un",
1180 evt[0] & PRIQ_0_PERM_READ ? "R" : "",
1181 evt[0] & PRIQ_0_PERM_WRITE ? "W" : "",
1182 evt[0] & PRIQ_0_PERM_EXEC ? "X" : "",
1183 evt[1] & PRIQ_1_ADDR_MASK << PRIQ_1_ADDR_SHIFT);
1184
1185 if (last) {
1186 struct arm_smmu_cmdq_ent cmd = {
1187 .opcode = CMDQ_OP_PRI_RESP,
1188 .substream_valid = ssv,
1189 .pri = {
1190 .sid = sid,
1191 .ssid = ssid,
1192 .grpid = grpid,
1193 .resp = PRI_RESP_DENY,
1194 },
1195 };
1196
1197 arm_smmu_cmdq_issue_cmd(smmu, &cmd);
1198 }
1199 }
1200
1201 /* Sync our overflow flag, as we believe we're up to speed */
1202 q->cons = Q_OVF(q, q->prod) | Q_WRP(q, q->cons) | Q_IDX(q, q->cons);
1203 return IRQ_HANDLED;
1204}
1205
1206static irqreturn_t arm_smmu_priq_handler(int irq, void *dev)
1207{
1208 irqreturn_t ret = IRQ_WAKE_THREAD;
1209 struct arm_smmu_device *smmu = dev;
1210 struct arm_smmu_queue *q = &smmu->priq.q;
1211
1212 /* PRIQ overflow indicates a programming error */
1213 if (queue_sync_prod(q) == -EOVERFLOW)
1214 dev_err(smmu->dev, "PRIQ overflow detected -- requests lost\n");
1215 else if (queue_empty(q))
1216 ret = IRQ_NONE;
1217
1218 return ret;
1219}
1220
1221static irqreturn_t arm_smmu_cmdq_sync_handler(int irq, void *dev)
1222{
1223 /* We don't actually use CMD_SYNC interrupts for anything */
1224 return IRQ_HANDLED;
1225}
1226
1227static int arm_smmu_device_disable(struct arm_smmu_device *smmu);
1228
1229static irqreturn_t arm_smmu_gerror_handler(int irq, void *dev)
1230{
1231 u32 gerror, gerrorn;
1232 struct arm_smmu_device *smmu = dev;
1233
1234 gerror = readl_relaxed(smmu->base + ARM_SMMU_GERROR);
1235 gerrorn = readl_relaxed(smmu->base + ARM_SMMU_GERRORN);
1236
1237 gerror ^= gerrorn;
1238 if (!(gerror & GERROR_ERR_MASK))
1239 return IRQ_NONE; /* No errors pending */
1240
1241 dev_warn(smmu->dev,
1242 "unexpected global error reported (0x%08x), this could be serious\n",
1243 gerror);
1244
1245 if (gerror & GERROR_SFM_ERR) {
1246 dev_err(smmu->dev, "device has entered Service Failure Mode!\n");
1247 arm_smmu_device_disable(smmu);
1248 }
1249
1250 if (gerror & GERROR_MSI_GERROR_ABT_ERR)
1251 dev_warn(smmu->dev, "GERROR MSI write aborted\n");
1252
1253 if (gerror & GERROR_MSI_PRIQ_ABT_ERR) {
1254 dev_warn(smmu->dev, "PRIQ MSI write aborted\n");
1255 arm_smmu_priq_handler(irq, smmu->dev);
1256 }
1257
1258 if (gerror & GERROR_MSI_EVTQ_ABT_ERR) {
1259 dev_warn(smmu->dev, "EVTQ MSI write aborted\n");
1260 arm_smmu_evtq_handler(irq, smmu->dev);
1261 }
1262
1263 if (gerror & GERROR_MSI_CMDQ_ABT_ERR) {
1264 dev_warn(smmu->dev, "CMDQ MSI write aborted\n");
1265 arm_smmu_cmdq_sync_handler(irq, smmu->dev);
1266 }
1267
1268 if (gerror & GERROR_PRIQ_ABT_ERR)
1269 dev_err(smmu->dev, "PRIQ write aborted -- events may have been lost\n");
1270
1271 if (gerror & GERROR_EVTQ_ABT_ERR)
1272 dev_err(smmu->dev, "EVTQ write aborted -- events may have been lost\n");
1273
1274 if (gerror & GERROR_CMDQ_ERR)
1275 arm_smmu_cmdq_skip_err(smmu);
1276
1277 writel(gerror, smmu->base + ARM_SMMU_GERRORN);
1278 return IRQ_HANDLED;
1279}
1280
1281/* IO_PGTABLE API */
1282static void __arm_smmu_tlb_sync(struct arm_smmu_device *smmu)
1283{
1284 struct arm_smmu_cmdq_ent cmd;
1285
1286 cmd.opcode = CMDQ_OP_CMD_SYNC;
1287 arm_smmu_cmdq_issue_cmd(smmu, &cmd);
1288}
1289
1290static void arm_smmu_tlb_sync(void *cookie)
1291{
1292 struct arm_smmu_domain *smmu_domain = cookie;
1293 __arm_smmu_tlb_sync(smmu_domain->smmu);
1294}
1295
1296static void arm_smmu_tlb_inv_context(void *cookie)
1297{
1298 struct arm_smmu_domain *smmu_domain = cookie;
1299 struct arm_smmu_device *smmu = smmu_domain->smmu;
1300 struct arm_smmu_cmdq_ent cmd;
1301
1302 if (smmu_domain->stage == ARM_SMMU_DOMAIN_S1) {
1303 cmd.opcode = CMDQ_OP_TLBI_NH_ASID;
1304 cmd.tlbi.asid = smmu_domain->s1_cfg.cd.asid;
1305 cmd.tlbi.vmid = 0;
1306 } else {
1307 cmd.opcode = CMDQ_OP_TLBI_S12_VMALL;
1308 cmd.tlbi.vmid = smmu_domain->s2_cfg.vmid;
1309 }
1310
1311 arm_smmu_cmdq_issue_cmd(smmu, &cmd);
1312 __arm_smmu_tlb_sync(smmu);
1313}
1314
1315static void arm_smmu_tlb_inv_range_nosync(unsigned long iova, size_t size,
1316 bool leaf, void *cookie)
1317{
1318 struct arm_smmu_domain *smmu_domain = cookie;
1319 struct arm_smmu_device *smmu = smmu_domain->smmu;
1320 struct arm_smmu_cmdq_ent cmd = {
1321 .tlbi = {
1322 .leaf = leaf,
1323 .addr = iova,
1324 },
1325 };
1326
1327 if (smmu_domain->stage == ARM_SMMU_DOMAIN_S1) {
1328 cmd.opcode = CMDQ_OP_TLBI_NH_VA;
1329 cmd.tlbi.asid = smmu_domain->s1_cfg.cd.asid;
1330 } else {
1331 cmd.opcode = CMDQ_OP_TLBI_S2_IPA;
1332 cmd.tlbi.vmid = smmu_domain->s2_cfg.vmid;
1333 }
1334
1335 arm_smmu_cmdq_issue_cmd(smmu, &cmd);
1336}
1337
1338static struct iommu_gather_ops arm_smmu_gather_ops = {
1339 .tlb_flush_all = arm_smmu_tlb_inv_context,
1340 .tlb_add_flush = arm_smmu_tlb_inv_range_nosync,
1341 .tlb_sync = arm_smmu_tlb_sync,
1342};
1343
1344/* IOMMU API */
1345static bool arm_smmu_capable(enum iommu_cap cap)
1346{
1347 switch (cap) {
1348 case IOMMU_CAP_CACHE_COHERENCY:
1349 return true;
1350 case IOMMU_CAP_INTR_REMAP:
1351 return true; /* MSIs are just memory writes */
1352 case IOMMU_CAP_NOEXEC:
1353 return true;
1354 default:
1355 return false;
1356 }
1357}
1358
1359static struct iommu_domain *arm_smmu_domain_alloc(unsigned type)
1360{
1361 struct arm_smmu_domain *smmu_domain;
1362
1363 if (type != IOMMU_DOMAIN_UNMANAGED)
1364 return NULL;
1365
1366 /*
1367 * Allocate the domain and initialise some of its data structures.
1368 * We can't really do anything meaningful until we've added a
1369 * master.
1370 */
1371 smmu_domain = kzalloc(sizeof(*smmu_domain), GFP_KERNEL);
1372 if (!smmu_domain)
1373 return NULL;
1374
1375 mutex_init(&smmu_domain->init_mutex);
1376 spin_lock_init(&smmu_domain->pgtbl_lock);
1377 return &smmu_domain->domain;
1378}
1379
1380static int arm_smmu_bitmap_alloc(unsigned long *map, int span)
1381{
1382 int idx, size = 1 << span;
1383
1384 do {
1385 idx = find_first_zero_bit(map, size);
1386 if (idx == size)
1387 return -ENOSPC;
1388 } while (test_and_set_bit(idx, map));
1389
1390 return idx;
1391}
1392
1393static void arm_smmu_bitmap_free(unsigned long *map, int idx)
1394{
1395 clear_bit(idx, map);
1396}
1397
1398static void arm_smmu_domain_free(struct iommu_domain *domain)
1399{
1400 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
1401 struct arm_smmu_device *smmu = smmu_domain->smmu;
1402
1403 free_io_pgtable_ops(smmu_domain->pgtbl_ops);
1404
1405 /* Free the CD and ASID, if we allocated them */
1406 if (smmu_domain->stage == ARM_SMMU_DOMAIN_S1) {
1407 struct arm_smmu_s1_cfg *cfg = &smmu_domain->s1_cfg;
1408
1409 if (cfg->cdptr) {
1410 dma_free_coherent(smmu_domain->smmu->dev,
1411 CTXDESC_CD_DWORDS << 3,
1412 cfg->cdptr,
1413 cfg->cdptr_dma);
1414
1415 arm_smmu_bitmap_free(smmu->asid_map, cfg->cd.asid);
1416 }
1417 } else {
1418 struct arm_smmu_s2_cfg *cfg = &smmu_domain->s2_cfg;
1419 if (cfg->vmid)
1420 arm_smmu_bitmap_free(smmu->vmid_map, cfg->vmid);
1421 }
1422
1423 kfree(smmu_domain);
1424}
1425
1426static int arm_smmu_domain_finalise_s1(struct arm_smmu_domain *smmu_domain,
1427 struct io_pgtable_cfg *pgtbl_cfg)
1428{
1429 int ret = -ENOMEM; /* the only failure path below is the CD allocation */
1430 u16 asid;
1431 struct arm_smmu_device *smmu = smmu_domain->smmu;
1432 struct arm_smmu_s1_cfg *cfg = &smmu_domain->s1_cfg;
1433
1434 asid = arm_smmu_bitmap_alloc(smmu->asid_map, smmu->asid_bits);
1435 if (IS_ERR_VALUE(asid))
1436 return asid;
1437
1438 cfg->cdptr = dma_zalloc_coherent(smmu->dev, CTXDESC_CD_DWORDS << 3,
1439 &cfg->cdptr_dma, GFP_KERNEL);
1440 if (!cfg->cdptr) {
1441 dev_warn(smmu->dev, "failed to allocate context descriptor\n");
1442 goto out_free_asid;
1443 }
1444
1445 cfg->cd.asid = asid;
1446 cfg->cd.ttbr = pgtbl_cfg->arm_lpae_s1_cfg.ttbr[0];
1447 cfg->cd.tcr = pgtbl_cfg->arm_lpae_s1_cfg.tcr;
1448 cfg->cd.mair = pgtbl_cfg->arm_lpae_s1_cfg.mair[0];
1449 return 0;
1450
1451out_free_asid:
1452 arm_smmu_bitmap_free(smmu->asid_map, asid);
1453 return ret;
1454}
1455
1456static int arm_smmu_domain_finalise_s2(struct arm_smmu_domain *smmu_domain,
1457 struct io_pgtable_cfg *pgtbl_cfg)
1458{
1459 u16 vmid;
1460 struct arm_smmu_device *smmu = smmu_domain->smmu;
1461 struct arm_smmu_s2_cfg *cfg = &smmu_domain->s2_cfg;
1462
1463 vmid = arm_smmu_bitmap_alloc(smmu->vmid_map, smmu->vmid_bits);
1464 if (IS_ERR_VALUE(vmid))
1465 return vmid;
1466
1467 cfg->vmid = vmid;
1468 cfg->vttbr = pgtbl_cfg->arm_lpae_s2_cfg.vttbr;
1469 cfg->vtcr = pgtbl_cfg->arm_lpae_s2_cfg.vtcr;
1470 return 0;
1471}
1472
1473static struct iommu_ops arm_smmu_ops;
1474
1475static int arm_smmu_domain_finalise(struct iommu_domain *domain)
1476{
1477 int ret;
1478 unsigned long ias, oas;
1479 enum io_pgtable_fmt fmt;
1480 struct io_pgtable_cfg pgtbl_cfg;
1481 struct io_pgtable_ops *pgtbl_ops;
1482 int (*finalise_stage_fn)(struct arm_smmu_domain *,
1483 struct io_pgtable_cfg *);
1484 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
1485 struct arm_smmu_device *smmu = smmu_domain->smmu;
1486
1487 /* Restrict the stage to what we can actually support */
1488 if (!(smmu->features & ARM_SMMU_FEAT_TRANS_S1))
1489 smmu_domain->stage = ARM_SMMU_DOMAIN_S2;
1490 if (!(smmu->features & ARM_SMMU_FEAT_TRANS_S2))
1491 smmu_domain->stage = ARM_SMMU_DOMAIN_S1;
1492
1493 switch (smmu_domain->stage) {
1494 case ARM_SMMU_DOMAIN_S1:
1495 ias = VA_BITS;
1496 oas = smmu->ias;
1497 fmt = ARM_64_LPAE_S1;
1498 finalise_stage_fn = arm_smmu_domain_finalise_s1;
1499 break;
1500 case ARM_SMMU_DOMAIN_NESTED:
1501 case ARM_SMMU_DOMAIN_S2:
1502 ias = smmu->ias;
1503 oas = smmu->oas;
1504 fmt = ARM_64_LPAE_S2;
1505 finalise_stage_fn = arm_smmu_domain_finalise_s2;
1506 break;
1507 default:
1508 return -EINVAL;
1509 }
1510
1511 pgtbl_cfg = (struct io_pgtable_cfg) {
1512 .pgsize_bitmap = arm_smmu_ops.pgsize_bitmap,
1513 .ias = ias,
1514 .oas = oas,
1515 .tlb = &arm_smmu_gather_ops,
1516 .iommu_dev = smmu->dev,
1517 };
1518
1519 pgtbl_ops = alloc_io_pgtable_ops(fmt, &pgtbl_cfg, smmu_domain);
1520 if (!pgtbl_ops)
1521 return -ENOMEM;
1522
1523 arm_smmu_ops.pgsize_bitmap = pgtbl_cfg.pgsize_bitmap;
1524 smmu_domain->pgtbl_ops = pgtbl_ops;
1525
1526 ret = finalise_stage_fn(smmu_domain, &pgtbl_cfg);
1527 if (IS_ERR_VALUE(ret))
1528 free_io_pgtable_ops(pgtbl_ops);
1529
1530 return ret;
1531}
1532
1533static struct arm_smmu_group *arm_smmu_group_get(struct device *dev)
1534{
1535 struct iommu_group *group;
1536 struct arm_smmu_group *smmu_group;
1537
1538 group = iommu_group_get(dev);
1539 if (!group)
1540 return NULL;
1541
1542 smmu_group = iommu_group_get_iommudata(group);
1543 iommu_group_put(group);
1544 return smmu_group;
1545}
1546
1547static __le64 *arm_smmu_get_step_for_sid(struct arm_smmu_device *smmu, u32 sid)
1548{
1549 __le64 *step;
1550 struct arm_smmu_strtab_cfg *cfg = &smmu->strtab_cfg;
1551
1552 if (smmu->features & ARM_SMMU_FEAT_2_LVL_STRTAB) {
1553 struct arm_smmu_strtab_l1_desc *l1_desc;
1554 int idx;
1555
1556 /* Two-level walk */
1557 idx = (sid >> STRTAB_SPLIT) * STRTAB_L1_DESC_DWORDS;
1558 l1_desc = &cfg->l1_desc[idx];
1559 idx = (sid & ((1 << STRTAB_SPLIT) - 1)) * STRTAB_STE_DWORDS;
1560 step = &l1_desc->l2ptr[idx];
1561 } else {
1562 /* Simple linear lookup */
1563 step = &cfg->strtab[sid * STRTAB_STE_DWORDS];
1564 }
1565
1566 return step;
1567}
1568
1569static int arm_smmu_install_ste_for_group(struct arm_smmu_group *smmu_group)
1570{
1571 int i;
1572 struct arm_smmu_domain *smmu_domain = smmu_group->domain;
1573 struct arm_smmu_strtab_ent *ste = &smmu_group->ste;
1574 struct arm_smmu_device *smmu = smmu_group->smmu;
1575
1576 if (smmu_domain->stage == ARM_SMMU_DOMAIN_S1) {
1577 ste->s1_cfg = &smmu_domain->s1_cfg;
1578 ste->s2_cfg = NULL;
1579 arm_smmu_write_ctx_desc(smmu, ste->s1_cfg);
1580 } else {
1581 ste->s1_cfg = NULL;
1582 ste->s2_cfg = &smmu_domain->s2_cfg;
1583 }
1584
1585 for (i = 0; i < smmu_group->num_sids; ++i) {
1586 u32 sid = smmu_group->sids[i];
1587 __le64 *step = arm_smmu_get_step_for_sid(smmu, sid);
1588
1589 arm_smmu_write_strtab_ent(smmu, sid, step, ste);
1590 }
1591
1592 return 0;
1593}
1594
1595static int arm_smmu_attach_dev(struct iommu_domain *domain, struct device *dev)
1596{
1597 int ret = 0;
1598 struct arm_smmu_device *smmu;
1599 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
1600 struct arm_smmu_group *smmu_group = arm_smmu_group_get(dev);
1601
1602 if (!smmu_group)
1603 return -ENOENT;
1604
1605 /* Already attached to a different domain? */
1606 if (smmu_group->domain && smmu_group->domain != smmu_domain)
1607 return -EEXIST;
1608
1609 smmu = smmu_group->smmu;
1610 mutex_lock(&smmu_domain->init_mutex);
1611
1612 if (!smmu_domain->smmu) {
1613 smmu_domain->smmu = smmu;
1614 ret = arm_smmu_domain_finalise(domain);
1615 if (ret) {
1616 smmu_domain->smmu = NULL;
1617 goto out_unlock;
1618 }
1619 } else if (smmu_domain->smmu != smmu) {
1620 dev_err(dev,
1621 "cannot attach to SMMU %s (upstream of %s)\n",
1622 dev_name(smmu_domain->smmu->dev),
1623 dev_name(smmu->dev));
1624 ret = -ENXIO;
1625 goto out_unlock;
1626 }
1627
1628 /* Group already attached to this domain? */
1629 if (smmu_group->domain)
1630 goto out_unlock;
1631
1632 smmu_group->domain = smmu_domain;
1633 smmu_group->ste.bypass = false;
1634
1635 ret = arm_smmu_install_ste_for_group(smmu_group);
1636 if (IS_ERR_VALUE(ret))
1637 smmu_group->domain = NULL;
1638
1639out_unlock:
1640 mutex_unlock(&smmu_domain->init_mutex);
1641 return ret;
1642}
1643
1644static void arm_smmu_detach_dev(struct iommu_domain *domain, struct device *dev)
1645{
1646 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
1647 struct arm_smmu_group *smmu_group = arm_smmu_group_get(dev);
1648
1649 BUG_ON(!smmu_domain);
1650 BUG_ON(!smmu_group);
1651
1652 mutex_lock(&smmu_domain->init_mutex);
1653 BUG_ON(smmu_group->domain != smmu_domain);
1654
1655 smmu_group->ste.bypass = true;
1656 if (IS_ERR_VALUE(arm_smmu_install_ste_for_group(smmu_group)))
1657 dev_warn(dev, "failed to install bypass STE\n");
1658
1659 smmu_group->domain = NULL;
1660 mutex_unlock(&smmu_domain->init_mutex);
1661}
1662
1663static int arm_smmu_map(struct iommu_domain *domain, unsigned long iova,
1664 phys_addr_t paddr, size_t size, int prot)
1665{
1666 int ret;
1667 unsigned long flags;
1668 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
1669 struct io_pgtable_ops *ops = smmu_domain->pgtbl_ops;
1670
1671 if (!ops)
1672 return -ENODEV;
1673
1674 spin_lock_irqsave(&smmu_domain->pgtbl_lock, flags);
1675 ret = ops->map(ops, iova, paddr, size, prot);
1676 spin_unlock_irqrestore(&smmu_domain->pgtbl_lock, flags);
1677 return ret;
1678}
1679
1680static size_t
1681arm_smmu_unmap(struct iommu_domain *domain, unsigned long iova, size_t size)
1682{
1683 size_t ret;
1684 unsigned long flags;
1685 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
1686 struct io_pgtable_ops *ops = smmu_domain->pgtbl_ops;
1687
1688 if (!ops)
1689 return 0;
1690
1691 spin_lock_irqsave(&smmu_domain->pgtbl_lock, flags);
1692 ret = ops->unmap(ops, iova, size);
1693 spin_unlock_irqrestore(&smmu_domain->pgtbl_lock, flags);
1694 return ret;
1695}
1696
1697static phys_addr_t
1698arm_smmu_iova_to_phys(struct iommu_domain *domain, dma_addr_t iova)
1699{
1700 phys_addr_t ret;
1701 unsigned long flags;
1702 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
1703 struct io_pgtable_ops *ops = smmu_domain->pgtbl_ops;
1704
1705 if (!ops)
1706 return 0;
1707
1708 spin_lock_irqsave(&smmu_domain->pgtbl_lock, flags);
1709 ret = ops->iova_to_phys(ops, iova);
1710 spin_unlock_irqrestore(&smmu_domain->pgtbl_lock, flags);
1711
1712 return ret;
1713}
1714
1715static int __arm_smmu_get_pci_sid(struct pci_dev *pdev, u16 alias, void *sidp)
1716{
1717 *(u32 *)sidp = alias;
1718 return 0; /* Continue walking */
1719}
1720
1721static void __arm_smmu_release_pci_iommudata(void *data)
1722{
1723 kfree(data);
1724}
1725
1726static struct arm_smmu_device *arm_smmu_get_for_pci_dev(struct pci_dev *pdev)
1727{
1728 struct device_node *of_node;
1729 struct arm_smmu_device *curr, *smmu = NULL;
1730 struct pci_bus *bus = pdev->bus;
1731
1732 /* Walk up to the root bus */
1733 while (!pci_is_root_bus(bus))
1734 bus = bus->parent;
1735
1736 /* Follow the "iommus" phandle from the host controller */
1737 of_node = of_parse_phandle(bus->bridge->parent->of_node, "iommus", 0);
1738 if (!of_node)
1739 return NULL;
1740
1741 /* See if we can find an SMMU corresponding to the phandle */
1742 spin_lock(&arm_smmu_devices_lock);
1743 list_for_each_entry(curr, &arm_smmu_devices, list) {
1744 if (curr->dev->of_node == of_node) {
1745 smmu = curr;
1746 break;
1747 }
1748 }
1749 spin_unlock(&arm_smmu_devices_lock);
1750 of_node_put(of_node);
1751 return smmu;
1752}
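/*
 * A sketch of the firmware description this lookup expects (node names
 * and addresses are illustrative): the PCI host controller node carries
 * an "iommus" phandle to the SMMUv3 node, e.g.
 *
 *	pcie@40000000 {
 *		...
 *		iommus = <&smmu>;
 *	};
 */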
1753
1754static bool arm_smmu_sid_in_range(struct arm_smmu_device *smmu, u32 sid)
1755{
1756 unsigned long limit = smmu->strtab_cfg.num_l1_ents;
1757
1758 if (smmu->features & ARM_SMMU_FEAT_2_LVL_STRTAB)
1759 limit *= 1UL << STRTAB_SPLIT;
1760
1761 return sid < limit;
1762}
1763
1764static int arm_smmu_add_device(struct device *dev)
1765{
1766 int i, ret;
1767 u32 sid, *sids;
1768 struct pci_dev *pdev;
1769 struct iommu_group *group;
1770 struct arm_smmu_group *smmu_group;
1771 struct arm_smmu_device *smmu;
1772
1773 /* We only support PCI, for now */
1774 if (!dev_is_pci(dev))
1775 return -ENODEV;
1776
1777 pdev = to_pci_dev(dev);
1778 group = iommu_group_get_for_dev(dev);
1779 if (IS_ERR(group))
1780 return PTR_ERR(group);
1781
1782 smmu_group = iommu_group_get_iommudata(group);
1783 if (!smmu_group) {
1784 smmu = arm_smmu_get_for_pci_dev(pdev);
1785 if (!smmu) {
1786 ret = -ENOENT;
1787 goto out_put_group;
1788 }
1789
1790 smmu_group = kzalloc(sizeof(*smmu_group), GFP_KERNEL);
1791 if (!smmu_group) {
1792 ret = -ENOMEM;
1793 goto out_put_group;
1794 }
1795
1796 smmu_group->ste.valid = true;
1797 smmu_group->smmu = smmu;
1798 iommu_group_set_iommudata(group, smmu_group,
1799 __arm_smmu_release_pci_iommudata);
1800 } else {
1801 smmu = smmu_group->smmu;
1802 }
1803
1804 /* Assume SID == RID until firmware tells us otherwise */
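	/*
	 * For example, a function at bus 2, device 3, function 1 has
	 * RID (2 << 8) | (3 << 3) | 1 == 0x0219, and that value is used
	 * directly as the StreamID unless a DMA alias overrides it.
	 */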
1805 pci_for_each_dma_alias(pdev, __arm_smmu_get_pci_sid, &sid);
1806 for (i = 0; i < smmu_group->num_sids; ++i) {
1807 /* If we already know about this SID, then we're done */
1808 if (smmu_group->sids[i] == sid)
1809 return 0;
1810 }
1811
1812 /* Check the SID is in range of the SMMU and our stream table */
1813 if (!arm_smmu_sid_in_range(smmu, sid)) {
1814 ret = -ERANGE;
1815 goto out_put_group;
1816 }
1817
1818 /* Ensure l2 strtab is initialised */
1819 if (smmu->features & ARM_SMMU_FEAT_2_LVL_STRTAB) {
1820 ret = arm_smmu_init_l2_strtab(smmu, sid);
1821 if (ret)
1822 goto out_put_group;
1823 }
1824
1825 /* Resize the SID array for the group */
1826 smmu_group->num_sids++;
1827 sids = krealloc(smmu_group->sids, smmu_group->num_sids * sizeof(*sids),
1828 GFP_KERNEL);
1829 if (!sids) {
1830 smmu_group->num_sids--;
1831 ret = -ENOMEM;
1832 goto out_put_group;
1833 }
1834
1835 /* Add the new SID */
1836 sids[smmu_group->num_sids - 1] = sid;
1837 smmu_group->sids = sids;
1838 return 0;
1839
1840out_put_group:
1841 iommu_group_put(group);
1842 return ret;
1843}
1844
1845static void arm_smmu_remove_device(struct device *dev)
1846{
1847 iommu_group_remove_device(dev);
1848}
1849
1850static int arm_smmu_domain_get_attr(struct iommu_domain *domain,
1851 enum iommu_attr attr, void *data)
1852{
1853 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
1854
1855 switch (attr) {
1856 case DOMAIN_ATTR_NESTING:
1857 *(int *)data = (smmu_domain->stage == ARM_SMMU_DOMAIN_NESTED);
1858 return 0;
1859 default:
1860 return -ENODEV;
1861 }
1862}
1863
1864static int arm_smmu_domain_set_attr(struct iommu_domain *domain,
1865 enum iommu_attr attr, void *data)
1866{
1867 int ret = 0;
1868 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
1869
1870 mutex_lock(&smmu_domain->init_mutex);
1871
1872 switch (attr) {
1873 case DOMAIN_ATTR_NESTING:
1874 if (smmu_domain->smmu) {
1875 ret = -EPERM;
1876 goto out_unlock;
1877 }
1878
1879 if (*(int *)data)
1880 smmu_domain->stage = ARM_SMMU_DOMAIN_NESTED;
1881 else
1882 smmu_domain->stage = ARM_SMMU_DOMAIN_S1;
1883
1884 break;
1885 default:
1886 ret = -ENODEV;
1887 }
1888
1889out_unlock:
1890 mutex_unlock(&smmu_domain->init_mutex);
1891 return ret;
1892}
1893
1894static struct iommu_ops arm_smmu_ops = {
1895 .capable = arm_smmu_capable,
1896 .domain_alloc = arm_smmu_domain_alloc,
1897 .domain_free = arm_smmu_domain_free,
1898 .attach_dev = arm_smmu_attach_dev,
1899 .detach_dev = arm_smmu_detach_dev,
1900 .map = arm_smmu_map,
1901 .unmap = arm_smmu_unmap,
1902 .iova_to_phys = arm_smmu_iova_to_phys,
1903 .add_device = arm_smmu_add_device,
1904 .remove_device = arm_smmu_remove_device,
1905 .domain_get_attr = arm_smmu_domain_get_attr,
1906 .domain_set_attr = arm_smmu_domain_set_attr,
1907 .pgsize_bitmap = -1UL, /* Restricted during device attach */
1908};
1909
1910/* Probing and initialisation functions */
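/*
 * Allocate one circular queue (command, event or PRI queue) from
 * DMA-coherent memory, record where its PROD/CONS registers live and
 * pre-compute the Q_BASE value (RWA, base address and log2 size) that is
 * written to the hardware at reset time.
 */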
1911static int arm_smmu_init_one_queue(struct arm_smmu_device *smmu,
1912 struct arm_smmu_queue *q,
1913 unsigned long prod_off,
1914 unsigned long cons_off,
1915 size_t dwords)
1916{
1917 size_t qsz = ((1 << q->max_n_shift) * dwords) << 3;
1918
1919 q->base = dma_alloc_coherent(smmu->dev, qsz, &q->base_dma, GFP_KERNEL);
1920 if (!q->base) {
1921 dev_err(smmu->dev, "failed to allocate queue (0x%zx bytes)\n",
1922 qsz);
1923 return -ENOMEM;
1924 }
1925
1926 q->prod_reg = smmu->base + prod_off;
1927 q->cons_reg = smmu->base + cons_off;
1928 q->ent_dwords = dwords;
1929
1930 q->q_base = Q_BASE_RWA;
1931 q->q_base |= q->base_dma & Q_BASE_ADDR_MASK << Q_BASE_ADDR_SHIFT;
1932 q->q_base |= (q->max_n_shift & Q_BASE_LOG2SIZE_MASK)
1933 << Q_BASE_LOG2SIZE_SHIFT;
1934
1935 q->prod = q->cons = 0;
1936 return 0;
1937}
1938
1939static void arm_smmu_free_one_queue(struct arm_smmu_device *smmu,
1940 struct arm_smmu_queue *q)
1941{
1942 size_t qsz = ((1 << q->max_n_shift) * q->ent_dwords) << 3;
1943
1944 dma_free_coherent(smmu->dev, qsz, q->base, q->base_dma);
1945}
1946
1947static void arm_smmu_free_queues(struct arm_smmu_device *smmu)
1948{
1949 arm_smmu_free_one_queue(smmu, &smmu->cmdq.q);
1950 arm_smmu_free_one_queue(smmu, &smmu->evtq.q);
1951
1952 if (smmu->features & ARM_SMMU_FEAT_PRI)
1953 arm_smmu_free_one_queue(smmu, &smmu->priq.q);
1954}
1955
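/*
 * Bring up the command and event queues; the PRI queue is only allocated
 * when the SMMU advertises PRI support in its feature set.
 */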
1956static int arm_smmu_init_queues(struct arm_smmu_device *smmu)
1957{
1958 int ret;
1959
1960 /* cmdq */
1961 spin_lock_init(&smmu->cmdq.lock);
1962 ret = arm_smmu_init_one_queue(smmu, &smmu->cmdq.q, ARM_SMMU_CMDQ_PROD,
1963 ARM_SMMU_CMDQ_CONS, CMDQ_ENT_DWORDS);
1964 if (ret)
1965 goto out;
1966
1967 /* evtq */
1968 ret = arm_smmu_init_one_queue(smmu, &smmu->evtq.q, ARM_SMMU_EVTQ_PROD,
1969 ARM_SMMU_EVTQ_CONS, EVTQ_ENT_DWORDS);
1970 if (ret)
1971 goto out_free_cmdq;
1972
1973 /* priq */
1974 if (!(smmu->features & ARM_SMMU_FEAT_PRI))
1975 return 0;
1976
1977 ret = arm_smmu_init_one_queue(smmu, &smmu->priq.q, ARM_SMMU_PRIQ_PROD,
1978 ARM_SMMU_PRIQ_CONS, PRIQ_ENT_DWORDS);
1979 if (ret)
1980 goto out_free_evtq;
1981
1982 return 0;
1983
1984out_free_evtq:
1985 arm_smmu_free_one_queue(smmu, &smmu->evtq.q);
1986out_free_cmdq:
1987 arm_smmu_free_one_queue(smmu, &smmu->cmdq.q);
1988out:
1989 return ret;
1990}
1991
1992static void arm_smmu_free_l2_strtab(struct arm_smmu_device *smmu)
1993{
1994 int i;
1995 size_t size;
1996 struct arm_smmu_strtab_cfg *cfg = &smmu->strtab_cfg;
1997
1998 size = 1 << (STRTAB_SPLIT + ilog2(STRTAB_STE_DWORDS) + 3);
1999 for (i = 0; i < cfg->num_l1_ents; ++i) {
2000 struct arm_smmu_strtab_l1_desc *desc = &cfg->l1_desc[i];
2001
2002 if (!desc->l2ptr)
2003 continue;
2004
2005 dma_free_coherent(smmu->dev, size, desc->l2ptr,
2006 desc->l2ptr_dma);
2007 }
2008}
2009
2010static int arm_smmu_init_l1_strtab(struct arm_smmu_device *smmu)
2011{
2012 unsigned int i;
2013 struct arm_smmu_strtab_cfg *cfg = &smmu->strtab_cfg;
2014 size_t size = sizeof(*cfg->l1_desc) * cfg->num_l1_ents;
2015 void *strtab = smmu->strtab_cfg.strtab;
2016
2017 cfg->l1_desc = devm_kzalloc(smmu->dev, size, GFP_KERNEL);
2018 if (!cfg->l1_desc) {
2019 dev_err(smmu->dev, "failed to allocate l1 stream table desc\n");
2020 return -ENOMEM;
2021 }
2022
2023 for (i = 0; i < cfg->num_l1_ents; ++i) {
2024 arm_smmu_write_strtab_l1_desc(strtab, &cfg->l1_desc[i]);
2025 strtab += STRTAB_L1_DESC_DWORDS << 3;
2026 }
2027
2028 return 0;
2029}
2030
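/*
 * Two-level stream table: allocate and initialise the level-1 descriptor
 * table here. The level-2 tables (1 << STRTAB_SPLIT STEs each) are
 * allocated lazily, per SID, by arm_smmu_init_l2_strtab().
 */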
2031static int arm_smmu_init_strtab_2lvl(struct arm_smmu_device *smmu)
2032{
2033 void *strtab;
2034 u64 reg;
Will Deacond2e88e72015-06-30 10:02:28 +01002035 u32 size, l1size;
Will Deacon48ec83b2015-05-27 17:25:59 +01002036 int ret;
2037 struct arm_smmu_strtab_cfg *cfg = &smmu->strtab_cfg;
2038
Will Deacon28c8b402015-07-16 17:50:12 +01002039 /*
2040 * If we can resolve everything with a single L2 table, then we
2041 * just need a single L1 descriptor. Otherwise, calculate the L1
2042 * size, capped to the SIDSIZE.
2043 */
2044 if (smmu->sid_bits < STRTAB_SPLIT) {
2045 size = 0;
2046 } else {
2047 size = STRTAB_L1_SZ_SHIFT - (ilog2(STRTAB_L1_DESC_DWORDS) + 3);
2048 size = min(size, smmu->sid_bits - STRTAB_SPLIT);
2049 }
Will Deacond2e88e72015-06-30 10:02:28 +01002050 cfg->num_l1_ents = 1 << size;
2051
2052 size += STRTAB_SPLIT;
2053 if (size < smmu->sid_bits)
Will Deacon48ec83b2015-05-27 17:25:59 +01002054 dev_warn(smmu->dev,
2055 "2-level strtab only covers %u/%u bits of SID\n",
Will Deacond2e88e72015-06-30 10:02:28 +01002056 size, smmu->sid_bits);
Will Deacon48ec83b2015-05-27 17:25:59 +01002057
Will Deacond2e88e72015-06-30 10:02:28 +01002058 l1size = cfg->num_l1_ents * (STRTAB_L1_DESC_DWORDS << 3);
2059 strtab = dma_zalloc_coherent(smmu->dev, l1size, &cfg->strtab_dma,
Will Deacon48ec83b2015-05-27 17:25:59 +01002060 GFP_KERNEL);
2061 if (!strtab) {
2062 dev_err(smmu->dev,
2063 "failed to allocate l1 stream table (%u bytes)\n",
2064 l1size);
2065 return -ENOMEM;
2066 }
2067 cfg->strtab = strtab;
2068
2069 /* Configure strtab_base_cfg for 2 levels */
2070 reg = STRTAB_BASE_CFG_FMT_2LVL;
2071 reg |= (size & STRTAB_BASE_CFG_LOG2SIZE_MASK)
2072 << STRTAB_BASE_CFG_LOG2SIZE_SHIFT;
2073 reg |= (STRTAB_SPLIT & STRTAB_BASE_CFG_SPLIT_MASK)
2074 << STRTAB_BASE_CFG_SPLIT_SHIFT;
2075 cfg->strtab_base_cfg = reg;
2076
2077 ret = arm_smmu_init_l1_strtab(smmu);
2078 if (ret)
2079 dma_free_coherent(smmu->dev,
Will Deacond2e88e72015-06-30 10:02:28 +01002080 l1size,
Will Deacon48ec83b2015-05-27 17:25:59 +01002081 strtab,
2082 cfg->strtab_dma);
2083 return ret;
2084}
2085
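/*
 * Linear stream table: one STE per possible SID (1 << sid_bits), with
 * every entry initially configured as a bypass STE.
 */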
2086static int arm_smmu_init_strtab_linear(struct arm_smmu_device *smmu)
2087{
2088 void *strtab;
2089 u64 reg;
2090 u32 size;
2091 struct arm_smmu_strtab_cfg *cfg = &smmu->strtab_cfg;
2092
2093 size = (1 << smmu->sid_bits) * (STRTAB_STE_DWORDS << 3);
2094 strtab = dma_zalloc_coherent(smmu->dev, size, &cfg->strtab_dma,
2095 GFP_KERNEL);
2096 if (!strtab) {
2097 dev_err(smmu->dev,
2098 "failed to allocate linear stream table (%u bytes)\n",
2099 size);
2100 return -ENOMEM;
2101 }
2102 cfg->strtab = strtab;
2103 cfg->num_l1_ents = 1 << smmu->sid_bits;
2104
2105 /* Configure strtab_base_cfg for a linear table covering all SIDs */
2106 reg = STRTAB_BASE_CFG_FMT_LINEAR;
2107 reg |= (smmu->sid_bits & STRTAB_BASE_CFG_LOG2SIZE_MASK)
2108 << STRTAB_BASE_CFG_LOG2SIZE_SHIFT;
2109 cfg->strtab_base_cfg = reg;
2110
2111 arm_smmu_init_bypass_stes(strtab, cfg->num_l1_ents);
2112 return 0;
2113}
2114
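/*
 * Pick the stream table format (two-level if supported, linear otherwise),
 * record the base address and attributes for STRTAB_BASE, and reserve
 * VMID 0 for stage-2 bypass STEs.
 */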
2115static int arm_smmu_init_strtab(struct arm_smmu_device *smmu)
2116{
2117 u64 reg;
2118 int ret;
2119
2120 if (smmu->features & ARM_SMMU_FEAT_2_LVL_STRTAB)
2121 ret = arm_smmu_init_strtab_2lvl(smmu);
2122 else
2123 ret = arm_smmu_init_strtab_linear(smmu);
2124
2125 if (ret)
2126 return ret;
2127
2128 /* Set the strtab base address */
2129 reg = smmu->strtab_cfg.strtab_dma &
2130 STRTAB_BASE_ADDR_MASK << STRTAB_BASE_ADDR_SHIFT;
2131 reg |= STRTAB_BASE_RA;
2132 smmu->strtab_cfg.strtab_base = reg;
2133
2134 /* Allocate the first VMID for stage-2 bypass STEs */
2135 set_bit(0, smmu->vmid_map);
2136 return 0;
2137}
2138
2139static void arm_smmu_free_strtab(struct arm_smmu_device *smmu)
2140{
2141 struct arm_smmu_strtab_cfg *cfg = &smmu->strtab_cfg;
2142 u32 size = cfg->num_l1_ents;
2143
2144 if (smmu->features & ARM_SMMU_FEAT_2_LVL_STRTAB) {
2145 arm_smmu_free_l2_strtab(smmu);
2146 size *= STRTAB_L1_DESC_DWORDS << 3;
2147 } else {
2148 size *= STRTAB_STE_DWORDS << 3;
2149 }
2150
2151 dma_free_coherent(smmu->dev, size, cfg->strtab, cfg->strtab_dma);
2152}
2153
2154static int arm_smmu_init_structures(struct arm_smmu_device *smmu)
2155{
2156 int ret;
2157
2158 ret = arm_smmu_init_queues(smmu);
2159 if (ret)
2160 return ret;
2161
2162 ret = arm_smmu_init_strtab(smmu);
2163 if (ret)
2164 goto out_free_queues;
2165
2166 return 0;
2167
2168out_free_queues:
2169 arm_smmu_free_queues(smmu);
2170 return ret;
2171}
2172
2173static void arm_smmu_free_structures(struct arm_smmu_device *smmu)
2174{
2175 arm_smmu_free_strtab(smmu);
2176 arm_smmu_free_queues(smmu);
2177}
2178
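/*
 * Write a control register and poll its acknowledgement register until
 * the new value is reflected back, or the poll times out.
 */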
2179static int arm_smmu_write_reg_sync(struct arm_smmu_device *smmu, u32 val,
2180 unsigned int reg_off, unsigned int ack_off)
2181{
2182 u32 reg;
2183
2184 writel_relaxed(val, smmu->base + reg_off);
2185 return readl_relaxed_poll_timeout(smmu->base + ack_off, reg, reg == val,
2186 1, ARM_SMMU_POLL_TIMEOUT_US);
2187}
2188
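/*
 * Disable interrupt generation, clear the MSI address registers and
 * request any wired interrupt lines we were given, then re-enable
 * generation (adding the PRI queue interrupt only if its handler was
 * successfully registered).
 */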
2189static int arm_smmu_setup_irqs(struct arm_smmu_device *smmu)
2190{
2191 int ret, irq;
Marc Zyngierccd63852015-07-15 11:55:18 +01002192 u32 irqen_flags = IRQ_CTRL_EVTQ_IRQEN | IRQ_CTRL_GERROR_IRQEN;
Will Deacon48ec83b2015-05-27 17:25:59 +01002193
2194 /* Disable IRQs first */
2195 ret = arm_smmu_write_reg_sync(smmu, 0, ARM_SMMU_IRQ_CTRL,
2196 ARM_SMMU_IRQ_CTRLACK);
2197 if (ret) {
2198 dev_err(smmu->dev, "failed to disable irqs\n");
2199 return ret;
2200 }
2201
2202 /* Clear the MSI address regs */
2203 writeq_relaxed(0, smmu->base + ARM_SMMU_GERROR_IRQ_CFG0);
2204 writeq_relaxed(0, smmu->base + ARM_SMMU_EVTQ_IRQ_CFG0);
2205
2206 /* Request wired interrupt lines */
2207 irq = smmu->evtq.q.irq;
2208 if (irq) {
2209 ret = devm_request_threaded_irq(smmu->dev, irq,
2210 arm_smmu_evtq_handler,
2211 arm_smmu_evtq_thread,
2212 0, "arm-smmu-v3-evtq", smmu);
2213 if (IS_ERR_VALUE(ret))
2214 dev_warn(smmu->dev, "failed to enable evtq irq\n");
2215 }
2216
2217 irq = smmu->cmdq.q.irq;
2218 if (irq) {
2219 ret = devm_request_irq(smmu->dev, irq,
2220 arm_smmu_cmdq_sync_handler, 0,
2221 "arm-smmu-v3-cmdq-sync", smmu);
2222 if (IS_ERR_VALUE(ret))
2223 dev_warn(smmu->dev, "failed to enable cmdq-sync irq\n");
2224 }
2225
2226 irq = smmu->gerr_irq;
2227 if (irq) {
2228 ret = devm_request_irq(smmu->dev, irq, arm_smmu_gerror_handler,
2229 0, "arm-smmu-v3-gerror", smmu);
2230 if (IS_ERR_VALUE(ret))
2231 dev_warn(smmu->dev, "failed to enable gerror irq\n");
2232 }
2233
2234 if (smmu->features & ARM_SMMU_FEAT_PRI) {
2235 writeq_relaxed(0, smmu->base + ARM_SMMU_PRIQ_IRQ_CFG0);
2236
2237 irq = smmu->priq.q.irq;
2238 if (irq) {
2239 ret = devm_request_threaded_irq(smmu->dev, irq,
2240 arm_smmu_priq_handler,
2241 arm_smmu_priq_thread,
2242 0, "arm-smmu-v3-priq",
2243 smmu);
2244 if (IS_ERR_VALUE(ret))
2245 dev_warn(smmu->dev,
2246 "failed to enable priq irq\n");
Marc Zyngierccd63852015-07-15 11:55:18 +01002247 else
2248 irqen_flags |= IRQ_CTRL_PRIQ_IRQEN;
Will Deacon48ec83b2015-05-27 17:25:59 +01002249 }
2250 }
2251
2252 /* Enable interrupt generation on the SMMU */
Marc Zyngierccd63852015-07-15 11:55:18 +01002253 ret = arm_smmu_write_reg_sync(smmu, irqen_flags,
Will Deacon48ec83b2015-05-27 17:25:59 +01002254 ARM_SMMU_IRQ_CTRL, ARM_SMMU_IRQ_CTRLACK);
2255 if (ret)
2256 dev_warn(smmu->dev, "failed to enable irqs\n");
2257
2258 return 0;
2259}
2260
2261static int arm_smmu_device_disable(struct arm_smmu_device *smmu)
2262{
2263 int ret;
2264
2265 ret = arm_smmu_write_reg_sync(smmu, 0, ARM_SMMU_CR0, ARM_SMMU_CR0ACK);
2266 if (ret)
2267 dev_err(smmu->dev, "failed to clear cr0\n");
2268
2269 return ret;
2270}
2271
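/*
 * Bring the SMMU up from a known state: disable it, programme the table
 * and queue memory attributes (CR1/CR2), install the stream table and
 * command queue, invalidate any cached configuration and TLB entries,
 * then enable the event/PRI queues, the interrupts and finally the SMMU
 * itself.
 */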
2272static int arm_smmu_device_reset(struct arm_smmu_device *smmu)
2273{
2274 int ret;
2275 u32 reg, enables;
2276 struct arm_smmu_cmdq_ent cmd;
2277
2278 /* Clear CR0 and sync (disables SMMU and queue processing) */
2279 reg = readl_relaxed(smmu->base + ARM_SMMU_CR0);
2280 if (reg & CR0_SMMUEN)
2281 dev_warn(smmu->dev, "SMMU currently enabled! Resetting...\n");
2282
2283 ret = arm_smmu_device_disable(smmu);
2284 if (ret)
2285 return ret;
2286
2287 /* CR1 (table and queue memory attributes) */
2288 reg = (CR1_SH_ISH << CR1_TABLE_SH_SHIFT) |
2289 (CR1_CACHE_WB << CR1_TABLE_OC_SHIFT) |
2290 (CR1_CACHE_WB << CR1_TABLE_IC_SHIFT) |
2291 (CR1_SH_ISH << CR1_QUEUE_SH_SHIFT) |
2292 (CR1_CACHE_WB << CR1_QUEUE_OC_SHIFT) |
2293 (CR1_CACHE_WB << CR1_QUEUE_IC_SHIFT);
2294 writel_relaxed(reg, smmu->base + ARM_SMMU_CR1);
2295
2296 /* CR2 (miscellaneous configuration: PTM, RECINVSID, E2H) */
2297 reg = CR2_PTM | CR2_RECINVSID | CR2_E2H;
2298 writel_relaxed(reg, smmu->base + ARM_SMMU_CR2);
2299
2300 /* Stream table */
2301 writeq_relaxed(smmu->strtab_cfg.strtab_base,
2302 smmu->base + ARM_SMMU_STRTAB_BASE);
2303 writel_relaxed(smmu->strtab_cfg.strtab_base_cfg,
2304 smmu->base + ARM_SMMU_STRTAB_BASE_CFG);
2305
2306 /* Command queue */
2307 writeq_relaxed(smmu->cmdq.q.q_base, smmu->base + ARM_SMMU_CMDQ_BASE);
2308 writel_relaxed(smmu->cmdq.q.prod, smmu->base + ARM_SMMU_CMDQ_PROD);
2309 writel_relaxed(smmu->cmdq.q.cons, smmu->base + ARM_SMMU_CMDQ_CONS);
2310
2311 enables = CR0_CMDQEN;
2312 ret = arm_smmu_write_reg_sync(smmu, enables, ARM_SMMU_CR0,
2313 ARM_SMMU_CR0ACK);
2314 if (ret) {
2315 dev_err(smmu->dev, "failed to enable command queue\n");
2316 return ret;
2317 }
2318
2319 /* Invalidate any cached configuration */
2320 cmd.opcode = CMDQ_OP_CFGI_ALL;
2321 arm_smmu_cmdq_issue_cmd(smmu, &cmd);
2322 cmd.opcode = CMDQ_OP_CMD_SYNC;
2323 arm_smmu_cmdq_issue_cmd(smmu, &cmd);
2324
2325 /* Invalidate any stale TLB entries */
2326 if (smmu->features & ARM_SMMU_FEAT_HYP) {
2327 cmd.opcode = CMDQ_OP_TLBI_EL2_ALL;
2328 arm_smmu_cmdq_issue_cmd(smmu, &cmd);
2329 }
2330
2331 cmd.opcode = CMDQ_OP_TLBI_NSNH_ALL;
2332 arm_smmu_cmdq_issue_cmd(smmu, &cmd);
2333 cmd.opcode = CMDQ_OP_CMD_SYNC;
2334 arm_smmu_cmdq_issue_cmd(smmu, &cmd);
2335
2336 /* Event queue */
2337 writeq_relaxed(smmu->evtq.q.q_base, smmu->base + ARM_SMMU_EVTQ_BASE);
2338 writel_relaxed(smmu->evtq.q.prod, smmu->base + ARM_SMMU_EVTQ_PROD);
2339 writel_relaxed(smmu->evtq.q.cons, smmu->base + ARM_SMMU_EVTQ_CONS);
2340
2341 enables |= CR0_EVTQEN;
2342 ret = arm_smmu_write_reg_sync(smmu, enables, ARM_SMMU_CR0,
2343 ARM_SMMU_CR0ACK);
2344 if (ret) {
2345 dev_err(smmu->dev, "failed to enable event queue\n");
2346 return ret;
2347 }
2348
2349 /* PRI queue */
2350 if (smmu->features & ARM_SMMU_FEAT_PRI) {
2351 writeq_relaxed(smmu->priq.q.q_base,
2352 smmu->base + ARM_SMMU_PRIQ_BASE);
2353 writel_relaxed(smmu->priq.q.prod,
2354 smmu->base + ARM_SMMU_PRIQ_PROD);
2355 writel_relaxed(smmu->priq.q.cons,
2356 smmu->base + ARM_SMMU_PRIQ_CONS);
2357
2358 enables |= CR0_PRIQEN;
2359 ret = arm_smmu_write_reg_sync(smmu, enables, ARM_SMMU_CR0,
2360 ARM_SMMU_CR0ACK);
2361 if (ret) {
2362 dev_err(smmu->dev, "failed to enable PRI queue\n");
2363 return ret;
2364 }
2365 }
2366
2367 ret = arm_smmu_setup_irqs(smmu);
2368 if (ret) {
2369 dev_err(smmu->dev, "failed to setup irqs\n");
2370 return ret;
2371 }
2372
2373 /* Enable the SMMU interface */
2374 enables |= CR0_SMMUEN;
2375 ret = arm_smmu_write_reg_sync(smmu, enables, ARM_SMMU_CR0,
2376 ARM_SMMU_CR0ACK);
2377 if (ret) {
2378 dev_err(smmu->dev, "failed to enable SMMU interface\n");
2379 return ret;
2380 }
2381
2382 return 0;
2383}
2384
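/*
 * Read the ID registers (IDR0, IDR1, IDR5) to discover this
 * implementation's features, queue sizes, SID/SSID widths, supported
 * page sizes and input/output address sizes.
 */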
2385static int arm_smmu_device_probe(struct arm_smmu_device *smmu)
2386{
2387 u32 reg;
2388 bool coherent;
2389 unsigned long pgsize_bitmap = 0;
2390
2391 /* IDR0 */
2392 reg = readl_relaxed(smmu->base + ARM_SMMU_IDR0);
2393
2394 /* 2-level structures */
2395 if ((reg & IDR0_ST_LVL_MASK << IDR0_ST_LVL_SHIFT) == IDR0_ST_LVL_2LVL)
2396 smmu->features |= ARM_SMMU_FEAT_2_LVL_STRTAB;
2397
2398 if (reg & IDR0_CD2L)
2399 smmu->features |= ARM_SMMU_FEAT_2_LVL_CDTAB;
2400
2401 /*
2402 * Translation table endianness.
2403 * We currently require the same endianness as the CPU, but this
2404 * could be changed later by adding a new IO_PGTABLE_QUIRK.
2405 */
2406 switch (reg & IDR0_TTENDIAN_MASK << IDR0_TTENDIAN_SHIFT) {
2407 case IDR0_TTENDIAN_MIXED:
2408 smmu->features |= ARM_SMMU_FEAT_TT_LE | ARM_SMMU_FEAT_TT_BE;
2409 break;
2410#ifdef __BIG_ENDIAN
2411 case IDR0_TTENDIAN_BE:
2412 smmu->features |= ARM_SMMU_FEAT_TT_BE;
2413 break;
2414#else
2415 case IDR0_TTENDIAN_LE:
2416 smmu->features |= ARM_SMMU_FEAT_TT_LE;
2417 break;
2418#endif
2419 default:
2420 dev_err(smmu->dev, "unknown/unsupported TT endianness!\n");
2421 return -ENXIO;
2422 }
2423
2424 /* Boolean feature flags */
2425 if (IS_ENABLED(CONFIG_PCI_PRI) && reg & IDR0_PRI)
2426 smmu->features |= ARM_SMMU_FEAT_PRI;
2427
2428 if (IS_ENABLED(CONFIG_PCI_ATS) && reg & IDR0_ATS)
2429 smmu->features |= ARM_SMMU_FEAT_ATS;
2430
2431 if (reg & IDR0_SEV)
2432 smmu->features |= ARM_SMMU_FEAT_SEV;
2433
2434 if (reg & IDR0_MSI)
2435 smmu->features |= ARM_SMMU_FEAT_MSI;
2436
2437 if (reg & IDR0_HYP)
2438 smmu->features |= ARM_SMMU_FEAT_HYP;
2439
2440 /*
2441 * The dma-coherent property is used in preference to the ID
2442 * register, but warn on mismatch.
2443 */
2444 coherent = of_dma_is_coherent(smmu->dev->of_node);
2445 if (coherent)
2446 smmu->features |= ARM_SMMU_FEAT_COHERENCY;
2447
2448 if (!!(reg & IDR0_COHACC) != coherent)
2449 dev_warn(smmu->dev, "IDR0.COHACC overridden by dma-coherent property (%s)\n",
2450 coherent ? "true" : "false");
2451
2452 if (reg & IDR0_STALL_MODEL)
2453 smmu->features |= ARM_SMMU_FEAT_STALLS;
2454
2455 if (reg & IDR0_S1P)
2456 smmu->features |= ARM_SMMU_FEAT_TRANS_S1;
2457
2458 if (reg & IDR0_S2P)
2459 smmu->features |= ARM_SMMU_FEAT_TRANS_S2;
2460
2461 if (!(reg & (IDR0_S1P | IDR0_S2P))) {
2462 dev_err(smmu->dev, "no translation support!\n");
2463 return -ENXIO;
2464 }
2465
2466 /* We only support the AArch64 table format at present */
Will Deaconf0c453d2015-08-20 12:12:32 +01002467 switch (reg & IDR0_TTF_MASK << IDR0_TTF_SHIFT) {
2468 case IDR0_TTF_AARCH32_64:
2469 smmu->ias = 40;
2470 /* Fallthrough */
2471 case IDR0_TTF_AARCH64:
2472 break;
2473 default:
Will Deacon48ec83b2015-05-27 17:25:59 +01002474 dev_err(smmu->dev, "AArch64 table format not supported!\n");
2475 return -ENXIO;
2476 }
2477
2478 /* ASID/VMID sizes */
2479 smmu->asid_bits = reg & IDR0_ASID16 ? 16 : 8;
2480 smmu->vmid_bits = reg & IDR0_VMID16 ? 16 : 8;
2481
2482 /* IDR1 */
2483 reg = readl_relaxed(smmu->base + ARM_SMMU_IDR1);
2484 if (reg & (IDR1_TABLES_PRESET | IDR1_QUEUES_PRESET | IDR1_REL)) {
2485 dev_err(smmu->dev, "embedded implementation not supported\n");
2486 return -ENXIO;
2487 }
2488
2489 /* Queue sizes, capped at 4k */
2490 smmu->cmdq.q.max_n_shift = min((u32)CMDQ_MAX_SZ_SHIFT,
2491 reg >> IDR1_CMDQ_SHIFT & IDR1_CMDQ_MASK);
2492 if (!smmu->cmdq.q.max_n_shift) {
2493 /* Odd alignment restrictions on the base, so ignore for now */
2494 dev_err(smmu->dev, "unit-length command queue not supported\n");
2495 return -ENXIO;
2496 }
2497
2498 smmu->evtq.q.max_n_shift = min((u32)EVTQ_MAX_SZ_SHIFT,
2499 reg >> IDR1_EVTQ_SHIFT & IDR1_EVTQ_MASK);
2500 smmu->priq.q.max_n_shift = min((u32)PRIQ_MAX_SZ_SHIFT,
2501 reg >> IDR1_PRIQ_SHIFT & IDR1_PRIQ_MASK);
2502
2503 /* SID/SSID sizes */
2504 smmu->ssid_bits = reg >> IDR1_SSID_SHIFT & IDR1_SSID_MASK;
2505 smmu->sid_bits = reg >> IDR1_SID_SHIFT & IDR1_SID_MASK;
2506
2507 /* IDR5 */
2508 reg = readl_relaxed(smmu->base + ARM_SMMU_IDR5);
2509
2510 /* Maximum number of outstanding stalls */
2511 smmu->evtq.max_stalls = reg >> IDR5_STALL_MAX_SHIFT
2512 & IDR5_STALL_MAX_MASK;
2513
2514 /* Page sizes */
2515 if (reg & IDR5_GRAN64K)
2516 pgsize_bitmap |= SZ_64K | SZ_512M;
2517 if (reg & IDR5_GRAN16K)
2518 pgsize_bitmap |= SZ_16K | SZ_32M;
2519 if (reg & IDR5_GRAN4K)
2520 pgsize_bitmap |= SZ_4K | SZ_2M | SZ_1G;
2521
2522 arm_smmu_ops.pgsize_bitmap &= pgsize_bitmap;
2523
2524 /* Output address size */
2525 switch (reg & IDR5_OAS_MASK << IDR5_OAS_SHIFT) {
2526 case IDR5_OAS_32_BIT:
2527 smmu->oas = 32;
2528 break;
2529 case IDR5_OAS_36_BIT:
2530 smmu->oas = 36;
2531 break;
2532 case IDR5_OAS_40_BIT:
2533 smmu->oas = 40;
2534 break;
2535 case IDR5_OAS_42_BIT:
2536 smmu->oas = 42;
2537 break;
2538 case IDR5_OAS_44_BIT:
2539 smmu->oas = 44;
2540 break;
Will Deacon85430962015-08-03 10:35:40 +01002541 default:
2542 dev_info(smmu->dev,
2543 "unknown output address size. Truncating to 48-bit\n");
2544 /* Fallthrough */
Will Deacon48ec83b2015-05-27 17:25:59 +01002545 case IDR5_OAS_48_BIT:
2546 smmu->oas = 48;
Will Deacon48ec83b2015-05-27 17:25:59 +01002547 }
2548
2549 /* Set the DMA mask for our table walker */
2550 if (dma_set_mask_and_coherent(smmu->dev, DMA_BIT_MASK(smmu->oas)))
2551 dev_warn(smmu->dev,
2552 "failed to set DMA mask for table walker\n");
2553
Will Deaconf0c453d2015-08-20 12:12:32 +01002554 smmu->ias = max(smmu->ias, smmu->oas);
Will Deacon48ec83b2015-05-27 17:25:59 +01002555
2556 dev_info(smmu->dev, "ias %lu-bit, oas %lu-bit (features 0x%08x)\n",
2557 smmu->ias, smmu->oas, smmu->features);
2558 return 0;
2559}
2560
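/*
 * Platform driver probe: map the MMIO region, pick up the wired IRQs and
 * driver options from the device tree, probe the hardware, allocate the
 * in-memory structures, reset the device and add it to the global list.
 */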
2561static int arm_smmu_device_dt_probe(struct platform_device *pdev)
2562{
2563 int irq, ret;
2564 struct resource *res;
2565 struct arm_smmu_device *smmu;
2566 struct device *dev = &pdev->dev;
2567
2568 smmu = devm_kzalloc(dev, sizeof(*smmu), GFP_KERNEL);
2569 if (!smmu) {
2570 dev_err(dev, "failed to allocate arm_smmu_device\n");
2571 return -ENOMEM;
2572 }
2573 smmu->dev = dev;
2574
2575 /* Base address */
2576 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
2577 if (!res || resource_size(res) < SZ_128K) {
2578 dev_err(dev, "MMIO region too small (%pr)\n", res);
2579 return -EINVAL;
2580 }
2581
2582 smmu->base = devm_ioremap_resource(dev, res);
2583 if (IS_ERR(smmu->base))
2584 return PTR_ERR(smmu->base);
2585
2586 /* Interrupt lines */
2587 irq = platform_get_irq_byname(pdev, "eventq");
2588 if (irq > 0)
2589 smmu->evtq.q.irq = irq;
2590
2591 irq = platform_get_irq_byname(pdev, "priq");
2592 if (irq > 0)
2593 smmu->priq.q.irq = irq;
2594
2595 irq = platform_get_irq_byname(pdev, "cmdq-sync");
2596 if (irq > 0)
2597 smmu->cmdq.q.irq = irq;
2598
2599 irq = platform_get_irq_byname(pdev, "gerror");
2600 if (irq > 0)
2601 smmu->gerr_irq = irq;
2602
Zhen Lei5e929462015-07-07 04:30:18 +01002603 parse_driver_options(smmu);
2604
Will Deacon48ec83b2015-05-27 17:25:59 +01002605 /* Probe the h/w */
2606 ret = arm_smmu_device_probe(smmu);
2607 if (ret)
2608 return ret;
2609
2610 /* Initialise in-memory data structures */
2611 ret = arm_smmu_init_structures(smmu);
2612 if (ret)
2613 return ret;
2614
2615 /* Reset the device */
2616 ret = arm_smmu_device_reset(smmu);
2617 if (ret)
2618 goto out_free_structures;
2619
2620 /* Record our private device structure */
2621 INIT_LIST_HEAD(&smmu->list);
2622 spin_lock(&arm_smmu_devices_lock);
2623 list_add(&smmu->list, &arm_smmu_devices);
2624 spin_unlock(&arm_smmu_devices_lock);
2625 return 0;
2626
2627out_free_structures:
2628 arm_smmu_free_structures(smmu);
2629 return ret;
2630}
2631
2632static int arm_smmu_device_remove(struct platform_device *pdev)
2633{
2634 struct arm_smmu_device *curr, *smmu = NULL;
2635 struct device *dev = &pdev->dev;
2636
2637 spin_lock(&arm_smmu_devices_lock);
2638 list_for_each_entry(curr, &arm_smmu_devices, list) {
2639 if (curr->dev == dev) {
2640 smmu = curr;
2641 list_del(&smmu->list);
2642 break;
2643 }
2644 }
2645 spin_unlock(&arm_smmu_devices_lock);
2646
2647 if (!smmu)
2648 return -ENODEV;
2649
2650 arm_smmu_device_disable(smmu);
2651 arm_smmu_free_structures(smmu);
2652 return 0;
2653}
2654
2655static struct of_device_id arm_smmu_of_match[] = {
2656 { .compatible = "arm,smmu-v3", },
2657 { },
2658};
2659MODULE_DEVICE_TABLE(of, arm_smmu_of_match);
2660
2661static struct platform_driver arm_smmu_driver = {
2662 .driver = {
2663 .name = "arm-smmu-v3",
2664 .of_match_table = of_match_ptr(arm_smmu_of_match),
2665 },
2666 .probe = arm_smmu_device_dt_probe,
2667 .remove = arm_smmu_device_remove,
2668};
2669
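/*
 * Only register the platform driver if a matching device-tree node is
 * present, then expose the SMMUv3 ops to devices on the PCI bus.
 */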
2670static int __init arm_smmu_init(void)
2671{
2672 struct device_node *np;
2673 int ret;
2674
2675 np = of_find_matching_node(NULL, arm_smmu_of_match);
2676 if (!np)
2677 return 0;
2678
2679 of_node_put(np);
2680
2681 ret = platform_driver_register(&arm_smmu_driver);
2682 if (ret)
2683 return ret;
2684
2685 return bus_set_iommu(&pci_bus_type, &arm_smmu_ops);
2686}
2687
2688static void __exit arm_smmu_exit(void)
2689{
2690 return platform_driver_unregister(&arm_smmu_driver);
2691}
2692
2693subsys_initcall(arm_smmu_init);
2694module_exit(arm_smmu_exit);
2695
2696MODULE_DESCRIPTION("IOMMU API for ARM architected SMMUv3 implementations");
2697MODULE_AUTHOR("Will Deacon <will.deacon@arm.com>");
2698MODULE_LICENSE("GPL v2");